You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@impala.apache.org by ta...@apache.org on 2019/01/12 18:19:00 UTC

[01/26] impala git commit: IMPALA-6544/IMPALA-7070: Disable tests which fail due to S3's eventual consistency

Repository: impala
Updated Branches:
  refs/heads/master 6d5ca479f -> 85b9c6c42


IMPALA-6544/IMPALA-7070: Disable tests which fail due to S3's eventual consistency

This patch is a temporary fix to disable tests which fail due to
S3's eventually consistent behavior. The permanent fix would
involve running tests with S3Guard enabled.

Change-Id: I676faa191bec8b156e430661c22ee69242eeba9d
Reviewed-on: http://gerrit.cloudera.org:8080/12203
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/3a3ab7ff
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/3a3ab7ff
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/3a3ab7ff

Branch: refs/heads/master
Commit: 3a3ab7ff8f6fef0407dc0e2b05b122fd9a0586db
Parents: 6d5ca47
Author: poojanilangekar <po...@cloudera.com>
Authored: Wed Jan 9 14:07:27 2019 -0800
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sat Jan 12 03:42:48 2019 +0000

----------------------------------------------------------------------
 tests/metadata/test_compute_stats.py        | 5 +++++
 tests/query_test/test_insert.py             | 4 +++-
 tests/query_test/test_insert_permutation.py | 2 ++
 tests/query_test/test_nested_types.py       | 2 ++
 4 files changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/3a3ab7ff/tests/metadata/test_compute_stats.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_compute_stats.py b/tests/metadata/test_compute_stats.py
index 9adc8cd..f9ac55d 100644
--- a/tests/metadata/test_compute_stats.py
+++ b/tests/metadata/test_compute_stats.py
@@ -42,16 +42,19 @@ class TestComputeStats(ImpalaTestSuite):
         create_uncompressed_text_dimension(cls.get_workload()))
 
   @SkipIfLocal.hdfs_blocks
+  @SkipIfS3.eventually_consistent
   def test_compute_stats(self, vector, unique_database):
     self.run_test_case('QueryTest/compute-stats', vector, unique_database)
     # Test compute stats on decimal columns separately so we can vary between platforms
     # with and without write support for decimals (Hive < 0.11 and >= 0.11).
     self.run_test_case('QueryTest/compute-stats-decimal', vector, unique_database)
 
+  @SkipIfS3.eventually_consistent
   def test_compute_stats_incremental(self, vector, unique_database):
     self.run_test_case('QueryTest/compute-stats-incremental', vector, unique_database)
 
   @pytest.mark.execute_serially
+  @SkipIfS3.eventually_consistent
   def test_compute_stats_many_partitions(self, vector):
     # To cut down on test execution time, only run the compute stats test against many
     # partitions if performing an exhaustive test run.
@@ -59,6 +62,7 @@ class TestComputeStats(ImpalaTestSuite):
     self.run_test_case('QueryTest/compute-stats-many-partitions', vector)
 
   @pytest.mark.execute_serially
+  @SkipIfS3.eventually_consistent
   def test_compute_stats_keywords(self, vector):
     """IMPALA-1055: Tests compute stats with a db/table name that are keywords."""
     self.execute_query("drop database if exists `parquet` cascade")
@@ -120,6 +124,7 @@ class TestComputeStats(ImpalaTestSuite):
     assert(len(show_result.data) == 2)
     assert("1\tpval\t8" in show_result.data[0])
 
+  @SkipIfS3.eventually_consistent
   def test_pull_stats_profile(self, vector, unique_database):
     """Checks that the frontend profile includes metrics when computing
        incremental statistics.

http://git-wip-us.apache.org/repos/asf/impala/blob/3a3ab7ff/tests/query_test/test_insert.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_insert.py b/tests/query_test/test_insert.py
index ecb1a8e..08abbfa 100644
--- a/tests/query_test/test_insert.py
+++ b/tests/query_test/test_insert.py
@@ -23,7 +23,8 @@ from testdata.common import widetable
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
-from tests.common.skip import SkipIfABFS, SkipIfEC, SkipIfLocal, SkipIfNotHdfsMinicluster
+from tests.common.skip import SkipIfABFS, SkipIfEC, SkipIfLocal, \
+    SkipIfNotHdfsMinicluster, SkipIfS3
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -149,6 +150,7 @@ class TestInsertQueries(ImpalaTestSuite):
       v.wait_for_metric("impala-server.num-fragments-in-flight", 0, timeout=60)
 
   @pytest.mark.execute_serially
+  @SkipIfS3.eventually_consistent
   def test_insert_overwrite(self, vector):
     self.run_test_case('QueryTest/insert_overwrite', vector,
         multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1)

http://git-wip-us.apache.org/repos/asf/impala/blob/3a3ab7ff/tests/query_test/test_insert_permutation.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_insert_permutation.py b/tests/query_test/test_insert_permutation.py
index 46d1090..fc292dd 100644
--- a/tests/query_test/test_insert_permutation.py
+++ b/tests/query_test/test_insert_permutation.py
@@ -18,6 +18,7 @@
 # Targeted Impala insert tests
 
 from tests.common.impala_test_suite import ImpalaTestSuite
+from tests.common.skip import SkipIfS3
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -45,6 +46,7 @@ class TestInsertQueriesWithPermutation(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
         create_uncompressed_text_dimension(cls.get_workload()))
 
+  @SkipIfS3.eventually_consistent
   def test_insert_permutation(self, vector):
     map(self.cleanup_db, ["insert_permutation_test"])
     self.run_test_case('QueryTest/insert_permutation', vector)

http://git-wip-us.apache.org/repos/asf/impala/blob/3a3ab7ff/tests/query_test/test_nested_types.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_nested_types.py b/tests/query_test/test_nested_types.py
index 38a2582..ad7bf96 100644
--- a/tests/query_test/test_nested_types.py
+++ b/tests/query_test/test_nested_types.py
@@ -365,6 +365,7 @@ class TestParquetArrayEncodings(ImpalaTestSuite):
   # ..intListsColumn_tuple_tuple = 6
   # ..intListsColumn_tuple_tuple = 7
   # ..intListsColumn_tuple_tuple = 8
+  @SkipIfS3.eventually_consistent
   def test_thrift_array_of_arrays(self, vector, unique_database):
     self.__test_array_of_arrays(unique_database, "ThriftArrayOfArrays",
       "bad-thrift.parquet", vector, 1)
@@ -513,6 +514,7 @@ class TestParquetArrayEncodings(ImpalaTestSuite):
   # ...f22 = 220
   # ..F11 = 110
   # ..F12 = 120
+  @SkipIfS3.eventually_consistent
   def test_ambiguous_list(self, vector, unique_database):
     """IMPALA-4725: Tests the schema-resolution behavior with different values for the
     PARQUET_ARRAY_RESOLUTION and PARQUET_FALLBACK_SCHEMA_RESOLUTION query options.


[17/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-materialization.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-materialization.test b/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-materialization.test
index 40f799f..8b294ed 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-materialization.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-materialization.test
@@ -9,6 +9,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 7, count(int_col)), aggif(valid_tid() = 11, min(float_col)), aggif(valid_tid() = 11, max(float_col)), aggif(valid_tid() = 11, sum(double_col))
+|  row-size=32B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -17,6 +18,7 @@ PLAN-ROOT SINK
 |    output: count(int_col)
 |  Class 2
 |    output: min:merge(float_col), max:merge(float_col), sum:merge(double_col)
+|  row-size=32B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -25,14 +27,17 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    output: min(float_col), max(float_col), sum(double_col)
+|  row-size=21B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 7, count(int_col)), aggif(valid_tid() = 11, min(float_col)), aggif(valid_tid() = 11, max(float_col)), aggif(valid_tid() = 11, sum(double_col))
+|  row-size=32B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -41,6 +46,7 @@ PLAN-ROOT SINK
 |    output: count:merge(int_col)
 |  Class 2
 |    output: min:merge(float_col), max:merge(float_col), sum:merge(double_col)
+|  row-size=32B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -51,6 +57,7 @@ PLAN-ROOT SINK
 |    output: count(int_col)
 |  Class 2
 |    output: min:merge(float_col), max:merge(float_col), sum:merge(double_col)
+|  row-size=32B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -59,6 +66,7 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    output: min:merge(float_col), max:merge(float_col), sum:merge(double_col)
+|  row-size=21B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 6 THEN murmur_hash(int_col) WHEN 11 THEN 0 END)]
 |
@@ -69,9 +77,11 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    output: min(float_col), max(float_col), sum(double_col)
+|  row-size=21B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # A subset of aggregation classes are materialized. A subset of aggregation functions
 # of the surviving classes are materialized. No group by.
@@ -87,6 +97,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, count(tinyint_col)), aggif(valid_tid() = 9, avg(int_col)), aggif(valid_tid() = 13, max(float_col))
+|  row-size=20B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -95,6 +106,7 @@ PLAN-ROOT SINK
 |    output: avg(int_col)
 |  Class 2
 |    output: max:merge(float_col)
+|  row-size=20B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -103,14 +115,17 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    output: max(float_col)
+|  row-size=9B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, count(tinyint_col)), aggif(valid_tid() = 9, avg(int_col)), aggif(valid_tid() = 13, max(float_col))
+|  row-size=20B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -119,6 +134,7 @@ PLAN-ROOT SINK
 |    output: avg:merge(int_col)
 |  Class 2
 |    output: max:merge(float_col)
+|  row-size=20B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -129,6 +145,7 @@ PLAN-ROOT SINK
 |    output: avg(int_col)
 |  Class 2
 |    output: max:merge(float_col)
+|  row-size=20B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -137,6 +154,7 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    output: max:merge(float_col)
+|  row-size=9B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 7 THEN murmur_hash(int_col) WHEN 13 THEN 0 END)]
 |
@@ -147,9 +165,11 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    output: max(float_col)
+|  row-size=9B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ====
 # A subset of aggregation classes are materialized. With group by.
 select a, c, e1, e2, e3, gby2 from
@@ -165,6 +185,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 7, count(int_col)), aggif(valid_tid() = 11, min(float_col)), aggif(valid_tid() = 11, max(float_col)), aggif(valid_tid() = 11, sum(double_col))
 |  group by: CASE valid_tid() WHEN 2 THEN string_col WHEN 7 THEN string_col WHEN 11 THEN string_col END, CASE valid_tid() WHEN 2 THEN date_string_col WHEN 7 THEN date_string_col WHEN 11 THEN date_string_col END
+|  row-size=56B cardinality=7.30K
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -176,6 +197,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min:merge(float_col), max:merge(float_col), sum:merge(double_col)
 |    group by: string_col, date_string_col
+|  row-size=131B cardinality=7.30K
 |
 01:AGGREGATE
 |  Class 0
@@ -185,9 +207,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min(float_col), max(float_col), sum(double_col)
 |    group by: string_col, date_string_col
+|  row-size=120B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=50B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -196,6 +220,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 7, count(int_col)), aggif(valid_tid() = 11, min(float_col)), aggif(valid_tid() = 11, max(float_col)), aggif(valid_tid() = 11, sum(double_col))
 |  group by: CASE valid_tid() WHEN 2 THEN string_col WHEN 7 THEN string_col WHEN 11 THEN string_col END, CASE valid_tid() WHEN 2 THEN date_string_col WHEN 7 THEN date_string_col WHEN 11 THEN date_string_col END
+|  row-size=56B cardinality=7.30K
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -207,6 +232,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min:merge(float_col), max:merge(float_col), sum:merge(double_col)
 |    group by: string_col, date_string_col
+|  row-size=131B cardinality=7.30K
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(string_col) WHEN 7 THEN murmur_hash(string_col) WHEN 11 THEN murmur_hash(string_col) END,CASE valid_tid() WHEN 2 THEN murmur_hash(date_string_col) WHEN 7 THEN murmur_hash(date_string_col) WHEN 11 THEN murmur_hash(date_string_col) END)]
 |
@@ -220,6 +246,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min:merge(float_col), max:merge(float_col), sum:merge(double_col)
 |    group by: string_col, date_string_col
+|  row-size=131B cardinality=7.30K
 |
 05:AGGREGATE
 |  Class 0
@@ -229,6 +256,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min:merge(float_col), max:merge(float_col), sum:merge(double_col)
 |    group by: string_col, date_string_col
+|  row-size=120B cardinality=7.30K
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(string_col) WHEN 6 THEN murmur_hash(string_col) WHEN 11 THEN murmur_hash(string_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(date_string_col) WHEN 6 THEN murmur_hash(date_string_col) WHEN 11 THEN murmur_hash(date_string_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 6 THEN murmur_hash(int_col) WHEN 11 THEN 0 END)]
 |
@@ -240,9 +268,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min(float_col), max(float_col), sum(double_col)
 |    group by: string_col, date_string_col
+|  row-size=120B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=50B cardinality=7.30K
 ====
 # A subset of aggregation classes are materialized. A subset of aggregation functions
 # of the surviving classes are materialized. With group by.
@@ -261,6 +291,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, count(tinyint_col)), aggif(valid_tid() = 9, avg(int_col)), aggif(valid_tid() = 13, max(float_col))
 |  group by: CASE valid_tid() WHEN 3 THEN string_col WHEN 9 THEN string_col WHEN 13 THEN string_col END, CASE valid_tid() WHEN 3 THEN date_string_col WHEN 9 THEN date_string_col WHEN 13 THEN date_string_col END
+|  row-size=44B cardinality=7.30K
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -272,6 +303,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(float_col)
 |    group by: string_col, date_string_col
+|  row-size=119B cardinality=7.30K
 |
 01:AGGREGATE
 |  Class 0
@@ -281,9 +313,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max(float_col)
 |    group by: string_col, date_string_col
+|  row-size=108B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=42B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -292,6 +326,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, count(tinyint_col)), aggif(valid_tid() = 9, avg(int_col)), aggif(valid_tid() = 13, max(float_col))
 |  group by: CASE valid_tid() WHEN 3 THEN string_col WHEN 9 THEN string_col WHEN 13 THEN string_col END, CASE valid_tid() WHEN 3 THEN date_string_col WHEN 9 THEN date_string_col WHEN 13 THEN date_string_col END
+|  row-size=44B cardinality=7.30K
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -303,6 +338,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(float_col)
 |    group by: string_col, date_string_col
+|  row-size=119B cardinality=7.30K
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(string_col) WHEN 8 THEN murmur_hash(string_col) WHEN 13 THEN murmur_hash(string_col) END,CASE valid_tid() WHEN 2 THEN murmur_hash(date_string_col) WHEN 8 THEN murmur_hash(date_string_col) WHEN 13 THEN murmur_hash(date_string_col) END)]
 |
@@ -316,6 +352,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(float_col)
 |    group by: string_col, date_string_col
+|  row-size=119B cardinality=7.30K
 |
 05:AGGREGATE
 |  Class 0
@@ -325,6 +362,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(float_col)
 |    group by: string_col, date_string_col
+|  row-size=108B cardinality=7.30K
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(string_col) WHEN 7 THEN murmur_hash(string_col) WHEN 13 THEN murmur_hash(string_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(date_string_col) WHEN 7 THEN murmur_hash(date_string_col) WHEN 13 THEN murmur_hash(date_string_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 7 THEN murmur_hash(int_col) WHEN 13 THEN 0 END)]
 |
@@ -336,9 +374,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max(float_col)
 |    group by: string_col, date_string_col
+|  row-size=108B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=42B cardinality=7.30K
 ====
 # Simplifies to a single aggregation class. Only first distinct agg is materialized.
 select a from
@@ -349,33 +389,41 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(tinyint_col)
+|  row-size=8B cardinality=1
 |
 01:AGGREGATE
 |  group by: tinyint_col
+|  row-size=1B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=1B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(tinyint_col)
+|  row-size=8B cardinality=1
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(tinyint_col)
+|  row-size=8B cardinality=1
 |
 05:AGGREGATE
 |  group by: tinyint_col
+|  row-size=1B cardinality=10
 |
 04:EXCHANGE [HASH(tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: tinyint_col
+|  row-size=1B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=1B cardinality=7.30K
 ====
 # Simplifies to a single aggregation class. Only second distinct agg is materialized.
 select b from
@@ -386,33 +434,41 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(smallint_col)
+|  row-size=8B cardinality=1
 |
 01:AGGREGATE
 |  group by: smallint_col
+|  row-size=2B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=2B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(smallint_col)
+|  row-size=8B cardinality=1
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(smallint_col)
+|  row-size=8B cardinality=1
 |
 05:AGGREGATE
 |  group by: smallint_col
+|  row-size=2B cardinality=10
 |
 04:EXCHANGE [HASH(smallint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: smallint_col
+|  row-size=2B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=2B cardinality=7.30K
 ====
 # Some aggs only referenced in HAVING clause.
 select count(distinct tinyint_col), min(timestamp_col) from functional.alltypes
@@ -423,6 +479,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, min(timestamp_col)), aggif(valid_tid() = 5, max(date_string_col))
 |  having: aggif(valid_tid() = 4, count(smallint_col)) < 10, aggif(valid_tid() = 5, max(date_string_col)) = 'test'
+|  row-size=44B cardinality=0
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -431,6 +488,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(date_string_col)
+|  row-size=44B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -439,15 +497,18 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: min(timestamp_col), max(date_string_col)
+|  row-size=31B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=39B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, min(timestamp_col)), aggif(valid_tid() = 5, max(date_string_col))
 |  having: aggif(valid_tid() = 4, count(smallint_col)) < 10, aggif(valid_tid() = 5, max(date_string_col)) = 'test'
+|  row-size=44B cardinality=0
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -456,6 +517,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(date_string_col)
+|  row-size=44B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -466,6 +528,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(date_string_col)
+|  row-size=44B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -474,6 +537,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(date_string_col)
+|  row-size=31B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -484,9 +548,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: min(timestamp_col), max(date_string_col)
+|  row-size=31B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=39B cardinality=7.30K
 ====
 # Some aggs only referenced in ORDER BY clause.
 select count(distinct tinyint_col), min(timestamp_col) from functional.alltypes
@@ -497,10 +563,12 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: aggif(valid_tid() = 4, count(smallint_col)) ASC, aggif(valid_tid() = 5, max(date_string_col)) ASC
+|  row-size=44B cardinality=11
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, min(timestamp_col)), aggif(valid_tid() = 5, max(date_string_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 5 THEN bigint_col END
+|  row-size=52B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -512,6 +580,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(date_string_col)
 |    group by: bigint_col
+|  row-size=68B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -521,9 +590,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min(timestamp_col), max(date_string_col)
 |    group by: bigint_col
+|  row-size=55B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=47B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -532,10 +603,12 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: aggif(valid_tid() = 4, count(smallint_col)) ASC, aggif(valid_tid() = 5, max(date_string_col)) ASC
+|  row-size=44B cardinality=11
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, min(timestamp_col)), aggif(valid_tid() = 5, max(date_string_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 5 THEN bigint_col END
+|  row-size=52B cardinality=11
 |
 08:AGGREGATE [FINALIZE]
 |  Class 0
@@ -547,6 +620,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(date_string_col)
 |    group by: bigint_col
+|  row-size=68B cardinality=70
 |
 07:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) END)]
 |
@@ -560,6 +634,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(date_string_col)
 |    group by: bigint_col
+|  row-size=68B cardinality=70
 |
 06:AGGREGATE
 |  Class 0
@@ -569,6 +644,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(date_string_col)
 |    group by: bigint_col
+|  row-size=55B cardinality=610
 |
 05:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 3 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -580,9 +656,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: min(timestamp_col), max(date_string_col)
 |    group by: bigint_col
+|  row-size=55B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=47B cardinality=7.30K
 ====
 # Mixed distinct and non-distinct aggs. No materialized aggregations. No group by.
 select 1 from
@@ -593,9 +671,11 @@ select 1 from
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
+|  row-size=0B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Mixed distinct and non-distinct aggs. No materialized aggregations. With group by.
 select v.gby1 from
@@ -609,9 +689,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  group by: string_col, date_string_col
+|  row-size=33B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=33B cardinality=7.30K
 ====
 # Only distinct aggs. No materialized aggregations. No group by.
 select 1 from
@@ -621,12 +703,15 @@ select 1 from
 PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
+|  row-size=0B cardinality=1
 |
 01:AGGREGATE
 |  group by: smallint_col
+|  row-size=2B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=2B cardinality=7.30K
 ====
 # Only distinct aggs. No materialized aggregations. With group by.
 select v.gby2 from
@@ -639,12 +724,15 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  group by: string_col, date_string_col
+|  row-size=33B cardinality=7.30K
 |
 01:AGGREGATE
 |  group by: string_col, date_string_col, smallint_col
+|  row-size=35B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=35B cardinality=7.30K
 ====
 # Simplifies to a single aggregation class. Only first distinct agg is materialized.
 # No group by.
@@ -659,34 +747,42 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(tinyint_col)
 |  having: count(tinyint_col) < 9
+|  row-size=8B cardinality=0
 |
 01:AGGREGATE
 |  group by: tinyint_col
+|  row-size=1B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=1B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(tinyint_col)
 |  having: count(tinyint_col) < 9
+|  row-size=8B cardinality=0
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(tinyint_col)
+|  row-size=8B cardinality=0
 |
 05:AGGREGATE
 |  group by: tinyint_col
+|  row-size=1B cardinality=10
 |
 04:EXCHANGE [HASH(tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: tinyint_col
+|  row-size=1B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=1B cardinality=7.30K
 ====
 # Simplifies to a single aggregation class. Only second distinct agg is materialized.
 # No group by.
@@ -701,34 +797,42 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(smallint_col)
 |  having: count(smallint_col) < 9
+|  row-size=8B cardinality=0
 |
 01:AGGREGATE
 |  group by: smallint_col
+|  row-size=2B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=2B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(smallint_col)
 |  having: count(smallint_col) < 9
+|  row-size=8B cardinality=0
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(smallint_col)
+|  row-size=8B cardinality=0
 |
 05:AGGREGATE
 |  group by: smallint_col
+|  row-size=2B cardinality=10
 |
 04:EXCHANGE [HASH(smallint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: smallint_col
+|  row-size=2B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=2B cardinality=7.30K
 ====
 # Simplifies to a single aggregation class. Only non-distinct aggs remain.
 # No group by.
@@ -744,23 +848,28 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: min(string_col), max(string_col)
 |  having: max(string_col) > '0', min(string_col) < '9', max(string_col) < min(string_col)
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=13B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  output: min:merge(string_col), max:merge(string_col)
 |  having: max(string_col) > '0', min(string_col) < '9', max(string_col) < min(string_col)
+|  row-size=24B cardinality=0
 |
 03:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: min(string_col), max(string_col)
+|  row-size=24B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=13B cardinality=7.30K
 ====
 # Simplifies to a single aggregation class with distinct and non-distinct aggs.
 # No group by.
@@ -776,37 +885,45 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(tinyint_col), min:merge(string_col), max:merge(string_col)
 |  having: count(tinyint_col) = 10, max(string_col) > '0', min(string_col) < '9', max(string_col) < min(string_col)
+|  row-size=32B cardinality=0
 |
 01:AGGREGATE
 |  output: min(string_col), max(string_col)
 |  group by: tinyint_col
+|  row-size=25B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=14B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(tinyint_col), min:merge(string_col), max:merge(string_col)
 |  having: count(tinyint_col) = 10, max(string_col) > '0', min(string_col) < '9', max(string_col) < min(string_col)
+|  row-size=32B cardinality=0
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(tinyint_col), min:merge(string_col), max:merge(string_col)
+|  row-size=32B cardinality=0
 |
 05:AGGREGATE
 |  output: min:merge(string_col), max:merge(string_col)
 |  group by: tinyint_col
+|  row-size=25B cardinality=10
 |
 04:EXCHANGE [HASH(tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(string_col), max(string_col)
 |  group by: tinyint_col
+|  row-size=25B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=14B cardinality=7.30K
 ====
 # Simplifies to a single aggregation class with one distinct agg.
 # With group by.
@@ -825,13 +942,16 @@ PLAN-ROOT SINK
 |  output: count(tinyint_col)
 |  group by: date_string_col, timestamp_col
 |  having: count(tinyint_col) < 10
+|  row-size=44B cardinality=10
 |
 01:AGGREGATE
 |  group by: date_string_col, timestamp_col, tinyint_col
+|  row-size=37B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.date_string_col = 'test1'
+   row-size=37B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -841,24 +961,29 @@ PLAN-ROOT SINK
 |  output: count:merge(tinyint_col)
 |  group by: date_string_col, timestamp_col
 |  having: count(tinyint_col) < 10
+|  row-size=44B cardinality=10
 |
 06:EXCHANGE [HASH(date_string_col,timestamp_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(tinyint_col)
 |  group by: date_string_col, timestamp_col
+|  row-size=44B cardinality=10
 |
 05:AGGREGATE
 |  group by: date_string_col, timestamp_col, tinyint_col
+|  row-size=37B cardinality=10
 |
 04:EXCHANGE [HASH(date_string_col,timestamp_col,tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: date_string_col, timestamp_col, tinyint_col
+|  row-size=37B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.date_string_col = 'test1'
+   row-size=37B cardinality=10
 ====
 # Simplifies to a single aggregation class with two non-distinct aggs.
 # With group by.
@@ -877,14 +1002,17 @@ PLAN-ROOT SINK
 |  output: count(tinyint_col), min:merge(string_col), max:merge(string_col)
 |  group by: date_string_col, timestamp_col
 |  having: count(tinyint_col) < 10, date_string_col < timestamp_col
+|  row-size=68B cardinality=3
 |
 01:AGGREGATE
 |  output: min(string_col), max(string_col)
 |  group by: date_string_col, timestamp_col, tinyint_col
+|  row-size=61B cardinality=3
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.timestamp_col > NULL, functional.alltypes.date_string_col = 'test1'
+   row-size=50B cardinality=3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -894,26 +1022,31 @@ PLAN-ROOT SINK
 |  output: count:merge(tinyint_col), min:merge(string_col), max:merge(string_col)
 |  group by: date_string_col, timestamp_col
 |  having: count(tinyint_col) < 10, date_string_col < timestamp_col
+|  row-size=68B cardinality=3
 |
 06:EXCHANGE [HASH(date_string_col,timestamp_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(tinyint_col), min:merge(string_col), max:merge(string_col)
 |  group by: date_string_col, timestamp_col
+|  row-size=68B cardinality=3
 |
 05:AGGREGATE
 |  output: min:merge(string_col), max:merge(string_col)
 |  group by: date_string_col, timestamp_col, tinyint_col
+|  row-size=61B cardinality=3
 |
 04:EXCHANGE [HASH(date_string_col,timestamp_col,tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(string_col), max(string_col)
 |  group by: date_string_col, timestamp_col, tinyint_col
+|  row-size=61B cardinality=3
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.timestamp_col > NULL, functional.alltypes.date_string_col = 'test1'
+   row-size=50B cardinality=3
 ====
 # Simplifies to a single aggregation class with one distinct and one non-distinct agg.
 # With group by.
@@ -932,14 +1065,17 @@ PLAN-ROOT SINK
 |  output: count(smallint_col), max:merge(string_col)
 |  group by: date_string_col, timestamp_col
 |  having: count(smallint_col) < 20, date_string_col < timestamp_col
+|  row-size=56B cardinality=3
 |
 01:AGGREGATE
 |  output: max(string_col)
 |  group by: date_string_col, timestamp_col, smallint_col
+|  row-size=50B cardinality=3
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.timestamp_col > NULL, functional.alltypes.date_string_col = 'test1'
+   row-size=51B cardinality=3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -949,24 +1085,29 @@ PLAN-ROOT SINK
 |  output: count:merge(smallint_col), max:merge(string_col)
 |  group by: date_string_col, timestamp_col
 |  having: count(smallint_col) < 20, date_string_col < timestamp_col
+|  row-size=56B cardinality=3
 |
 06:EXCHANGE [HASH(date_string_col,timestamp_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(smallint_col), max:merge(string_col)
 |  group by: date_string_col, timestamp_col
+|  row-size=56B cardinality=3
 |
 05:AGGREGATE
 |  output: max:merge(string_col)
 |  group by: date_string_col, timestamp_col, smallint_col
+|  row-size=50B cardinality=3
 |
 04:EXCHANGE [HASH(date_string_col,timestamp_col,smallint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: max(string_col)
 |  group by: date_string_col, timestamp_col, smallint_col
+|  row-size=50B cardinality=3
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.timestamp_col > NULL, functional.alltypes.date_string_col = 'test1'
+   row-size=51B cardinality=3
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-predicates.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-predicates.test b/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-predicates.test
index 668345c..d9aea35 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-predicates.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-predicates.test
@@ -10,6 +10,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < 10, aggif(valid_tid() = 4, count(smallint_col)) <= 30, aggif(valid_tid() = 4, count(smallint_col)) >= 20, aggif(valid_tid() = 5, max(string_col)) = 'test'
+|  row-size=28B cardinality=0
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -18,6 +19,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -26,15 +28,18 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max(string_col)
+|  row-size=15B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < 10, aggif(valid_tid() = 4, count(smallint_col)) <= 30, aggif(valid_tid() = 4, count(smallint_col)) >= 20, aggif(valid_tid() = 5, max(string_col)) = 'test'
+|  row-size=28B cardinality=0
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -43,6 +48,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -53,6 +59,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -61,6 +68,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=15B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -71,9 +79,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max(string_col)
+|  row-size=15B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ====
 # HAVING predicate references multiple aggregates. No group by.
 select count(distinct tinyint_col), count(distinct smallint_col), max(string_col)
@@ -85,6 +95,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < aggif(valid_tid() = 4, count(smallint_col))
+|  row-size=28B cardinality=0
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -93,6 +104,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -101,15 +113,18 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max(string_col)
+|  row-size=15B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < aggif(valid_tid() = 4, count(smallint_col))
+|  row-size=28B cardinality=0
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -118,6 +133,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -128,6 +144,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -136,6 +153,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=15B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -146,9 +164,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max(string_col)
+|  row-size=15B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ====
 # Predicates assigned through inline view. No group by.
 select a, b, c from
@@ -161,6 +181,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 4, count(smallint_col)) <= 30, aggif(valid_tid() = 4, count(smallint_col)) >= 20, aggif(valid_tid() = 5, max(string_col)) = 'test'
+|  row-size=28B cardinality=0
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -169,6 +190,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -177,15 +199,18 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max(string_col)
+|  row-size=15B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 4, count(smallint_col)) <= 30, aggif(valid_tid() = 4, count(smallint_col)) >= 20, aggif(valid_tid() = 5, max(string_col)) = 'test'
+|  row-size=28B cardinality=0
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -194,6 +219,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -204,6 +230,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -212,6 +239,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=15B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -222,9 +250,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max(string_col)
+|  row-size=15B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ====
 # Predicates assigned through inline view. No group by.
 select a, b, c from
@@ -237,6 +267,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 4, count(smallint_col)) <= 30, aggif(valid_tid() = 4, count(smallint_col)) >= 20, aggif(valid_tid() = 5, max(string_col)) = 'test'
+|  row-size=28B cardinality=0
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -245,6 +276,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -253,15 +285,18 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max(string_col)
+|  row-size=15B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 4, count(smallint_col)) <= 30, aggif(valid_tid() = 4, count(smallint_col)) >= 20, aggif(valid_tid() = 5, max(string_col)) = 'test'
+|  row-size=28B cardinality=0
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -270,6 +305,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -280,6 +316,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=28B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -288,6 +325,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max:merge(string_col)
+|  row-size=15B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -298,9 +336,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: max(string_col)
+|  row-size=15B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ====
 # HAVING predicate assignment. With group by.
 select count(distinct tinyint_col), count(distinct smallint_col), max(string_col)
@@ -317,6 +357,7 @@ PLAN-ROOT SINK
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  group by: CASE valid_tid() WHEN 2 THEN date_string_col WHEN 4 THEN date_string_col WHEN 5 THEN date_string_col END, CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < 10, CAST(CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END AS BIGINT) = aggif(valid_tid() = 2, count(tinyint_col)), CAST(CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END AS STRING) < CASE valid_tid() WHEN 2 THEN date_string_col WHEN 4 THEN date_string_col WHEN 5 THEN date_string_col END
+|  row-size=56B cardinality=730
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -328,6 +369,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=136B cardinality=730
 |
 01:AGGREGATE
 |  Class 0
@@ -337,10 +379,12 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=123B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.date_string_col <= 'test2', functional.alltypes.date_string_col >= 'test1', CAST(functional.alltypes.timestamp_col AS STRING) < functional.alltypes.date_string_col
+   row-size=52B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -350,6 +394,7 @@ PLAN-ROOT SINK
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  group by: CASE valid_tid() WHEN 2 THEN date_string_col WHEN 4 THEN date_string_col WHEN 5 THEN date_string_col END, CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < 10, CAST(CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END AS BIGINT) = aggif(valid_tid() = 2, count(tinyint_col)), CAST(CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END AS STRING) < CASE valid_tid() WHEN 2 THEN date_string_col WHEN 4 THEN date_string_col WHEN 5 THEN date_string_col END
+|  row-size=56B cardinality=730
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -361,6 +406,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=136B cardinality=730
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(date_string_col) WHEN 4 THEN murmur_hash(date_string_col) WHEN 5 THEN murmur_hash(date_string_col) END,CASE valid_tid() WHEN 2 THEN murmur_hash(timestamp_col) WHEN 4 THEN murmur_hash(timestamp_col) WHEN 5 THEN murmur_hash(timestamp_col) END)]
 |
@@ -374,6 +420,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=136B cardinality=730
 |
 05:AGGREGATE
 |  Class 0
@@ -383,6 +430,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=123B cardinality=730
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(date_string_col) WHEN 3 THEN murmur_hash(date_string_col) WHEN 5 THEN murmur_hash(date_string_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(timestamp_col) WHEN 3 THEN murmur_hash(timestamp_col) WHEN 5 THEN murmur_hash(timestamp_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -394,10 +442,12 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=123B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.date_string_col <= 'test2', functional.alltypes.date_string_col >= 'test1', CAST(functional.alltypes.timestamp_col AS STRING) < functional.alltypes.date_string_col
+   row-size=52B cardinality=730
 ====
 # Predicate assignment through an inline view. With group by.
 select a, b, c from
@@ -414,6 +464,7 @@ PLAN-ROOT SINK
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  group by: CASE valid_tid() WHEN 2 THEN date_string_col WHEN 4 THEN date_string_col WHEN 5 THEN date_string_col END, CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < 10, CAST(CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END AS BIGINT) = aggif(valid_tid() = 2, count(tinyint_col)), CAST(CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END AS STRING) < CASE valid_tid() WHEN 2 THEN date_string_col WHEN 4 THEN date_string_col WHEN 5 THEN date_string_col END
+|  row-size=56B cardinality=730
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -425,6 +476,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=136B cardinality=730
 |
 01:AGGREGATE
 |  Class 0
@@ -434,10 +486,12 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=123B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.date_string_col <= 'test2', functional.alltypes.date_string_col >= 'test1', CAST(functional.alltypes.timestamp_col AS STRING) < functional.alltypes.date_string_col
+   row-size=52B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -447,6 +501,7 @@ PLAN-ROOT SINK
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, max(string_col))
 |  group by: CASE valid_tid() WHEN 2 THEN date_string_col WHEN 4 THEN date_string_col WHEN 5 THEN date_string_col END, CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END
 |  having: aggif(valid_tid() = 2, count(tinyint_col)) < 10, CAST(CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END AS BIGINT) = aggif(valid_tid() = 2, count(tinyint_col)), CAST(CASE valid_tid() WHEN 2 THEN timestamp_col WHEN 4 THEN timestamp_col WHEN 5 THEN timestamp_col END AS STRING) < CASE valid_tid() WHEN 2 THEN date_string_col WHEN 4 THEN date_string_col WHEN 5 THEN date_string_col END
+|  row-size=56B cardinality=730
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -458,6 +513,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=136B cardinality=730
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(date_string_col) WHEN 4 THEN murmur_hash(date_string_col) WHEN 5 THEN murmur_hash(date_string_col) END,CASE valid_tid() WHEN 2 THEN murmur_hash(timestamp_col) WHEN 4 THEN murmur_hash(timestamp_col) WHEN 5 THEN murmur_hash(timestamp_col) END)]
 |
@@ -471,6 +527,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=136B cardinality=730
 |
 05:AGGREGATE
 |  Class 0
@@ -480,6 +537,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max:merge(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=123B cardinality=730
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(date_string_col) WHEN 3 THEN murmur_hash(date_string_col) WHEN 5 THEN murmur_hash(date_string_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(timestamp_col) WHEN 3 THEN murmur_hash(timestamp_col) WHEN 5 THEN murmur_hash(timestamp_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -491,8 +549,10 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: max(string_col)
 |    group by: date_string_col, timestamp_col
+|  row-size=123B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.date_string_col <= 'test2', functional.alltypes.date_string_col >= 'test1', CAST(functional.alltypes.timestamp_col AS STRING) < functional.alltypes.date_string_col
+   row-size=52B cardinality=730
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct.test b/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct.test
index d6cd1f1..026840f 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct.test
@@ -6,6 +6,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, sum(int_col)), aggif(valid_tid() = 6, count(smallint_col))
+|  row-size=24B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -14,6 +15,7 @@ PLAN-ROOT SINK
 |    output: sum(int_col)
 |  Class 2
 |    output: count(smallint_col)
+|  row-size=24B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -22,14 +24,17 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, sum(int_col)), aggif(valid_tid() = 6, count(smallint_col))
+|  row-size=24B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -38,6 +43,7 @@ PLAN-ROOT SINK
 |    output: sum:merge(int_col)
 |  Class 2
 |    output: count:merge(smallint_col)
+|  row-size=24B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -48,6 +54,7 @@ PLAN-ROOT SINK
 |    output: sum(int_col)
 |  Class 2
 |    output: count(smallint_col)
+|  row-size=24B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -56,6 +63,7 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(int_col) WHEN 5 THEN murmur_hash(smallint_col) END)]
 |
@@ -66,9 +74,11 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ====
 # Multiple distinct without grouping. First needs intermediate tuple.
 select avg(distinct tinyint_col), sum(distinct int_col), count(distinct smallint_col)
@@ -78,6 +88,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 5, sum(int_col)), aggif(valid_tid() = 7, count(smallint_col))
+|  row-size=24B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -86,6 +97,7 @@ PLAN-ROOT SINK
 |    output: sum(int_col)
 |  Class 2
 |    output: count(smallint_col)
+|  row-size=24B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -94,14 +106,17 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 5, sum(int_col)), aggif(valid_tid() = 7, count(smallint_col))
+|  row-size=24B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -110,6 +125,7 @@ PLAN-ROOT SINK
 |    output: sum:merge(int_col)
 |  Class 2
 |    output: count:merge(smallint_col)
+|  row-size=24B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -120,6 +136,7 @@ PLAN-ROOT SINK
 |    output: sum(int_col)
 |  Class 2
 |    output: count(smallint_col)
+|  row-size=24B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -128,6 +145,7 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 4 THEN murmur_hash(int_col) WHEN 6 THEN murmur_hash(smallint_col) END)]
 |
@@ -138,9 +156,11 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ====
 # Multiple distinct without grouping. Last needs intermediate tuple.
 select count(distinct tinyint_col), sum(distinct int_col), avg(distinct smallint_col)
@@ -150,6 +170,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, sum(int_col)), aggif(valid_tid() = 7, avg(smallint_col))
+|  row-size=24B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -158,6 +179,7 @@ PLAN-ROOT SINK
 |    output: sum(int_col)
 |  Class 2
 |    output: avg(smallint_col)
+|  row-size=24B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -166,14 +188,17 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, sum(int_col)), aggif(valid_tid() = 7, avg(smallint_col))
+|  row-size=24B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -182,6 +207,7 @@ PLAN-ROOT SINK
 |    output: sum:merge(int_col)
 |  Class 2
 |    output: avg:merge(smallint_col)
+|  row-size=24B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -192,6 +218,7 @@ PLAN-ROOT SINK
 |    output: sum(int_col)
 |  Class 2
 |    output: avg(smallint_col)
+|  row-size=24B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -200,6 +227,7 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(int_col) WHEN 5 THEN murmur_hash(smallint_col) END)]
 |
@@ -210,9 +238,11 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ====
 # Multiple distinct without grouping. All need intermediate tuples
 select avg(distinct tinyint_col), avg(distinct int_col), avg(distinct smallint_col)
@@ -222,6 +252,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 6, avg(int_col)), aggif(valid_tid() = 9, avg(smallint_col))
+|  row-size=24B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -230,6 +261,7 @@ PLAN-ROOT SINK
 |    output: avg(int_col)
 |  Class 2
 |    output: avg(smallint_col)
+|  row-size=24B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -238,14 +270,17 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 6, avg(int_col)), aggif(valid_tid() = 9, avg(smallint_col))
+|  row-size=24B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -254,6 +289,7 @@ PLAN-ROOT SINK
 |    output: avg:merge(int_col)
 |  Class 2
 |    output: avg:merge(smallint_col)
+|  row-size=24B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -264,6 +300,7 @@ PLAN-ROOT SINK
 |    output: avg(int_col)
 |  Class 2
 |    output: avg(smallint_col)
+|  row-size=24B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -272,6 +309,7 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 4 THEN murmur_hash(int_col) WHEN 7 THEN murmur_hash(smallint_col) END)]
 |
@@ -282,9 +320,11 @@ PLAN-ROOT SINK
 |    group by: int_col
 |  Class 2
 |    group by: smallint_col
+|  row-size=7B cardinality=70
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ====
 # Multiple distinct with grouping.
 select bigint_col, count(distinct tinyint_col), sum(distinct int_col),
@@ -296,6 +336,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, sum(int_col)), aggif(valid_tid() = 6, count(smallint_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 6 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -307,6 +348,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -315,9 +357,11 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -326,6 +370,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, sum(int_col)), aggif(valid_tid() = 6, count(smallint_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 6 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -337,6 +382,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 6 THEN murmur_hash(bigint_col) END)]
 |
@@ -350,6 +396,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 05:AGGREGATE
 |  Class 0
@@ -358,6 +405,7 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 3 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(int_col) WHEN 5 THEN murmur_hash(smallint_col) END)]
 |
@@ -368,9 +416,11 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====
 # Multiple distinct with grouping. First needs intermediate tuple.
 select bigint_col, avg(distinct tinyint_col), sum(distinct int_col),
@@ -382,6 +432,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 5, sum(int_col)), aggif(valid_tid() = 7, count(smallint_col))
 |  group by: CASE valid_tid() WHEN 3 THEN bigint_col WHEN 5 THEN bigint_col WHEN 7 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -393,6 +444,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -401,9 +453,11 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -412,6 +466,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 5, sum(int_col)), aggif(valid_tid() = 7, count(smallint_col))
 |  group by: CASE valid_tid() WHEN 3 THEN bigint_col WHEN 5 THEN bigint_col WHEN 7 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -423,6 +478,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) WHEN 7 THEN murmur_hash(bigint_col) END)]
 |
@@ -436,6 +492,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 05:AGGREGATE
 |  Class 0
@@ -444,6 +501,7 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 6 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 4 THEN murmur_hash(int_col) WHEN 6 THEN murmur_hash(smallint_col) END)]
 |
@@ -454,9 +512,11 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====
 # Multiple distinct with grouping. Last needs intermediate tuple.
 select bigint_col, count(distinct tinyint_col), sum(distinct int_col),
@@ -468,6 +528,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, sum(int_col)), aggif(valid_tid() = 7, avg(smallint_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 7 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -479,6 +540,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -487,9 +549,11 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -498,6 +562,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, sum(int_col)), aggif(valid_tid() = 7, avg(smallint_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 7 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -509,6 +574,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 6 THEN murmur_hash(bigint_col) END)]
 |
@@ -522,6 +588,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 05:AGGREGATE
 |  Class 0
@@ -530,6 +597,7 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 3 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(int_col) WHEN 5 THEN murmur_hash(smallint_col) END)]
 |
@@ -540,9 +608,11 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====
 # Multiple distinct with grouping. All need intermediate tuples
 select bigint_col, avg(distinct tinyint_col), avg(distinct int_col),
@@ -554,6 +624,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 6, avg(int_col)), aggif(valid_tid() = 9, avg(smallint_col))
 |  group by: CASE valid_tid() WHEN 3 THEN bigint_col WHEN 6 THEN bigint_col WHEN 9 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -565,6 +636,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -573,9 +645,11 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -584,6 +658,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 6, avg(int_col)), aggif(valid_tid() = 9, avg(smallint_col))
 |  group by: CASE valid_tid() WHEN 3 THEN bigint_col WHEN 6 THEN bigint_col WHEN 9 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -595,6 +670,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) WHEN 8 THEN murmur_hash(bigint_col) END)]
 |
@@ -608,6 +684,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg(smallint_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 05:AGGREGATE
 |  Class 0
@@ -616,6 +693,7 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 7 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 4 THEN murmur_hash(int_col) WHEN 7 THEN murmur_hash(smallint_col) END)]
 |
@@ -626,9 +704,11 @@ PLAN-ROOT SINK
 |    group by: bigint_col, int_col
 |  Class 2
 |    group by: bigint_col, smallint_col
+|  row-size=31B cardinality=700
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====
 # Multiple distinct and non-distinct without grouping.
 select count(distinct tinyint_col), count(distinct smallint_col), count(int_col)
@@ -638,6 +718,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, count(int_col))
+|  row-size=24B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -646,6 +727,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: count:merge(int_col)
+|  row-size=24B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -654,14 +736,17 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: count(int_col)
+|  row-size=11B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, count(int_col))
+|  row-size=24B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -670,6 +755,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: count:merge(int_col)
+|  row-size=24B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -680,6 +766,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: count:merge(int_col)
+|  row-size=24B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -688,6 +775,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: count:merge(int_col)
+|  row-size=11B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -698,9 +786,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: count(int_col)
+|  row-size=11B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ====
 # Multiple distinct and non-distinct without grouping. First distinct needs
 # intermediate agg tuple.
@@ -711,6 +801,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 5, count(smallint_col)), aggif(valid_tid() = 6, count(int_col))
+|  row-size=24B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -719,6 +810,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: count:merge(int_col)
+|  row-size=24B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -727,14 +819,17 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: count(int_col)
+|  row-size=11B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 5, count(smallint_col)), aggif(valid_tid() = 6, count(int_col))
+|  row-size=24B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -743,6 +838,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: count:merge(int_col)
+|  row-size=24B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -753,6 +849,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: count:merge(int_col)
+|  row-size=24B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -761,6 +858,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: count:merge(int_col)
+|  row-size=11B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 4 THEN murmur_hash(smallint_col) WHEN 6 THEN 0 END)]
 |
@@ -771,9 +869,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: count(int_col)
+|  row-size=11B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ====
 # Multiple distinct and non-distinct without grouping. Non-distinct needs
 # intermediate agg tuple.
@@ -784,6 +884,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 6, avg(int_col))
+|  row-size=24B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -792,6 +893,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: avg:merge(int_col)
+|  row-size=24B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -800,14 +902,17 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: avg(int_col)
+|  row-size=11B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 6, avg(int_col))
+|  row-size=24B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -816,6 +921,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: avg:merge(int_col)
+|  row-size=24B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -826,6 +932,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: avg:merge(int_col)
+|  row-size=24B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -834,6 +941,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: avg:merge(int_col)
+|  row-size=11B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -844,9 +952,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: avg(int_col)
+|  row-size=11B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ====
 # Multiple distinct and non-distinct without grouping. All need intermediate agg tuples.
 select avg(distinct tinyint_col), avg(distinct smallint_col), avg(int_col)
@@ -856,6 +966,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 6, avg(smallint_col)), aggif(valid_tid() = 8, avg(int_col))
+|  row-size=24B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -864,6 +975,7 @@ PLAN-ROOT SINK
 |    output: avg(smallint_col)
 |  Class 2
 |    output: avg:merge(int_col)
+|  row-size=24B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -872,14 +984,17 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: avg(int_col)
+|  row-size=11B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 6, avg(smallint_col)), aggif(valid_tid() = 8, avg(int_col))
+|  row-size=24B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -888,6 +1003,7 @@ PLAN-ROOT SINK
 |    output: avg:merge(smallint_col)
 |  Class 2
 |    output: avg:merge(int_col)
+|  row-size=24B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -898,6 +1014,7 @@ PLAN-ROOT SINK
 |    output: avg(smallint_col)
 |  Class 2
 |    output: avg:merge(int_col)
+|  row-size=24B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -906,6 +1023,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: avg:merge(int_col)
+|  row-size=11B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 4 THEN murmur_hash(smallint_col) WHEN 7 THEN 0 END)]
 |
@@ -916,9 +1034,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: avg(int_col)
+|  row-size=11B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=7B cardinality=7.30K
 ====
 # Multiple distinct and non-distinct with grouping.
 select bigint_col, count(distinct tinyint_col), count(distinct smallint_col),
@@ -930,6 +1050,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, count(int_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 5 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -941,6 +1062,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -950,9 +1072,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -961,6 +1085,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, count(int_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 5 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -972,6 +1097,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) END)]
 |
@@ -985,6 +1111,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 05:AGGREGATE
 |  Class 0
@@ -994,6 +1121,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 3 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -1005,9 +1133,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====
 # Multiple distinct and non-distinct without grouping. First distinct needs
 # intermediate agg tuple.
@@ -1020,6 +1150,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 5, count(smallint_col)), aggif(valid_tid() = 6, count(int_col))
 |  group by: CASE valid_tid() WHEN 3 THEN bigint_col WHEN 5 THEN bigint_col WHEN 6 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -1031,6 +1162,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -1040,9 +1172,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1051,6 +1185,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 5, count(smallint_col)), aggif(valid_tid() = 6, count(int_col))
 |  group by: CASE valid_tid() WHEN 3 THEN bigint_col WHEN 5 THEN bigint_col WHEN 6 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -1062,6 +1197,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) WHEN 6 THEN murmur_hash(bigint_col) END)]
 |
@@ -1075,6 +1211,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 05:AGGREGATE
 |  Class 0
@@ -1084,6 +1221,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count:merge(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 6 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 4 THEN murmur_hash(smallint_col) WHEN 6 THEN 0 END)]
 |
@@ -1095,9 +1233,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: count(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====
 # Multiple distinct and non-distinct without grouping. Non-distinct needs
 # intermediate agg tuple.
@@ -1110,6 +1250,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 6, avg(int_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 6 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -1121,6 +1262,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -1130,9 +1272,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1141,6 +1285,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 6, avg(int_col))
 |  group by: CASE valid_tid() WHEN 2 THEN bigint_col WHEN 4 THEN bigint_col WHEN 6 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -1152,6 +1297,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) END)]
 |
@@ -1165,6 +1311,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 05:AGGREGATE
 |  Class 0
@@ -1174,6 +1321,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 3 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -1185,9 +1333,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====
 # Multiple distinct and non-distinct without grouping. All need intermediate agg tuples.
 select bigint_col, avg(distinct tinyint_col), avg(distinct smallint_col),
@@ -1199,6 +1349,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 6, avg(smallint_col)), aggif(valid_tid() = 8, avg(int_col))
 |  group by: CASE valid_tid() WHEN 3 THEN bigint_col WHEN 6 THEN bigint_col WHEN 8 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -1210,6 +1361,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 01:AGGREGATE
 |  Class 0
@@ -1219,9 +1371,11 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1230,6 +1384,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 3, avg(tinyint_col)), aggif(valid_tid() = 6, avg(smallint_col)), aggif(valid_tid() = 8, avg(int_col))
 |  group by: CASE valid_tid() WHEN 3 THEN bigint_col WHEN 6 THEN bigint_col WHEN 8 THEN bigint_col END
+|  row-size=32B cardinality=11
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -1241,6 +1396,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 06:EXCHANGE [HASH(CASE valid_tid() WHEN 2 THEN murmur_hash(bigint_col) WHEN 5 THEN murmur_hash(bigint_col) WHEN 7 THEN murmur_hash(bigint_col) END)]
 |
@@ -1254,6 +1410,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(int_col)
 |    group by: bigint_col
+|  row-size=48B cardinality=70
 |
 05:AGGREGATE
 |  Class 0
@@ -1263,6 +1420,7 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg:merge(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(bigint_col) WHEN 4 THEN murmur_hash(bigint_col) WHEN 7 THEN murmur_hash(bigint_col) END,CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 4 THEN murmur_hash(smallint_col) WHEN 7 THEN 0 END)]
 |
@@ -1274,7 +1432,9 @@ PLAN-ROOT SINK
 |  Class 2
 |    output: avg(int_col)
 |    group by: bigint_col
+|  row-size=35B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====


[04/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/tpch-views.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/tpch-views.test b/testdata/workloads/functional-planner/queries/PlannerTest/tpch-views.test
index 7bc22c3..6a62e3d 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/tpch-views.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/tpch-views.test
@@ -26,14 +26,17 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: l_returnflag ASC, l_linestatus ASC
+|  row-size=122B cardinality=6
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_quantity), sum(tpch.lineitem.l_extendedprice), sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount)), sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount) * (1 + tpch.lineitem.l_tax)), avg(tpch.lineitem.l_quantity), avg(tpch.lineitem.l_extendedprice), avg(tpch.lineitem.l_discount), count(*)
 |  group by: tpch.lineitem.l_returnflag, tpch.lineitem.l_linestatus
+|  row-size=122B cardinality=6
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_shipdate <= '1998-09-02'
+   row-size=80B cardinality=600.12K
 ====
 # TPCH-Q2
 # Q2 - Minimum Cost Supplier Query
@@ -86,78 +89,97 @@ PLAN-ROOT SINK
 |
 18:TOP-N [LIMIT=100]
 |  order by: s_acctbal DESC, n_name ASC, s_name ASC, p_partkey ASC
+|  row-size=230B cardinality=100
 |
 17:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: ps_partkey = tpch.part.p_partkey, min(ps_supplycost) = tpch.partsupp.ps_supplycost
 |  runtime filters: RF000 <- tpch.part.p_partkey
+|  row-size=325B cardinality=1.01K
 |
 |--16:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.nation.n_regionkey = tpch.region.r_regionkey
 |  |  runtime filters: RF010 <- tpch.region.r_regionkey
+|  |  row-size=325B cardinality=1.01K
 |  |
 |  |--04:SCAN HDFS [tpch.region]
 |  |     partitions=1/1 files=1 size=384B
 |  |     predicates: tpch.region.r_name = 'EUROPE'
+|  |     row-size=21B cardinality=1
 |  |
 |  15:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  |  runtime filters: RF012 <- tpch.nation.n_nationkey
+|  |  row-size=304B cardinality=5.05K
 |  |
 |  |--03:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     runtime filters: RF010 -> tpch.nation.n_regionkey
+|  |     row-size=23B cardinality=25
 |  |
 |  14:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.supplier.s_suppkey = tpch.partsupp.ps_suppkey
 |  |  runtime filters: RF014 <- tpch.partsupp.ps_suppkey
+|  |  row-size=281B cardinality=5.05K
 |  |
 |  |--13:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: tpch.partsupp.ps_partkey = tpch.part.p_partkey
 |  |  |  runtime filters: RF016 <- tpch.part.p_partkey
+|  |  |  row-size=95B cardinality=5.05K
 |  |  |
 |  |  |--00:SCAN HDFS [tpch.part]
 |  |  |     partitions=1/1 files=1 size=22.83MB
 |  |  |     predicates: tpch.part.p_size = 15, tpch.part.p_type LIKE '%BRASS'
+|  |  |     row-size=71B cardinality=1.26K
 |  |  |
 |  |  02:SCAN HDFS [tpch.partsupp]
 |  |     partitions=1/1 files=1 size=112.71MB
 |  |     runtime filters: RF016 -> tpch.partsupp.ps_partkey
+|  |     row-size=24B cardinality=800.00K
 |  |
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF012 -> tpch.supplier.s_nationkey, RF014 -> tpch.supplier.s_suppkey
+|     row-size=187B cardinality=10.00K
 |
 12:AGGREGATE [FINALIZE]
 |  output: min(tpch.partsupp.ps_supplycost)
 |  group by: tpch.partsupp.ps_partkey
+|  row-size=16B cardinality=160.00K
 |
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.nation.n_regionkey = tpch.region.r_regionkey
 |  runtime filters: RF004 <- tpch.region.r_regionkey
+|  row-size=59B cardinality=160.00K
 |
 |--08:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: tpch.region.r_name = 'EUROPE'
+|     row-size=21B cardinality=1
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  runtime filters: RF006 <- tpch.nation.n_nationkey
+|  row-size=38B cardinality=800.00K
 |
 |--07:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF004 -> tpch.nation.n_regionkey
+|     row-size=4B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.partsupp.ps_suppkey = tpch.supplier.s_suppkey
 |  runtime filters: RF008 <- tpch.supplier.s_suppkey
+|  row-size=34B cardinality=800.00K
 |
 |--06:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF006 -> tpch.supplier.s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 05:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF000 -> tpch.partsupp.ps_partkey, RF008 -> tpch.partsupp.ps_suppkey
+   row-size=24B cardinality=800.00K
 ====
 # TPCH-Q3
 # Q3 - Shipping Priority Query
@@ -189,32 +211,39 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
 |  group by: tpch.lineitem.l_orderkey, tpch.orders.o_orderdate, tpch.orders.o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.orders.o_custkey = tpch.customer.c_custkey
 |  runtime filters: RF000 <- tpch.customer.c_custkey
+|  row-size=117B cardinality=17.56K
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: tpch.customer.c_mktsegment = 'BUILDING'
+|     row-size=29B cardinality=30.00K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_orderkey = tpch.orders.o_orderkey
 |  runtime filters: RF002 <- tpch.orders.o_orderkey
+|  row-size=88B cardinality=57.58K
 |
 |--01:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: tpch.orders.o_orderdate < '1995-03-15'
 |     runtime filters: RF000 -> tpch.orders.o_custkey
+|     row-size=42B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_shipdate > '1995-03-15'
    runtime filters: RF002 -> tpch.lineitem.l_orderkey
+   row-size=46B cardinality=600.12K
 ====
 # TPCH-Q4
 # Q4 - Order Priority Checking Query
@@ -244,23 +273,28 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: tpch.orders.o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: tpch.lineitem.l_orderkey = tpch.orders.o_orderkey
 |  runtime filters: RF000 <- tpch.orders.o_orderkey
+|  row-size=50B cardinality=150.00K
 |
 |--00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: tpch.orders.o_orderdate < '1993-10-01', tpch.orders.o_orderdate >= '1993-07-01'
+|     row-size=50B cardinality=150.00K
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_commitdate < tpch.lineitem.l_receiptdate
    runtime filters: RF000 -> tpch.lineitem.l_orderkey
+   row-size=52B cardinality=600.12K
 ====
 # TPCH-Q5
 # Q5 - Local Supplier Volume Query
@@ -293,55 +327,68 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=35B cardinality=25
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
 |  group by: tpch.nation.n_name
+|  row-size=35B cardinality=25
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.nation.n_regionkey = tpch.region.r_regionkey
 |  runtime filters: RF000 <- tpch.region.r_regionkey
+|  row-size=134B cardinality=115.16K
 |
 |--05:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: tpch.region.r_name = 'ASIA'
+|     row-size=21B cardinality=1
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  runtime filters: RF002 <- tpch.nation.n_nationkey
+|  row-size=113B cardinality=575.77K
 |
 |--04:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF000 -> tpch.nation.n_regionkey
+|     row-size=23B cardinality=25
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.customer.c_nationkey = tpch.supplier.s_nationkey, tpch.lineitem.l_suppkey = tpch.supplier.s_suppkey
 |  runtime filters: RF004 <- tpch.supplier.s_nationkey, RF005 <- tpch.supplier.s_suppkey
+|  row-size=90B cardinality=575.77K
 |
 |--03:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> tpch.supplier.s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.orders.o_custkey = tpch.customer.c_custkey
 |  runtime filters: RF008 <- tpch.customer.c_custkey
+|  row-size=80B cardinality=575.77K
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF002 -> tpch.customer.c_nationkey, RF004 -> tpch.customer.c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_orderkey = tpch.orders.o_orderkey
 |  runtime filters: RF010 <- tpch.orders.o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--01:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: tpch.orders.o_orderdate < '1995-01-01', tpch.orders.o_orderdate >= '1994-01-01'
 |     runtime filters: RF008 -> tpch.orders.o_custkey
+|     row-size=38B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF005 -> tpch.lineitem.l_suppkey, RF010 -> tpch.lineitem.l_orderkey
+   row-size=32B cardinality=6.00M
 ====
 # TPCH-Q6
 # Q6 - Forecasting Revenue Change Query
@@ -359,10 +406,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_extendedprice * tpch.lineitem.l_discount)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_discount <= 0.07, tpch.lineitem.l_discount >= 0.05, tpch.lineitem.l_quantity < 24, tpch.lineitem.l_shipdate < '1995-01-01', tpch.lineitem.l_shipdate >= '1994-01-01'
+   row-size=46B cardinality=600.12K
 ====
 # TPCH-Q7
 # Q7 - Volume Shipping Query
@@ -409,54 +458,67 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: supp_nation ASC, cust_nation ASC, l_year ASC
+|  row-size=58B cardinality=575.77K
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
 |  group by: tpch.nation.n_name, tpch.nation.n_name, year(tpch.lineitem.l_shipdate)
+|  row-size=58B cardinality=575.77K
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.customer.c_nationkey = tpch.nation.n_nationkey
 |  other predicates: ((tpch.nation.n_name = 'FRANCE' AND tpch.nation.n_name = 'GERMANY') OR (tpch.nation.n_name = 'GERMANY' AND tpch.nation.n_name = 'FRANCE'))
 |  runtime filters: RF000 <- tpch.nation.n_nationkey
+|  row-size=132B cardinality=575.77K
 |
 |--05:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  runtime filters: RF002 <- tpch.nation.n_nationkey
+|  row-size=111B cardinality=575.77K
 |
 |--04:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.orders.o_custkey = tpch.customer.c_custkey
 |  runtime filters: RF004 <- tpch.customer.c_custkey
+|  row-size=90B cardinality=575.77K
 |
 |--03:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF000 -> tpch.customer.c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_suppkey = tpch.supplier.s_suppkey
 |  runtime filters: RF006 <- tpch.supplier.s_suppkey
+|  row-size=80B cardinality=575.77K
 |
 |--00:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> tpch.supplier.s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_orderkey = tpch.orders.o_orderkey
 |  runtime filters: RF008 <- tpch.orders.o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--02:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     runtime filters: RF004 -> tpch.orders.o_custkey
+|     row-size=16B cardinality=1.50M
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_shipdate <= '1996-12-31', tpch.lineitem.l_shipdate >= '1995-01-01'
    runtime filters: RF006 -> tpch.lineitem.l_suppkey, RF008 -> tpch.lineitem.l_orderkey
+   row-size=54B cardinality=600.12K
 ====
 # TPCH-Q8
 # Q8 - National Market Share Query
@@ -502,70 +564,87 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: o_year ASC
+|  row-size=36B cardinality=761
 |
 15:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN tpch.nation.n_name = 'BRAZIL' THEN tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount) ELSE 0 END), sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
 |  group by: year(tpch.orders.o_orderdate)
+|  row-size=36B cardinality=761
 |
 14:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  runtime filters: RF000 <- tpch.nation.n_nationkey
+|  row-size=184B cardinality=761
 |
 |--06:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 13:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.nation.n_regionkey = tpch.region.r_regionkey
 |  runtime filters: RF002 <- tpch.region.r_regionkey
+|  row-size=163B cardinality=761
 |
 |--07:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: tpch.region.r_name = 'AMERICA'
+|     row-size=21B cardinality=1
 |
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.customer.c_nationkey = tpch.nation.n_nationkey
 |  runtime filters: RF004 <- tpch.nation.n_nationkey
+|  row-size=143B cardinality=3.81K
 |
 |--05:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF002 -> tpch.nation.n_regionkey
+|     row-size=4B cardinality=25
 |
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.customer.c_custkey = tpch.orders.o_custkey
 |  runtime filters: RF006 <- tpch.orders.o_custkey
+|  row-size=139B cardinality=3.81K
 |
 |--10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.lineitem.l_suppkey = tpch.supplier.s_suppkey
 |  |  runtime filters: RF008 <- tpch.supplier.s_suppkey
+|  |  row-size=129B cardinality=3.81K
 |  |
 |  |--01:SCAN HDFS [tpch.supplier]
 |  |     partitions=1/1 files=1 size=1.33MB
 |  |     runtime filters: RF000 -> tpch.supplier.s_nationkey
+|  |     row-size=10B cardinality=10.00K
 |  |
 |  09:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.orders.o_orderkey = tpch.lineitem.l_orderkey
 |  |  runtime filters: RF010 <- tpch.lineitem.l_orderkey
+|  |  row-size=119B cardinality=3.81K
 |  |
 |  |--08:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: tpch.lineitem.l_partkey = tpch.part.p_partkey
 |  |  |  runtime filters: RF012 <- tpch.part.p_partkey
+|  |  |  row-size=81B cardinality=39.66K
 |  |  |
 |  |  |--00:SCAN HDFS [tpch.part]
 |  |  |     partitions=1/1 files=1 size=22.83MB
 |  |  |     predicates: tpch.part.p_type = 'ECONOMY ANODIZED STEEL'
+|  |  |     row-size=41B cardinality=1.32K
 |  |  |
 |  |  02:SCAN HDFS [tpch.lineitem]
 |  |     partitions=1/1 files=1 size=718.94MB
 |  |     runtime filters: RF008 -> tpch.lineitem.l_suppkey, RF012 -> tpch.lineitem.l_partkey
+|  |     row-size=40B cardinality=6.00M
 |  |
 |  03:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: tpch.orders.o_orderdate <= '1996-12-31', tpch.orders.o_orderdate >= '1995-01-01'
 |     runtime filters: RF010 -> tpch.orders.o_orderkey
+|     row-size=38B cardinality=150.00K
 |
 04:SCAN HDFS [tpch.customer]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF004 -> tpch.customer.c_nationkey, RF006 -> tpch.customer.c_custkey
+   row-size=10B cardinality=150.00K
 ====
 # TPCH-Q9
 # Q9 - Product Type Measure Query
@@ -605,52 +684,65 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: nation ASC, o_year DESC
+|  row-size=39B cardinality=61.70K
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount) - tpch.partsupp.ps_supplycost * tpch.lineitem.l_quantity)
 |  group by: tpch.nation.n_name, year(tpch.orders.o_orderdate)
+|  row-size=39B cardinality=61.70K
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  runtime filters: RF000 <- tpch.nation.n_nationkey
+|  row-size=186B cardinality=574.29K
 |
 |--05:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_partkey = tpch.partsupp.ps_partkey, tpch.lineitem.l_suppkey = tpch.partsupp.ps_suppkey
 |  runtime filters: RF002 <- tpch.partsupp.ps_partkey, RF003 <- tpch.partsupp.ps_suppkey
+|  row-size=165B cardinality=574.29K
 |
 |--03:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
+|     row-size=24B cardinality=800.00K
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_suppkey = tpch.supplier.s_suppkey
 |  runtime filters: RF006 <- tpch.supplier.s_suppkey
+|  row-size=141B cardinality=574.29K
 |
 |--01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> tpch.supplier.s_nationkey, RF003 -> tpch.supplier.s_suppkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_orderkey = tpch.orders.o_orderkey
 |  runtime filters: RF008 <- tpch.orders.o_orderkey
+|  row-size=131B cardinality=574.29K
 |
 |--04:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=30B cardinality=1.50M
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_partkey = tpch.part.p_partkey
 |  runtime filters: RF010 <- tpch.part.p_partkey
+|  row-size=101B cardinality=598.58K
 |
 |--00:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
 |     predicates: tpch.part.p_name LIKE '%green%'
 |     runtime filters: RF002 -> tpch.part.p_partkey
+|     row-size=53B cardinality=20.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF002 -> tpch.lineitem.l_partkey, RF003 -> tpch.lineitem.l_suppkey, RF006 -> tpch.lineitem.l_suppkey, RF008 -> tpch.lineitem.l_orderkey, RF010 -> tpch.lineitem.l_partkey
+   row-size=48B cardinality=6.00M
 ====
 # TPCH-Q10
 # Q10 - Returned Item Reporting Query
@@ -692,38 +784,47 @@ PLAN-ROOT SINK
 |
 08:TOP-N [LIMIT=20]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=230B cardinality=20
 |
 07:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
 |  group by: tpch.customer.c_custkey, tpch.customer.c_name, tpch.customer.c_acctbal, tpch.customer.c_phone, tpch.nation.n_name, tpch.customer.c_address, tpch.customer.c_comment
+|  row-size=230B cardinality=191.92K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.customer.c_nationkey = tpch.nation.n_nationkey
 |  runtime filters: RF000 <- tpch.nation.n_nationkey
+|  row-size=293B cardinality=191.92K
 |
 |--03:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.customer.c_custkey = tpch.orders.o_custkey
 |  runtime filters: RF002 <- tpch.orders.o_custkey
+|  row-size=272B cardinality=191.92K
 |
 |--04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.lineitem.l_orderkey = tpch.orders.o_orderkey
 |  |  runtime filters: RF004 <- tpch.orders.o_orderkey
+|  |  row-size=75B cardinality=191.92K
 |  |
 |  |--01:SCAN HDFS [tpch.orders]
 |  |     partitions=1/1 files=1 size=162.56MB
 |  |     predicates: tpch.orders.o_orderdate < '1994-01-01', tpch.orders.o_orderdate >= '1993-10-01'
+|  |     row-size=38B cardinality=150.00K
 |  |
 |  02:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: tpch.lineitem.l_returnflag = 'R'
 |     runtime filters: RF004 -> tpch.lineitem.l_orderkey
+|     row-size=37B cardinality=2.00M
 |
 00:SCAN HDFS [tpch.customer]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF000 -> tpch.customer.c_nationkey, RF002 -> tpch.customer.c_custkey
+   row-size=197B cardinality=150.00K
 ====
 # TPCH-Q11
 # Q11 - Important Stock Identification
@@ -765,56 +866,70 @@ PLAN-ROOT SINK
 |
 13:SORT
 |  order by: value DESC
+|  row-size=24B cardinality=32.00K
 |
 12:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: sum(ps_supplycost * ps_availqty) > sum(ps_supplycost * ps_availqty) * 0.0001
+|  row-size=40B cardinality=32.00K
 |
 |--11:AGGREGATE [FINALIZE]
 |  |  output: sum(tpch.partsupp.ps_supplycost * tpch.partsupp.ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  |  runtime filters: RF004 <- tpch.nation.n_nationkey
+|  |  row-size=51B cardinality=32.00K
 |  |
 |  |--08:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     predicates: tpch.nation.n_name = 'GERMANY'
+|  |     row-size=21B cardinality=1
 |  |
 |  09:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.partsupp.ps_suppkey = tpch.supplier.s_suppkey
 |  |  runtime filters: RF006 <- tpch.supplier.s_suppkey
+|  |  row-size=30B cardinality=800.00K
 |  |
 |  |--07:SCAN HDFS [tpch.supplier]
 |  |     partitions=1/1 files=1 size=1.33MB
 |  |     runtime filters: RF004 -> tpch.supplier.s_nationkey
+|  |     row-size=10B cardinality=10.00K
 |  |
 |  06:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
 |     runtime filters: RF006 -> tpch.partsupp.ps_suppkey
+|     row-size=20B cardinality=800.00K
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(tpch.partsupp.ps_supplycost * tpch.partsupp.ps_availqty)
 |  group by: tpch.partsupp.ps_partkey
+|  row-size=24B cardinality=32.00K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  runtime filters: RF000 <- tpch.nation.n_nationkey
+|  row-size=59B cardinality=32.00K
 |
 |--02:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     predicates: tpch.nation.n_name = 'GERMANY'
+|     row-size=21B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.partsupp.ps_suppkey = tpch.supplier.s_suppkey
 |  runtime filters: RF002 <- tpch.supplier.s_suppkey
+|  row-size=38B cardinality=800.00K
 |
 |--01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> tpch.supplier.s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 00:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF002 -> tpch.partsupp.ps_suppkey
+   row-size=28B cardinality=800.00K
 ====
 # TPCH-Q12
 # Q12 - Shipping Mode and Order Priority Query
@@ -851,22 +966,27 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: l_shipmode ASC
+|  row-size=32B cardinality=7
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN tpch.orders.o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum(CASE WHEN tpch.orders.o_orderpriority != '1-URGENT' AND tpch.orders.o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: tpch.lineitem.l_shipmode
+|  row-size=32B cardinality=7
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.orders.o_orderkey = tpch.lineitem.l_orderkey
 |  runtime filters: RF000 <- tpch.lineitem.l_orderkey
+|  row-size=119B cardinality=320.78K
 |
 |--01:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: tpch.lineitem.l_shipmode IN ('MAIL', 'SHIP'), tpch.lineitem.l_commitdate < tpch.lineitem.l_receiptdate, tpch.lineitem.l_receiptdate < '1995-01-01', tpch.lineitem.l_receiptdate >= '1994-01-01', tpch.lineitem.l_shipdate < tpch.lineitem.l_commitdate
+|     row-size=90B cardinality=320.78K
 |
 00:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
    runtime filters: RF000 -> tpch.orders.o_orderkey
+   row-size=28B cardinality=1.50M
 ====
 # TPCH-Q13
 # Q13 - Customer Distribution Query
@@ -895,26 +1015,32 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: count(*) DESC, c_count DESC
+|  row-size=16B cardinality=150.00K
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: count(o_orderkey)
+|  row-size=16B cardinality=150.00K
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(tpch.orders.o_orderkey)
 |  group by: tpch.customer.c_custkey
+|  row-size=16B cardinality=150.00K
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: tpch.orders.o_custkey = tpch.customer.c_custkey
 |  runtime filters: RF000 <- tpch.customer.c_custkey
+|  row-size=85B cardinality=150.00K
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 01:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
    predicates: NOT tpch.orders.o_comment LIKE '%special%requests%'
    runtime filters: RF000 -> tpch.orders.o_custkey
+   row-size=77B cardinality=150.00K
 ====
 # TPCH-Q14
 # Q14 - Promotion Effect
@@ -936,18 +1062,22 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN tpch.part.p_type LIKE 'PROMO%' THEN tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount) ELSE 0 END), sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
+|  row-size=32B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_partkey = tpch.part.p_partkey
 |  runtime filters: RF000 <- tpch.part.p_partkey
+|  row-size=87B cardinality=598.58K
 |
 |--01:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
+|     row-size=41B cardinality=200.00K
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_shipdate < '1995-10-01', tpch.lineitem.l_shipdate >= '1995-09-01'
    runtime filters: RF000 -> tpch.lineitem.l_partkey
+   row-size=46B cardinality=600.12K
 ====
 # TPCH-Q15
 # Q15 - Top Supplier Query
@@ -986,36 +1116,45 @@ PLAN-ROOT SINK
 |
 08:SORT
 |  order by: s_suppkey ASC
+|  row-size=118B cardinality=1
 |
 07:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: sum(l_extendedprice * (1 - l_discount)) = max(total_revenue)
+|  row-size=126B cardinality=1
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: max(sum(l_extendedprice * (1 - l_discount)))
+|  |  row-size=16B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
 |  |  group by: tpch.lineitem.l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  03:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: tpch.lineitem.l_shipdate < '1996-04-01', tpch.lineitem.l_shipdate >= '1996-01-01'
+|     row-size=46B cardinality=600.12K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.supplier.s_suppkey = l_suppkey
 |  runtime filters: RF000 <- l_suppkey
+|  row-size=126B cardinality=10.00K
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
 |  |  group by: tpch.lineitem.l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  01:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: tpch.lineitem.l_shipdate < '1996-04-01', tpch.lineitem.l_shipdate >= '1996-01-01'
+|     row-size=46B cardinality=600.12K
 |
 00:SCAN HDFS [tpch.supplier]
    partitions=1/1 files=1 size=1.33MB
    runtime filters: RF000 -> tpch.supplier.s_suppkey
+   row-size=102B cardinality=10.00K
 ====
 # TPCH-Q16
 # Q16 - Parts/Supplier Relation Query
@@ -1054,32 +1193,40 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: count(ps_suppkey) DESC, p_brand ASC, p_type ASC, p_size ASC
+|  row-size=65B cardinality=31.92K
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(ps_suppkey)
 |  group by: p_brand, p_type, p_size
+|  row-size=65B cardinality=31.92K
 |
 05:AGGREGATE
 |  group by: tpch.part.p_brand, tpch.part.p_type, tpch.part.p_size, tpch.partsupp.ps_suppkey
+|  row-size=65B cardinality=31.92K
 |
 04:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  hash predicates: tpch.partsupp.ps_suppkey = tpch.supplier.s_suppkey
+|  row-size=81B cardinality=31.92K
 |
 |--02:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     predicates: tpch.supplier.s_comment LIKE '%Customer%Complaints%'
+|     row-size=83B cardinality=1.00K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.partsupp.ps_partkey = tpch.part.p_partkey
 |  runtime filters: RF000 <- tpch.part.p_partkey
+|  row-size=81B cardinality=31.92K
 |
 |--01:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
 |     predicates: tpch.part.p_size IN (49, 14, 23, 45, 19, 3, 36, 9), tpch.part.p_brand != 'Brand#45', NOT tpch.part.p_type LIKE 'MEDIUM POLISHED%'
+|     row-size=65B cardinality=8.00K
 |
 00:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF000 -> tpch.partsupp.ps_partkey
+   row-size=16B cardinality=800.00K
 ====
 # TPCH-Q17
 # Q17 - Small-Quantity-Order Revenue Query
@@ -1105,31 +1252,38 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_extendedprice)
+|  row-size=16B cardinality=1
 |
 05:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: l_partkey = tpch.part.p_partkey
 |  other join predicates: tpch.lineitem.l_quantity < 0.2 * avg(l_quantity)
 |  runtime filters: RF000 <- tpch.part.p_partkey
+|  row-size=72B cardinality=29.93K
 |
 |--04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.lineitem.l_partkey = tpch.part.p_partkey
 |  |  runtime filters: RF002 <- tpch.part.p_partkey
+|  |  row-size=72B cardinality=29.93K
 |  |
 |  |--01:SCAN HDFS [tpch.part]
 |  |     partitions=1/1 files=1 size=22.83MB
 |  |     predicates: tpch.part.p_container = 'MED BOX', tpch.part.p_brand = 'Brand#23'
+|  |     row-size=48B cardinality=1.00K
 |  |
 |  00:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     runtime filters: RF002 -> tpch.lineitem.l_partkey
+|     row-size=24B cardinality=6.00M
 |
 03:AGGREGATE [FINALIZE]
 |  output: avg(tpch.lineitem.l_quantity)
 |  group by: tpch.lineitem.l_partkey
+|  row-size=16B cardinality=200.52K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF000 -> tpch.lineitem.l_partkey
+   row-size=16B cardinality=6.00M
 ====
 # TPCH-Q18
 # Q18 - Large Value Customer Query
@@ -1172,41 +1326,51 @@ PLAN-ROOT SINK
 |
 09:TOP-N [LIMIT=100]
 |  order by: o_totalprice DESC, o_orderdate ASC
+|  row-size=92B cardinality=100
 |
 08:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_quantity)
 |  group by: tpch.customer.c_name, tpch.customer.c_custkey, tpch.orders.o_orderkey, tpch.orders.o_orderdate, tpch.orders.o_totalprice
+|  row-size=92B cardinality=600.12K
 |
 07:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: tpch.orders.o_orderkey = l_orderkey
 |  runtime filters: RF000 <- l_orderkey
+|  row-size=100B cardinality=600.12K
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: sum(tpch.lineitem.l_quantity)
 |  |  group by: tpch.lineitem.l_orderkey
 |  |  having: sum(l_quantity) > 300
+|  |  row-size=24B cardinality=156.34K
 |  |
 |  03:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
+|     row-size=16B cardinality=6.00M
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.orders.o_custkey = tpch.customer.c_custkey
 |  runtime filters: RF002 <- tpch.customer.c_custkey
+|  row-size=100B cardinality=5.76M
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=38B cardinality=150.00K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_orderkey = tpch.orders.o_orderkey
 |  runtime filters: RF004 <- tpch.orders.o_orderkey
+|  row-size=62B cardinality=5.76M
 |
 |--01:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     runtime filters: RF000 -> tpch.orders.o_orderkey, RF002 -> tpch.orders.o_custkey
+|     row-size=46B cardinality=1.50M
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF000 -> tpch.lineitem.l_orderkey, RF004 -> tpch.lineitem.l_orderkey
+   row-size=16B cardinality=6.00M
 ====
 # TPCH-Q19
 # Q19 - Discounted Revenue Query
@@ -1250,20 +1414,24 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_extendedprice * (1 - tpch.lineitem.l_discount))
+|  row-size=16B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: tpch.lineitem.l_partkey = tpch.part.p_partkey
 |  other predicates: ((tpch.part.p_brand = 'Brand#12' AND tpch.part.p_container IN ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') AND tpch.lineitem.l_quantity >= 1 AND tpch.lineitem.l_quantity <= 11 AND tpch.part.p_size <= 5) OR (tpch.part.p_brand = 'Brand#23' AND tpch.part.p_container IN ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') AND tpch.lineitem.l_quantity >= 10 AND tpch.lineitem.l_quantity <= 20 AND tpch.part.p_size <= 10) OR (tpch.part.p_brand = 'Brand#34' AND tpch.part.p_container IN ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') AND tpch.lineitem.l_quantity >= 20 AND tpch.lineitem.l_quantity <= 30 AND tpch.part.p_size <= 15))
 |  runtime filters: RF000 <- tpch.part.p_partkey
+|  row-size=124B cardinality=79.99K
 |
 |--01:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
 |     predicates: tpch.part.p_size >= 1
+|     row-size=52B cardinality=20.00K
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_shipmode IN ('AIR', 'AIR REG'), tpch.lineitem.l_shipinstruct = 'DELIVER IN PERSON'
    runtime filters: RF000 -> tpch.lineitem.l_partkey
+   row-size=72B cardinality=801.95K
 ====
 # TPCH-Q20
 # Q20 - Potential Part Promotion Query
@@ -1309,48 +1477,59 @@ PLAN-ROOT SINK
 |
 10:SORT
 |  order by: s_name ASC
+|  row-size=67B cardinality=400
 |
 09:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: tpch.partsupp.ps_suppkey = tpch.supplier.s_suppkey
 |  runtime filters: RF000 <- tpch.supplier.s_suppkey
+|  row-size=98B cardinality=400
 |
 |--08:HASH JOIN [INNER JOIN]
 |  |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  |  runtime filters: RF008 <- tpch.nation.n_nationkey
+|  |  row-size=98B cardinality=400
 |  |
 |  |--01:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     predicates: tpch.nation.n_name = 'CANADA'
+|  |     row-size=21B cardinality=1
 |  |
 |  00:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF008 -> tpch.supplier.s_nationkey
+|     row-size=77B cardinality=10.00K
 |
 07:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: l_partkey = tpch.partsupp.ps_partkey, l_suppkey = tpch.partsupp.ps_suppkey
 |  other join predicates: tpch.partsupp.ps_availqty > 0.5 * sum(l_quantity)
 |  runtime filters: RF002 <- tpch.partsupp.ps_partkey, RF003 <- tpch.partsupp.ps_suppkey
+|  row-size=20B cardinality=79.79K
 |
 |--06:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: tpch.partsupp.ps_partkey = tpch.part.p_partkey
 |  |  runtime filters: RF006 <- tpch.part.p_partkey
+|  |  row-size=20B cardinality=79.79K
 |  |
 |  |--03:SCAN HDFS [tpch.part]
 |  |     partitions=1/1 files=1 size=22.83MB
 |  |     predicates: tpch.part.p_name LIKE 'forest%'
+|  |     row-size=53B cardinality=20.00K
 |  |
 |  02:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
 |     runtime filters: RF000 -> tpch.partsupp.ps_suppkey, RF006 -> tpch.partsupp.ps_partkey
+|     row-size=20B cardinality=800.00K
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_quantity)
 |  group by: tpch.lineitem.l_partkey, tpch.lineitem.l_suppkey
+|  row-size=32B cardinality=600.12K
 |
 04:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_shipdate < '1995-01-01', tpch.lineitem.l_shipdate >= '1994-01-01'
    runtime filters: RF000 -> tpch.lineitem.l_suppkey, RF002 -> tpch.lineitem.l_partkey, RF003 -> tpch.lineitem.l_suppkey
+   row-size=46B cardinality=600.12K
 ====
 # TPCH-Q21
 # Q21 - Suppliers Who Kept Orders Waiting Query
@@ -1399,56 +1578,69 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=100]
 |  order by: count(*) DESC, s_name ASC
+|  row-size=38B cardinality=100
 |
 11:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: tpch.supplier.s_name
+|  row-size=38B cardinality=7.68K
 |
 10:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: tpch.lineitem.l_orderkey = tpch.lineitem.l_orderkey
 |  other join predicates: tpch.lineitem.l_suppkey != tpch.lineitem.l_suppkey
+|  row-size=142B cardinality=7.68K
 |
 |--09:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: tpch.lineitem.l_orderkey = tpch.lineitem.l_orderkey
 |  |  other join predicates: tpch.lineitem.l_suppkey != tpch.lineitem.l_suppkey
 |  |  runtime filters: RF000 <- tpch.lineitem.l_orderkey
+|  |  row-size=142B cardinality=7.68K
 |  |
 |  |--08:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: tpch.supplier.s_nationkey = tpch.nation.n_nationkey
 |  |  |  runtime filters: RF002 <- tpch.nation.n_nationkey
+|  |  |  row-size=142B cardinality=7.68K
 |  |  |
 |  |  |--03:SCAN HDFS [tpch.nation]
 |  |  |     partitions=1/1 files=1 size=2.15KB
 |  |  |     predicates: tpch.nation.n_name = 'SAUDI ARABIA'
+|  |  |     row-size=21B cardinality=1
 |  |  |
 |  |  07:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: tpch.lineitem.l_suppkey = tpch.supplier.s_suppkey
 |  |  |  runtime filters: RF004 <- tpch.supplier.s_suppkey
+|  |  |  row-size=121B cardinality=191.92K
 |  |  |
 |  |  |--00:SCAN HDFS [tpch.supplier]
 |  |  |     partitions=1/1 files=1 size=1.33MB
 |  |  |     runtime filters: RF002 -> tpch.supplier.s_nationkey
+|  |  |     row-size=40B cardinality=10.00K
 |  |  |
 |  |  06:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: tpch.lineitem.l_orderkey = tpch.orders.o_orderkey
 |  |  |  runtime filters: RF006 <- tpch.orders.o_orderkey
+|  |  |  row-size=81B cardinality=191.92K
 |  |  |
 |  |  |--02:SCAN HDFS [tpch.orders]
 |  |  |     partitions=1/1 files=1 size=162.56MB
 |  |  |     predicates: tpch.orders.o_orderstatus = 'F'
+|  |  |     row-size=21B cardinality=500.00K
 |  |  |
 |  |  01:SCAN HDFS [tpch.lineitem]
 |  |     partitions=1/1 files=1 size=718.94MB
 |  |     predicates: tpch.lineitem.l_receiptdate > tpch.lineitem.l_commitdate
 |  |     runtime filters: RF004 -> tpch.lineitem.l_suppkey, RF006 -> tpch.lineitem.l_orderkey
+|  |     row-size=60B cardinality=600.12K
 |  |
 |  04:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     runtime filters: RF000 -> tpch.lineitem.l_orderkey
+|     row-size=16B cardinality=6.00M
 |
 05:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: tpch.lineitem.l_receiptdate > tpch.lineitem.l_commitdate
+   row-size=60B cardinality=600.12K
 ====
 # TPCH-Q22
 # Q22 - Global Sales Opportunity Query
@@ -1491,28 +1683,36 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: cntrycode ASC
+|  row-size=36B cardinality=15.00K
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*), sum(tpch.customer.c_acctbal)
 |  group by: substr(tpch.customer.c_phone, 1, 2)
+|  row-size=36B cardinality=15.00K
 |
 05:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: tpch.orders.o_custkey = tpch.customer.c_custkey
+|  row-size=51B cardinality=15.00K
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: tpch.customer.c_acctbal > avg(c_acctbal)
+|  |  row-size=51B cardinality=15.00K
 |  |
 |  |--02:AGGREGATE [FINALIZE]
 |  |  |  output: avg(tpch.customer.c_acctbal)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  01:SCAN HDFS [tpch.customer]
 |  |     partitions=1/1 files=1 size=23.08MB
 |  |     predicates: tpch.customer.c_acctbal > 0, substr(tpch.customer.c_phone, 1, 2) IN ('13', '31', '23', '29', '30', '18', '17')
+|  |     row-size=35B cardinality=15.00K
 |  |
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: substr(tpch.customer.c_phone, 1, 2) IN ('13', '31', '23', '29', '30', '18', '17')
+|     row-size=43B cardinality=15.00K
 |
 03:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
+   row-size=8B cardinality=1.50M
 ====


[02/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/values.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/values.test b/testdata/workloads/functional-planner/queries/PlannerTest/values.test
index 87e9aa8..ffa5632 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/values.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/values.test
@@ -4,11 +4,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=18B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=18B cardinality=1
 ====
 values(1+1, 2, 5.0, 'a') order by 1 limit 10
 ---- PLAN
@@ -16,17 +18,21 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: 2 ASC
+|  row-size=18B cardinality=1
 |
 00:UNION
    constant-operands=1
+   row-size=18B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: 2 ASC
+|  row-size=18B cardinality=1
 |
 00:UNION
    constant-operands=1
+   row-size=18B cardinality=1
 ====
 values((1+1, 2, 5.0, 'a'), (2, 3, 6.0, 'b'), (3, 4, 7.0, 'c'))
 ---- PLAN
@@ -34,11 +40,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ====
 values((1+1, 2, 5.0, 'a'), (2, 3, 6.0, 'b'), (3, 4, 7.0, 'c')) order by 1 limit 10
 ---- PLAN
@@ -46,15 +54,19 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: 2 ASC
+|  row-size=18B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: 2 ASC
+|  row-size=18B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/views.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/views.test b/testdata/workloads/functional-planner/queries/PlannerTest/views.test
index 5caeab5..0057e84 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/views.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/views.test
@@ -5,6 +5,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -12,6 +13,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # Basic test with a complex view.
 select * from functional.complex_view
@@ -20,23 +22,28 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=100]
 |  order by: b.string_col ASC
+|  row-size=21B cardinality=0
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(a.bigint_col)
 |  group by: b.string_col
 |  having: count(a.bigint_col) > 1
+|  row-size=21B cardinality=0
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=29B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=17B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: a.bigint_col < 50
    runtime filters: RF000 -> a.id
+   row-size=12B cardinality=1.10K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -46,31 +53,37 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=100]
 |  order by: b.string_col ASC
+|  row-size=21B cardinality=0
 |
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(a.bigint_col)
 |  group by: b.string_col
 |  having: count(a.bigint_col) > 1
+|  row-size=21B cardinality=0
 |
 06:EXCHANGE [HASH(b.string_col)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(a.bigint_col)
 |  group by: b.string_col
+|  row-size=21B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=29B cardinality=1
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=17B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: a.bigint_col < 50
    runtime filters: RF000 -> a.id
+   row-size=12B cardinality=1.10K
 ====
 # Basic test with a view on a view
 select int_col, string_col from functional.view_view
@@ -79,6 +92,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -86,6 +100,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # view used in a union.
 select * from functional.alltypes_view union all
@@ -94,26 +109,32 @@ select * from functional.alltypes_view where id < 10
 PLAN-ROOT SINK
 |
 00:UNION
+|  row-size=89B cardinality=8.03K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.id < 10
+|     row-size=89B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:EXCHANGE [UNPARTITIONED]
 |
 00:UNION
+|  row-size=89B cardinality=8.03K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.id < 10
+|     row-size=89B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # view used in an inline view.
 select t.id from (select id from functional.alltypes_view) t
@@ -124,6 +145,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 10
+   row-size=4B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -132,6 +154,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 10
+   row-size=4B cardinality=730
 ====
 # Multiple views used in a join.
 select * from functional.alltypes_view t1, functional.alltypes_view_sub t2,
@@ -142,40 +165,49 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = count(a.bigint_col)
 |  runtime filters: RF000 <- count(a.bigint_col)
+|  row-size=143B cardinality=730
 |
 |--06:TOP-N [LIMIT=100]
 |  |  order by: b.string_col ASC
+|  |  row-size=21B cardinality=0
 |  |
 |  05:AGGREGATE [FINALIZE]
 |  |  output: count(a.bigint_col)
 |  |  group by: b.string_col
 |  |  having: count(a.bigint_col) > 1
+|  |  row-size=21B cardinality=0
 |  |
 |  04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = b.id
 |  |  runtime filters: RF004 <- b.id
+|  |  row-size=29B cardinality=1
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=17B cardinality=8
 |  |
 |  02:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: a.bigint_col < 50
 |     runtime filters: RF004 -> a.id
+|     row-size=12B cardinality=1.10K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=122B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.int_col > 1
 |     runtime filters: RF000 -> int_col
+|     row-size=33B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id > 1
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -184,6 +216,7 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: int_col = count(a.bigint_col)
 |  runtime filters: RF000 <- count(a.bigint_col)
+|  row-size=143B cardinality=730
 |
 |--15:EXCHANGE [BROADCAST]
 |  |
@@ -193,35 +226,42 @@ PLAN-ROOT SINK
 |  |
 |  06:TOP-N [LIMIT=100]
 |  |  order by: b.string_col ASC
+|  |  row-size=21B cardinality=0
 |  |
 |  13:AGGREGATE [FINALIZE]
 |  |  output: count:merge(a.bigint_col)
 |  |  group by: b.string_col
 |  |  having: count(a.bigint_col) > 1
+|  |  row-size=21B cardinality=0
 |  |
 |  12:EXCHANGE [HASH(b.string_col)]
 |  |
 |  05:AGGREGATE [STREAMING]
 |  |  output: count(a.bigint_col)
 |  |  group by: b.string_col
+|  |  row-size=21B cardinality=1
 |  |
 |  04:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: a.id = b.id
 |  |  runtime filters: RF004 <- b.id
+|  |  row-size=29B cardinality=1
 |  |
 |  |--11:EXCHANGE [BROADCAST]
 |  |  |
 |  |  03:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=17B cardinality=8
 |  |
 |  02:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: a.bigint_col < 50
 |     runtime filters: RF004 -> a.id
+|     row-size=12B cardinality=1.10K
 |
 07:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=122B cardinality=730
 |
 |--10:EXCHANGE [HASH(int_col)]
 |  |
@@ -229,6 +269,7 @@ PLAN-ROOT SINK
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.int_col > 1
 |     runtime filters: RF000 -> int_col
+|     row-size=33B cardinality=730
 |
 09:EXCHANGE [HASH(functional.alltypes.id)]
 |
@@ -236,6 +277,7 @@ PLAN-ROOT SINK
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id > 1
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ====
 # Self-join of view to make sure the on clause is properly set
 # in the cloned view instances.
@@ -248,21 +290,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -271,27 +318,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--07:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--06:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 05:EXCHANGE [HASH(functional.alltypes.id)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ====
 # Self-join views to make sure the using clause is properly set
 # in the cloned view instances.
@@ -304,21 +356,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -327,27 +384,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--07:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--06:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 05:EXCHANGE [HASH(functional.alltypes.id)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ====
 # Self-join of view to make sure the join op is properly set
 # in the cloned view instances.
@@ -359,18 +421,23 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
+|  row-size=267B cardinality=14.60K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -378,24 +445,29 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
+|  row-size=267B cardinality=14.60K
 |
 |--07:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--06:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 05:EXCHANGE [HASH(functional.alltypes.id)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Self-join of view to make sure join hints are properly set
 # in the cloned view instances.
@@ -410,21 +482,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -433,27 +510,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--07:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 06:EXCHANGE [HASH(functional.alltypes.id)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ====
 # Tests that parentheses are preserved when creating a view
 # enabling proper partition pruning for this particular view.
@@ -462,16 +544,20 @@ select * from functional.alltypes_parens
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 1
    partitions=1/24 files=1 size=19.95KB
    predicates: (int_col < 100 OR bool_col = FALSE)
+   row-size=89B cardinality=31
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 1
    partitions=1/24 files=1 size=19.95KB
    predicates: (int_col < 100 OR bool_col = FALSE)
+   row-size=89B cardinality=31
 ====
 # Tests that slotrefs are correctly marked as assigned inside an inline view where
 # possible (see IMPALA-923)
@@ -482,6 +568,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    predicates: bool_col
+   row-size=1B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -490,4 +577,5 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    predicates: bool_col
+   row-size=1B cardinality=730
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test b/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test
index 9c5f577..a1f045f 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test
@@ -5,6 +5,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -12,6 +13,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # Basic test with a single with-clause view that references a catalog view.
 with t as (select int_col x, bigint_col y from functional.alltypes_view)
@@ -21,6 +23,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -28,6 +31,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # Multiple views in with-clause. Only one view is used.
 with t1 as (select int_col x, bigint_col y from functional.alltypes),
@@ -38,11 +42,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=2B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=2B cardinality=1
 ====
 # Multiple views in with-clause. All views are used in a union.
 with t1 as (select int_col x, bigint_col y from functional.alltypes),
@@ -52,34 +58,42 @@ select * from t1 union all select * from t2 union all select * from t3
 PLAN-ROOT SINK
 |
 00:UNION
+|  row-size=12B cardinality=7.30K
 |
 |--03:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=2B cardinality=1
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 00:UNION
+|  row-size=12B cardinality=7.30K
 |
 |--05:EXCHANGE [RANDOM]
 |  |
 |  03:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 |--04:EXCHANGE [RANDOM]
 |  |
 |  02:UNION
 |     constant-operands=1
+|     row-size=2B cardinality=1
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # Multiple views in with-clause. All views are used in a join.
 with t1 as (select int_col x, bigint_col y from functional.alltypes),
@@ -92,21 +106,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=58.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=5.84K
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.int_col, RF002 -> int_col
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -115,25 +134,30 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=58.40K
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=5.84K
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.int_col, RF002 -> int_col
+   row-size=12B cardinality=7.30K
 ====
 # Multiple dependent views in with-clause
 with t1 as (
@@ -158,37 +182,46 @@ PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=3]
 |  order by: c3 ASC, c1 DESC
+|  row-size=9B cardinality=1
 |
 04:SELECT
 |  predicates: c1 > 0
+|  row-size=9B cardinality=1
 |
 03:AGGREGATE [FINALIZE]
 |  output: max(tinyint_col)
 |  group by: int_col, max(id)
 |  limit: 10
+|  row-size=9B cardinality=5
 |
 02:TOP-N [LIMIT=5]
 |  order by: int_col ASC, tinyint_col ASC
+|  row-size=9B cardinality=5
 |
 01:AGGREGATE [FINALIZE]
 |  output: max(id)
 |  group by: int_col, tinyint_col
+|  row-size=9B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=9B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=3]
 |  order by: c3 ASC, c1 DESC
+|  row-size=9B cardinality=1
 |
 04:SELECT
 |  predicates: c1 > 0
+|  row-size=9B cardinality=1
 |
 03:AGGREGATE [FINALIZE]
 |  output: max(tinyint_col)
 |  group by: int_col, max(id)
 |  limit: 10
+|  row-size=9B cardinality=5
 |
 08:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: int_col ASC, tinyint_col ASC
@@ -196,19 +229,23 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=5]
 |  order by: int_col ASC, tinyint_col ASC
+|  row-size=9B cardinality=5
 |
 07:AGGREGATE [FINALIZE]
 |  output: max:merge(id)
 |  group by: int_col, tinyint_col
+|  row-size=9B cardinality=100
 |
 06:EXCHANGE [HASH(int_col,tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: max(id)
 |  group by: int_col, tinyint_col
+|  row-size=9B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=9B cardinality=100
 ====
 # Self-join of with-clause table to make sure the on clause is properly set
 # in the cloned inline-view instances.
@@ -220,21 +257,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -243,27 +285,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--07:EXCHANGE [HASH(int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--06:EXCHANGE [HASH(int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 05:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ====
 # Self-join of with-clause table to make sure the using clause is properly set
 # in the cloned inline-view instances.
@@ -275,21 +322,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -298,27 +350,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--07:EXCHANGE [HASH(int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--06:EXCHANGE [HASH(int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 05:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ====
 # Self-join of with-clause table to make sure the join op is properly set
 # in the cloned inline-view instances.
@@ -329,18 +386,23 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: int_col = int_col
+|  row-size=36B cardinality=128
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: int_col = int_col
+|  row-size=24B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=12B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -348,24 +410,29 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
+|  row-size=36B cardinality=128
 |
 |--07:EXCHANGE [HASH(int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
+|  row-size=24B cardinality=32
 |
 |--06:EXCHANGE [HASH(int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 05:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=12B cardinality=8
 ====
 # Self-join of with-clause table to make sure join hints are properly set
 # in the cloned inline-view instances.
@@ -379,21 +446,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -402,27 +474,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--07:EXCHANGE [HASH(int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 06:EXCHANGE [HASH(int_col)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ====
 # Multiple with clauses. One for the UnionStmt and one for each union operand.
 with t1 as (values('a', 'b'))
@@ -433,27 +510,35 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=1]
 |  order by: 'c' ASC
+|  row-size=24B cardinality=1
 |
 00:UNION
+|  row-size=24B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=24B cardinality=1
 |
 01:UNION
    constant-operands=1
+   row-size=24B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=1]
 |  order by: 'c' ASC
+|  row-size=24B cardinality=1
 |
 00:UNION
+|  row-size=24B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=24B cardinality=1
 |
 01:UNION
    constant-operands=1
+   row-size=24B cardinality=1
 ====
 # Multiple with clauses. One for the UnionStmt and one for each union operand.
 with t1 as (values('a', 'b'))
@@ -464,27 +549,35 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=1]
 |  order by: 'c' ASC
+|  row-size=24B cardinality=1
 |
 00:UNION
+|  row-size=24B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=24B cardinality=1
 |
 01:UNION
    constant-operands=1
+   row-size=24B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=1]
 |  order by: 'c' ASC
+|  row-size=24B cardinality=1
 |
 00:UNION
+|  row-size=24B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=24B cardinality=1
 |
 01:UNION
    constant-operands=1
+   row-size=24B cardinality=1
 ====
 # Test with clause in an insert statement.
 with t1 as (select * from functional.alltypestiny)
@@ -495,20 +588,24 @@ WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=4
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=8
 |
 01:EXCHANGE [HASH(functional.alltypestiny.year,functional.alltypestiny.month)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Test with clause in an insert statement and in its query statement.
 with t1 as (select * from functional.alltypestiny)
@@ -521,30 +618,38 @@ WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,
 |
 03:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=16
 |
 00:UNION
+|  row-size=89B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=16
 |
 04:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=16
 |
 03:EXCHANGE [HASH(year,month)]
 |
 00:UNION
+|  row-size=89B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # IMPALA-5293: Test with clause in an insert statement and in its query statement. Make
 # sure that noclustered hint prevents addition of a sort node before writing to HDFS.
@@ -557,12 +662,15 @@ WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,
 |  partitions=16
 |
 00:UNION
+|  row-size=89B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=16
@@ -570,12 +678,15 @@ WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,
 03:EXCHANGE [HASH(year,month)]
 |
 00:UNION
+|  row-size=89B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Test with clause with a query statement that references the same column from a
 # base table multiple times (IMPALA-1412)
@@ -589,6 +700,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    predicates: bigint_col = bigint_col
+   row-size=9B cardinality=1
 ====
 # IMPALA-2414: Test basic correlated WITH clause view.
 select pos from functional.allcomplextypes t inner join
@@ -599,16 +711,21 @@ on v.pos = t.id
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=24B cardinality=0
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: pos = t.id
+|  |  row-size=24B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [t.int_array_col]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # IMPALA-2414: Test correlated WITH clause view nested in another WITH clause.
 select pos from functional.allcomplextypes t inner join
@@ -618,15 +735,20 @@ select pos from functional.allcomplextypes t inner join
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [t.int_array_col]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # IMPALA-2414: Test correlated WITH clause view nested in another WITH clause.
 with w1 as (select pos from functional.allcomplextypes t,
@@ -636,15 +758,20 @@ select * from w1
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [t.int_array_col]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # IMPALA-2414: Test multiple correlated WITH clause views that are joined.
 select pos from functional.allcomplextypes t inner join
@@ -655,26 +782,37 @@ select pos from functional.allcomplextypes t inner join
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=64B cardinality=0
 |
 |--10:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: value = t.id
+|  |  row-size=64B cardinality=10.00K
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=52B cardinality=1
 |  |
 |  09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=12B cardinality=10.00K
 |  |
 |  |--06:UNNEST [t.map_map_col.value]
+|  |     row-size=4B cardinality=10
 |  |
 |  08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=8B cardinality=1.00K
 |  |
 |  |--05:UNNEST [t.map_map_col.value]
+|  |     row-size=0B cardinality=10
 |  |
 |  07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=8B cardinality=100
 |  |
 |  |--04:UNNEST [t.int_array_col]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [t.int_array_col]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=52B cardinality=0
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test b/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
index da88f3b..74707db 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
@@ -66,7 +66,7 @@ explain select count(*) from corrupted where org = 1;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted]'
-'   partitions=1/2 files=1 size=24B'
+'   partitions=1/2 files=1 size=24B row-size=0B cardinality=0'
 ---- TYPES
 STRING
 ====
@@ -92,7 +92,7 @@ explain select count(*) from corrupted;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted]'
-'   partitions=2/2 files=2 size=48B'
+'   partitions=2/2 files=2 size=48B row-size=0B cardinality=6'
 ---- TYPES
 STRING
 ====
@@ -117,7 +117,7 @@ explain select count(*) from corrupted;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted]'
-'   partitions=2/2 files=2 size=48B'
+'   partitions=2/2 files=2 size=48B row-size=0B cardinality=6'
 ---- TYPES
 STRING
 ====
@@ -139,7 +139,7 @@ explain select count(*) from corrupted where org = 2;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted]'
-'   partitions=1/2 files=1 size=24B'
+'   partitions=1/2 files=1 size=24B row-size=0B cardinality=6'
 ---- TYPES
 STRING
 ====
@@ -165,7 +165,7 @@ explain select count(*) from corrupted_no_part;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted_no_part]'
-'   partitions=1/1 files=1 size=6B'
+'   partitions=1/1 files=1 size=6B row-size=0B cardinality=3'
 ---- TYPES
 STRING
 ====
@@ -197,7 +197,7 @@ explain select count(*) from corrupted_no_part;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted_no_part]'
-'   partitions=1/1 files=1 size=6B'
+'   partitions=1/1 files=1 size=6B row-size=0B cardinality=0'
 ---- TYPES
 STRING
 ====
@@ -219,7 +219,7 @@ explain select count(*) from corrupted_no_part;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted_no_part]'
-'   partitions=1/1 files=1 size=6B'
+'   partitions=1/1 files=1 size=6B row-size=0B cardinality=unavailable'
 ---- TYPES
 STRING
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
index 13f7148..9a6dea3 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
@@ -15,15 +15,18 @@ row_regex:.*Per-Host Resource Estimates: Memory=[0-9.]*MB.*
 '02:HASH JOIN [INNER JOIN, BROADCAST]'
 '|  hash predicates: l_orderkey = o_orderkey'
 '|  runtime filters: RF000 <- o_orderkey'
+row_regex:.*row-size=.* cardinality=.*
 '|'
 '|--03:EXCHANGE [BROADCAST]'
 '|  |'
 '|  01:SCAN HDFS [tpch.orders]'
 row_regex:.*partitions=1/1 files=1 size=.*
+row_regex:.*row-size=.* cardinality=.*
 '|'
 '00:SCAN HDFS [tpch.lineitem]'
 row_regex:.*partitions=1/1 files=1 size=.*
 '   runtime filters: RF000 -> l_orderkey'
+row_regex:.*row-size=.* cardinality=.*
 ====
 ---- QUERY
 # Tests the warning about missing table stats in the explain header.

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
index 7d65337..6c07f77 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
@@ -11,13 +11,13 @@ row_regex:.*Per-Host Resource Estimates: Memory=[0-9.]*MB.*
 '= o_orderkey'
 ''
 'F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
-row_regex:.*|  Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1
+row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1
 'PLAN-ROOT SINK'
-row_regex:.*|  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
 '|'
 '04:EXCHANGE [UNPARTITIONED]'
-row_regex:.*|  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
-'|  tuple-ids=0,1 row-size=402B cardinality=5757710'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+'|  tuple-ids=0,1 row-size=402B cardinality=5.76M'
 '|  in pipelines: 00(GETNEXT)'
 '|'
 'F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3'
@@ -26,25 +26,25 @@ row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9
 '|  hash predicates: l_orderkey = o_orderkey'
 '|  fk/pk conjuncts: l_orderkey = o_orderkey'
 '|  runtime filters: RF000[bloom] <- o_orderkey'
-row_regex:.*|  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=0.*
-'|  tuple-ids=0,1 row-size=402B cardinality=5757710'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=0.*
+'|  tuple-ids=0,1 row-size=402B cardinality=5.76M'
 '|  in pipelines: 00(GETNEXT), 01(OPEN)'
 '|'
 '|--03:EXCHANGE [BROADCAST]'
-row_regex:.*|  |  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
-'|  |  tuple-ids=1 row-size=171B cardinality=1500000'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+'|  |  tuple-ids=1 row-size=171B cardinality=1.50M'
 '|  |  in pipelines: 01(GETNEXT)'
 '|  |'
 '|  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2'
-row_regex:.*|  Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
+row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
 '|  01:SCAN HDFS [tpch.orders, RANDOM]'
 row_regex:.*partitions=1/1 files=1 size=.*
 '|     stored statistics:'
 row_regex:.*table: rows=1500000 size=.*
 '|       columns: all'
-row_regex:.*|     extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
-row_regex:.*|     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
-'|     tuple-ids=1 row-size=171B cardinality=1500000'
+row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
+'|     tuple-ids=1 row-size=171B cardinality=1.50M'
 '|     in pipelines: 01(GETNEXT)'
 '|'
 '00:SCAN HDFS [tpch.lineitem, RANDOM]'
@@ -53,9 +53,9 @@ row_regex:.*partitions=1/1 files=1 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=6001215 size=.*
 '     columns: all'
-row_regex:.*   extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
-row_regex:.*   mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
-'   tuple-ids=0 row-size=231B cardinality=6001215'
+row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
+'   tuple-ids=0 row-size=231B cardinality=6.00M'
 '   in pipelines: 00(GETNEXT)'
 ====
 ---- QUERY

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
index 23e97f0..d095028 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
@@ -13,28 +13,28 @@ row_regex:.*Per-Host Resource Estimates: Memory=[0-9.]*MB.*
 'F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
 row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1
 '  PLAN-ROOT SINK'
-row_regex:.*  |  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
 '  |'
 '  04:EXCHANGE [UNPARTITIONED]'
-row_regex:.*     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
-'     tuple-ids=0,1 row-size=402B cardinality=5757710'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+'     tuple-ids=0,1 row-size=402B cardinality=5.76M'
 '     in pipelines: 00(GETNEXT)'
 ''
 'F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3'
 row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
 '  DATASTREAM SINK [FRAGMENT=F02, EXCHANGE=04, UNPARTITIONED]'
-row_regex:.*  |  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
 '  02:HASH JOIN [INNER JOIN, BROADCAST]'
 '  |  hash predicates: l_orderkey = o_orderkey'
 '  |  fk/pk conjuncts: l_orderkey = o_orderkey'
 '  |  runtime filters: RF000[bloom] <- o_orderkey'
-row_regex:.*  |  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=.*
-'  |  tuple-ids=0,1 row-size=402B cardinality=5757710'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=.*
+'  |  tuple-ids=0,1 row-size=402B cardinality=5.76M'
 '  |  in pipelines: 00(GETNEXT), 01(OPEN)'
 '  |'
 '  |--03:EXCHANGE [BROADCAST]'
-row_regex:.*  |     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
-'  |     tuple-ids=1 row-size=171B cardinality=1500000'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+'  |     tuple-ids=1 row-size=171B cardinality=1.50M'
 '  |     in pipelines: 01(GETNEXT)'
 '  |'
 '  00:SCAN HDFS [tpch.lineitem, RANDOM]'
@@ -43,9 +43,9 @@ row_regex:.*partitions=1/1 files=1 size=.*
 '     stored statistics:'
 row_regex:.*table: rows=6001215 size=.*
 '       columns: all'
-row_regex:.*|     extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
-row_regex:.*     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
-'     tuple-ids=0 row-size=231B cardinality=6001215'
+row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
+'     tuple-ids=0 row-size=231B cardinality=6.00M'
 '     in pipelines: 00(GETNEXT)'
 ''
 'F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2'
@@ -59,7 +59,7 @@ row_regex:.*table: rows=1500000 size=.*
 '       columns: all'
 row_regex:.*   extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
 row_regex:.*     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
-'     tuple-ids=1 row-size=171B cardinality=1500000'
+'     tuple-ids=1 row-size=171B cardinality=1.50M'
 '     in pipelines: 01(GETNEXT)'
 ====
 ---- QUERY

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test b/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
index 7a4999c..0c6deb4 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
@@ -70,7 +70,7 @@ row_regex:.*table: rows=3650 size=.*
 '     columns: all'
 row_regex:.* extrapolated-rows=3650.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
-'   tuple-ids=0 row-size=4B cardinality=3650'
+'   tuple-ids=0 row-size=4B cardinality=3.65K'
 '   in pipelines: 00(GETNEXT)'
 ---- TYPES
 STRING
@@ -91,6 +91,7 @@ row_regex:.*mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
+'   partition predicates: month IN (CAST(1 AS INT), CAST(2 AS INT), CAST(3 AS INT))'
 row_regex:.*partitions=3/12 files=3 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=3650 size=.*
@@ -126,7 +127,7 @@ row_regex:.*table: rows=3650 size=.*
 '     columns: all'
 row_regex:.* extrapolated-rows=7300.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
-'   tuple-ids=0 row-size=4B cardinality=7300'
+'   tuple-ids=0 row-size=4B cardinality=7.30K'
 '   in pipelines: 00(GETNEXT)'
 ---- TYPES
 STRING
@@ -149,6 +150,7 @@ row_regex:.*Per-Host Resources: mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
+'   partition predicates: year = CAST(2010 AS INT)'
 row_regex:.*partitions=12/24 files=12 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=3650 size=.*
@@ -156,7 +158,7 @@ row_regex:.*table: rows=3650 size=.*
 '     columns: all'
 row_regex:.* extrapolated-rows=3651.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
-'   tuple-ids=0 row-size=4B cardinality=3651'
+'   tuple-ids=0 row-size=4B cardinality=3.65K'
 '   in pipelines: 00(GETNEXT)'
 ---- TYPES
 STRING
@@ -178,6 +180,7 @@ row_regex:.*Per-Host Resources: mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
+'   partition predicates: year = CAST(2010 AS INT)'
 row_regex:.*partitions=12/24 files=12 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=10950 size=.*
@@ -185,7 +188,7 @@ row_regex:.*table: rows=10950 size=.*
 '     columns: all'
 row_regex:.* extrapolated-rows=3651
 row_regex:.*mem-estimate=.* mem-reservation=.*
-'   tuple-ids=0 row-size=4B cardinality=3651'
+'   tuple-ids=0 row-size=4B cardinality=3.65K'
 '   in pipelines: 00(GETNEXT)'
 ---- TYPES
 STRING

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/tests/metadata/test_explain.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_explain.py b/tests/metadata/test_explain.py
index ba206f2..48a6d69 100644
--- a/tests/metadata/test_explain.py
+++ b/tests/metadata/test_explain.py
@@ -78,7 +78,7 @@ class TestExplain(ImpalaTestSuite):
     tbl_name = 'alltypes'
 
     def check_cardinality(query_result, expected_cardinality):
-      regex = re.compile('tuple-ids=\d+ row-size=\d+B cardinality=(\d+)')
+      regex = re.compile(' row-size=\d+B cardinality=(.*)$')
       for res in query_result:
         m = regex.match(res.strip())
         if m:
@@ -94,12 +94,12 @@ class TestExplain(ImpalaTestSuite):
     # Half of the partitions are filtered out, cardinality should be 3650.
     result = self.execute_query("explain select * from %s.%s where year = 2010" % (
         db_name, tbl_name), query_options={'explain_level':3})
-    check_cardinality(result.data, '3650')
+    check_cardinality(result.data, '3.65K')
 
     # None of the partitions are filtered out, cardinality should be 7300.
     result = self.execute_query("explain select * from %s.%s" % (db_name, tbl_name),
         query_options={'explain_level':3})
-    check_cardinality(result.data, '7300')
+    check_cardinality(result.data, '7.30K')
 
     # Create a partitioned table with a mixed set of available stats,
     mixed_tbl = unique_database + ".t"


[23/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/constant-propagation.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/constant-propagation.test b/testdata/workloads/functional-planner/queries/PlannerTest/constant-propagation.test
index 067698a..617eb3a 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/constant-propagation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/constant-propagation.test
@@ -19,6 +19,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.widetable_250_cols a]
    partitions=1/1 files=1 size=28.69KB
    predicates: a.int_col1 = 10, a.int_col2 = 11, a.int_col3 = 55, a.int_col4 = 110
+   row-size=1.21KB cardinality=unavailable
 ====
 # Test multiple forward propagation
 select * from functional.widetable_250_cols a
@@ -30,6 +31,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.widetable_250_cols a]
    partitions=1/1 files=1 size=28.69KB
    predicates: a.int_col1 = 10, a.int_col2 = 11, a.int_col3 = 55, a.int_col4 = -385
+   row-size=1.21KB cardinality=unavailable
 ====
 # Test multiple forward propagation
 select * from functional.widetable_250_cols a
@@ -41,6 +43,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.widetable_250_cols a]
    partitions=1/1 files=1 size=28.69KB
    predicates: a.int_col1 = 10, a.int_col2 = 11, a.int_col3 = 55, a.int_col4 = -495
+   row-size=1.21KB cardinality=unavailable
 ====
 # Test multiple forward propagation, and a reversed propagation
 # (which fails as we can't rewrite 55 = a.int_col4 / 10)
@@ -53,6 +56,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.widetable_250_cols a]
    partitions=1/1 files=1 size=28.69KB
    predicates: a.int_col1 = 10, a.int_col2 = 11, a.int_col3 = 55, a.int_col4 / 10 = 55
+   row-size=1.21KB cardinality=unavailable
 ====
 # Another impossibility (a.int_col3 = a.int_col2 * 5 = a.int_col2 * -7)
 select * from functional.widetable_250_cols a
@@ -72,6 +76,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -84,6 +89,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -96,6 +102,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: CAST(a.int_col AS STRING) = 'abc', CAST(int_col AS STRING) > 'xyz'
+   row-size=89B cardinality=231
 ====
 # Implicit casts are considered for propagation
 select * from functional.alltypes a
@@ -115,17 +122,21 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 02:SELECT
 |  predicates: int_col = 12, int_col > 1
+|  row-size=4B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  group by: int_col
 |  limit: 10
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: int_col = 10
+   row-size=4B cardinality=730
 ====
 # Many constant predicates removed
 select count(*) from
@@ -137,10 +148,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: int_col = 10
+   row-size=4B cardinality=730
 ====
 # All true predicates elided
 select count(*) from
@@ -151,9 +164,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Many redundant / duplicate predicates
  select count(*) from
@@ -204,10 +219,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: int_col = 10
+   row-size=4B cardinality=730
 ====
 # Non-trivial expr substitution (const false)
 select count(*) from
@@ -219,6 +236,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -233,10 +251,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: int_col = 10
+   row-size=4B cardinality=730
 ====
 # Non-trivial expr substitution (non-constant)
 select count(*) from
@@ -249,10 +269,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: int_col = 10, TRUE OR 10 + random() * functional.alltypes.tinyint_col = 100
+   row-size=5B cardinality=231
 ====
 # Collection predicates within HDFS scan nodes get optimized
 select 1
@@ -262,26 +284,35 @@ where l_partkey < l_suppkey and c.c_nationkey = 10 and o_orderkey = 4 and l_supp
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=50B cardinality=600.00K
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=50B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=36B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=36B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=20B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=292.36MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c.c_nationkey = 10, !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderkey = 4
    predicates on o_lineitems: l_partkey < 10, l_suppkey = 10
+   row-size=14B cardinality=6.00K
 ====
 # Nested predicates also get propagated
 select 1
@@ -292,27 +323,36 @@ where l_partkey < l_suppkey and c.c_nationkey = 10 and o_orderkey = o_shippriori
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=54B cardinality=600.00K
 |
 |--08:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: o_shippriority = c_nationkey
+|  |  row-size=54B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=40B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=40B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=292.36MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c.c_nationkey = 10, !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o.o_orderkey = 10, o.o_shippriority = 10
    predicates on o_lineitems: l_partkey < 10, l_suppkey = 10
+   row-size=14B cardinality=6.00K
 ====
 # Using IS NULL
 select count(*) from functional.alltypes where id = 10 and bool_col is null
@@ -322,6 +362,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -332,6 +373,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -343,10 +385,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: bool_col IS NULL, functional.alltypes.id IS NULL, id > 0, functional.alltypes.bool_col > 0, id = bool_col
+   row-size=5B cardinality=730
 ====
 # = NULL and >
 select count(*) from functional.alltypes where id > 0 and bool_col = null
@@ -356,6 +400,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:EMPTYSET
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/constant.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/constant.test b/testdata/workloads/functional-planner/queries/PlannerTest/constant.test
index 3c03cd5..5cfa415 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/constant.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/constant.test
@@ -4,9 +4,11 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=2B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=2B cardinality=1
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/data-source-tables.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/data-source-tables.test b/testdata/workloads/functional-planner/queries/PlannerTest/data-source-tables.test
index ce4dbd7..3368cd7 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/data-source-tables.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/data-source-tables.test
@@ -10,6 +10,7 @@ PLAN-ROOT SINK
 00:SCAN DATA SOURCE [functional.alltypes_datasource]
 data source predicates: tinyint_col < 256
 predicates: float_col != 0, CAST(int_col AS BIGINT) < 10
+   row-size=112B cardinality=500
 ====
 # The first four predicates are in a form that can be offered to the data source
 # and the first and third will be accepted (it accepts every other conjunct).
@@ -29,6 +30,7 @@ PLAN-ROOT SINK
 00:SCAN DATA SOURCE [functional.alltypes_datasource]
 data source predicates: int_col < 10, string_col != 'Foo'
 predicates: double_col < 5, NOT bool_col = TRUE, NOT double_col = 5.0, string_col != 'Bar'
+   row-size=112B cardinality=500
 ====
 # The 3rd predicate is not in a form that can be offered to the data source so
 # the 4th will be offered and accepted instead.
@@ -43,6 +45,7 @@ PLAN-ROOT SINK
 00:SCAN DATA SOURCE [functional.alltypes_datasource]
 data source predicates: int_col < 10, bool_col != FALSE
 predicates: double_col > 5, string_col IN ('Foo', 'Bar')
+   row-size=112B cardinality=500
 ====
 # Tests that all predicates from the On-clause are applied (IMPALA-805)
 # and that slot equivalences are enforced at lowest possible plan node
@@ -60,12 +63,15 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
+|  row-size=35B cardinality=500
 |
 |--01:SCAN DATA SOURCE [functional.alltypes_datasource b]
 |--predicates: b.id = b.int_col, b.id = b.bigint_col
+|     row-size=0B cardinality=500
 |
 00:SCAN DATA SOURCE [functional.alltypes_datasource a]
 predicates: a.id = a.int_col, a.id = a.tinyint_col, a.int_col = a.bigint_col, a.tinyint_col = a.smallint_col
+   row-size=0B cardinality=500
 ====
 # Tests that <=>, IS DISTINCT FROM, and IS NOT DISTINCT FROM all can be offered to the
 # data source.
@@ -82,6 +88,7 @@ PLAN-ROOT SINK
 00:SCAN DATA SOURCE [functional.alltypes_datasource]
 data source predicates: id IS NOT DISTINCT FROM 1, tinyint_col IS DISTINCT FROM 2, int_col IS NOT DISTINCT FROM 4
 predicates: bigint_col IS NOT DISTINCT FROM 5, bool_col IS NOT DISTINCT FROM TRUE, smallint_col IS DISTINCT FROM 3
+   row-size=112B cardinality=500
 ====
 # EmptySet datasource
 select * from functional.alltypes_datasource
@@ -96,7 +103,6 @@ PLAN-ROOT SINK
 |
 00:EMPTYSET
 ====
----- QUERY
 # IMPALA-5602: If a query contains predicates that are all pushed to the datasource and
 # there is a limit, then the query should not incorrectly run with 'small query'
 # optimization.
@@ -110,4 +116,5 @@ PLAN-ROOT SINK
 00:SCAN DATA SOURCE [functional.alltypes_datasource]
 data source predicates: id = 1
    limit: 15
+   row-size=112B cardinality=15
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test b/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test
index 74c7e5f..ce495b3 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test
@@ -5,12 +5,14 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 create table t as
 select distinct a.int_col, a.string_col from functional.alltypes a
@@ -22,41 +24,54 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |
 03:AGGREGATE [FINALIZE]
 |  group by: a.int_col, a.string_col
+|  row-size=17B cardinality=13
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=25B cardinality=13
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month = 2
 |     partitions=1/4 files=1 size=1.58KB
+|     row-size=4B cardinality=25
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009
    partitions=12/24 files=12 size=238.68KB
    runtime filters: RF000 -> a.id
+   row-size=21B cardinality=3.65K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false]
 |  partitions=1
 |
 06:AGGREGATE [FINALIZE]
 |  group by: a.int_col, a.string_col
+|  row-size=17B cardinality=13
 |
 05:EXCHANGE [HASH(a.int_col,a.string_col)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: a.int_col, a.string_col
+|  row-size=17B cardinality=13
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=25B cardinality=13
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month = 2
 |     partitions=1/4 files=1 size=1.58KB
+|     row-size=4B cardinality=25
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009
    partitions=12/24 files=12 size=238.68KB
    runtime filters: RF000 -> a.id
+   row-size=21B cardinality=3.65K
 ====
 # CTAS with a view that has a limit clause (IMPALA-1411)
 create table t as
@@ -68,6 +83,7 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    limit: 1
+   row-size=0B cardinality=1
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false]
 |  partitions=1
@@ -78,6 +94,7 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    limit: 1
+   row-size=0B cardinality=1
 ====
 # CTAS with multiple nested inline views that have a limit clause (IMPALA-1411)
 create table t as
@@ -91,6 +108,7 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    limit: 1
+   row-size=4B cardinality=1
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false]
 |  partitions=1
@@ -101,6 +119,7 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    limit: 1
+   row-size=4B cardinality=1
 ====
 
 # CTAS with a select statement that has a limit and offset clause (IMPALA-1411)
@@ -112,9 +131,11 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |
 01:TOP-N [LIMIT=1 OFFSET=5]
 |  order by: id ASC
+|  row-size=89B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false]
 |  partitions=1
@@ -126,9 +147,11 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |
 01:TOP-N [LIMIT=6]
 |  order by: id ASC
+|  row-size=89B cardinality=6
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # CTAS with an inline view that has a limit and offset clause (IMPALA-1411)
 create table t as
@@ -140,9 +163,11 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |
 01:TOP-N [LIMIT=2 OFFSET=5]
 |  order by: id ASC
+|  row-size=8B cardinality=2
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=8B cardinality=8
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false]
 |  partitions=1
@@ -154,9 +179,11 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |
 01:TOP-N [LIMIT=7]
 |  order by: id ASC
+|  row-size=8B cardinality=7
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=8B cardinality=8
 ====
 # CTAS with sort columns
 create table t sort by (int_col, bool_col) as
@@ -167,18 +194,22 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |
 01:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false]
 |  partitions=1
 |
 01:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # CTAS with partitions and sort columns
 create table t partitioned by (year, month) sort by (int_col, bool_col) as
@@ -189,20 +220,24 @@ WRITE TO HDFS [default.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 01:EXCHANGE [HASH(functional.alltypes.year,functional.alltypes.month)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-4167: if no (no)shuffle hint is given for CTAS into partitioned HDFS table, then
 # Impala is free to decide whether to add an exchange node or not. In this example, the
@@ -220,6 +255,7 @@ WRITE TO HDFS [default.t, OVERWRITE=false, PARTITION-KEYS=(functional.alltypes.y
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-4167: non-shuffled CTAS into partitioned table has no exchange node before write.
 # Note that plan hint tests for CTAS are minimal by design, as this logic is covered well
@@ -234,6 +270,7 @@ WRITE TO HDFS [default.t, OVERWRITE=false, PARTITION-KEYS=(functional.alltypes.y
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # CTAS with more complex select query
 create table t partitioned by (c_nationkey) sort by (c_custkey) as
@@ -247,52 +284,63 @@ WRITE TO HDFS [default.t, OVERWRITE=false, PARTITION-KEYS=(c_nationkey)]
 |
 04:SORT
 |  order by: c_nationkey ASC NULLS LAST, c_custkey ASC NULLS LAST
+|  row-size=18B cardinality=228.68K
 |
 03:AGGREGATE [FINALIZE]
 |  output: max(o_totalprice)
 |  group by: c_custkey, c_nationkey
+|  row-size=18B cardinality=228.68K
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=26B cardinality=228.68K
 |
 |--01:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: c_nationkey < 10
+|     row-size=10B cardinality=15.00K
 |
 00:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
    runtime filters: RF000 -> o_custkey
+   row-size=16B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.t, OVERWRITE=false, PARTITION-KEYS=(c_nationkey)]
 |  partitions=25
 |
 08:SORT
 |  order by: c_nationkey ASC NULLS LAST, c_custkey ASC NULLS LAST
+|  row-size=18B cardinality=228.68K
 |
 07:EXCHANGE [HASH(c_nationkey)]
 |
 06:AGGREGATE [FINALIZE]
 |  output: max:merge(o_totalprice)
 |  group by: c_custkey, c_nationkey
+|  row-size=18B cardinality=228.68K
 |
 05:EXCHANGE [HASH(c_custkey,c_nationkey)]
 |
 03:AGGREGATE [STREAMING]
 |  output: max(o_totalprice)
 |  group by: c_custkey, c_nationkey
+|  row-size=18B cardinality=228.68K
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=26B cardinality=228.68K
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: c_nationkey < 10
+|     row-size=10B cardinality=15.00K
 |
 00:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
    runtime filters: RF000 -> o_custkey
+   row-size=16B cardinality=1.50M
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-broadcast.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-broadcast.test b/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-broadcast.test
index 8735f97..3ff9e66 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-broadcast.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-broadcast.test
@@ -9,15 +9,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: x.a = y.a
 |  runtime filters: RF000 <- y.a
+|  row-size=48B cardinality=unavailable
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.tinytable y]
 |     partitions=1/1 files=1 size=38B
+|     row-size=24B cardinality=unavailable
 |
 00:SCAN HDFS [functional.tinytable x]
    partitions=1/1 files=1 size=38B
    runtime filters: RF000 -> x.a
+   row-size=24B cardinality=unavailable
 ====
 # Left join input has an unknown cardinality.
 select /* +straight_join */ * from
@@ -30,15 +33,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: x.a = y.string_col
 |  runtime filters: RF000 <- y.string_col
+|  row-size=113B cardinality=unavailable
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes y]
-|     partitions=24/24 files=24 size=469.90KB
+|     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.tinytable x]
    partitions=1/1 files=1 size=38B
    runtime filters: RF000 -> x.a
+   row-size=24B cardinality=unavailable
 ====
 # Right join input has an unknown cardinality.
 select /* +straight_join */ * from
@@ -51,13 +57,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: x.string_col = y.a
 |  runtime filters: RF000 <- y.a
+|  row-size=113B cardinality=7.30K
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.tinytable y]
 |     partitions=1/1 files=1 size=38B
+|     row-size=24B cardinality=unavailable
 |
 00:SCAN HDFS [functional.alltypes x]
-   partitions=24/24 files=24 size=469.90KB
+   partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> x.string_col
+   row-size=89B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-shuffle.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-shuffle.test b/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-shuffle.test
index 59e60c9..7065371 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-shuffle.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/default-join-distr-mode-shuffle.test
@@ -9,17 +9,20 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: x.a = y.a
 |  runtime filters: RF000 <- y.a
+|  row-size=48B cardinality=unavailable
 |
 |--04:EXCHANGE [HASH(y.a)]
 |  |
 |  01:SCAN HDFS [functional.tinytable y]
 |     partitions=1/1 files=1 size=38B
+|     row-size=24B cardinality=unavailable
 |
 03:EXCHANGE [HASH(x.a)]
 |
 00:SCAN HDFS [functional.tinytable x]
    partitions=1/1 files=1 size=38B
    runtime filters: RF000 -> x.a
+   row-size=24B cardinality=unavailable
 ====
 # Left join input has an unknown cardinality.
 select /* +straight_join */ * from
@@ -32,17 +35,20 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: x.a = y.string_col
 |  runtime filters: RF000 <- y.string_col
+|  row-size=113B cardinality=unavailable
 |
 |--04:EXCHANGE [HASH(y.string_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes y]
-|     partitions=24/24 files=24 size=469.90KB
+|     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:EXCHANGE [HASH(x.a)]
 |
 00:SCAN HDFS [functional.tinytable x]
    partitions=1/1 files=1 size=38B
    runtime filters: RF000 -> x.a
+   row-size=24B cardinality=unavailable
 ====
 # Right join input has an unknown cardinality.
 select /* +straight_join */ * from
@@ -55,15 +61,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: x.string_col = y.a
 |  runtime filters: RF000 <- y.a
+|  row-size=113B cardinality=7.30K
 |
 |--04:EXCHANGE [HASH(y.a)]
 |  |
 |  01:SCAN HDFS [functional.tinytable y]
 |     partitions=1/1 files=1 size=38B
+|     row-size=24B cardinality=unavailable
 |
 03:EXCHANGE [HASH(x.string_col)]
 |
 00:SCAN HDFS [functional.alltypes x]
-   partitions=24/24 files=24 size=469.90KB
+   partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> x.string_col
+   row-size=89B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/disable-codegen.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/disable-codegen.test b/testdata/workloads/functional-planner/queries/PlannerTest/disable-codegen.test
index 3987410..3d2702b 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/disable-codegen.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/disable-codegen.test
@@ -9,14 +9,17 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Rows per node is > 3000: codegen should be enabled.
 select count(*) from functional.alltypesagg
@@ -28,14 +31,17 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=0B cardinality=11.00K
 ====
 # No stats on functional_parquet: codegen should be disabled.
 select count(*) from functional_parquet.alltypes
@@ -49,14 +55,17 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=188.29KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=8B cardinality=unavailable
 ====
 # > 3000 rows returned to coordinator: codegen should be enabled
 select * from functional_parquet.alltypes
@@ -71,7 +80,8 @@ PLAN-ROOT SINK
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=188.29KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=80B cardinality=unavailable
 ====
 # Optimisation is enabled for join producing < 3000 rows
 select count(*)
@@ -86,24 +96,29 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=8B cardinality=8
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.id
+   row-size=4B cardinality=7.30K
 ====
 # Optimisation is disabled by cross join producing > 3000 rows
 select count(*) from functional.alltypes t1, functional.alltypes t2
@@ -115,21 +130,26 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=0B cardinality=53.29M
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Optimisation is enabled for union producing < 3000 rows
 select count(*) from (
@@ -145,20 +165,25 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 04:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=0B cardinality=7.31K
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=0B cardinality=8
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Optimisation is disabled by union producing > 3000 rows
 select count(*) from (
@@ -173,20 +198,25 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 04:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=0B cardinality=14.60K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Scan with limit on large table: the number of rows scanned is bounded,
 # codegen should be disabled
@@ -201,6 +231,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_discount)
+|  row-size=16B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |  limit: 1000
@@ -208,6 +239,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    limit: 1000
+   row-size=8B cardinality=1.00K
 ====
 # Scan with limit and predicates on large table: any number of rows could be scanned:
 # codegen should be enabled
@@ -221,6 +253,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(tpch.lineitem.l_discount)
+|  row-size=16B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |  limit: 1000
@@ -229,4 +262,5 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_orderkey > 100
    limit: 1000
+   row-size=16B cardinality=1.00K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/disable-preaggregations.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/disable-preaggregations.test b/testdata/workloads/functional-planner/queries/PlannerTest/disable-preaggregations.test
index 07726c9..ac2b6e4 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/disable-preaggregations.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/disable-preaggregations.test
@@ -9,15 +9,18 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: tinyint_col
+|  row-size=9B cardinality=9
 |
 02:EXCHANGE [HASH(tinyint_col)]
 |
 01:AGGREGATE
 |  output: count(*)
 |  group by: tinyint_col
+|  row-size=9B cardinality=9
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ====
 select count(distinct id)
 from functional.alltypesagg
@@ -26,20 +29,25 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(id)
+|  row-size=8B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(id)
+|  row-size=8B cardinality=1
 |
 04:AGGREGATE
 |  group by: id
+|  row-size=4B cardinality=10.28K
 |
 03:EXCHANGE [HASH(id)]
 |
 01:AGGREGATE
 |  group by: id
+|  row-size=4B cardinality=10.28K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/distinct-estimate.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/distinct-estimate.test b/testdata/workloads/functional-planner/queries/PlannerTest/distinct-estimate.test
index b895ad0..cb8a4e0 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/distinct-estimate.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/distinct-estimate.test
@@ -5,22 +5,27 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: distinctpc(l_orderkey)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
+   row-size=8B cardinality=6.00M
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: distinctpc:merge(l_orderkey)
+|  row-size=8B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: distinctpc(l_orderkey)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
+   row-size=8B cardinality=6.00M
 ====
 # Distinct estimate with distinct
 select count(distinct l_orderkey), distinctpc(l_orderkey) from tpch.lineitem
@@ -29,34 +34,42 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(l_orderkey), distinctpc:merge(l_orderkey)
+|  row-size=16B cardinality=1
 |
 01:AGGREGATE
 |  output: distinctpc(l_orderkey)
 |  group by: l_orderkey
+|  row-size=16B cardinality=1.56M
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
+   row-size=8B cardinality=6.00M
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(l_orderkey), distinctpc:merge(l_orderkey)
+|  row-size=16B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(l_orderkey), distinctpc:merge(l_orderkey)
+|  row-size=16B cardinality=1
 |
 04:AGGREGATE
 |  output: distinctpc:merge(l_orderkey)
 |  group by: l_orderkey
+|  row-size=16B cardinality=1.56M
 |
 03:EXCHANGE [HASH(l_orderkey)]
 |
 01:AGGREGATE [STREAMING]
 |  output: distinctpc(l_orderkey)
 |  group by: l_orderkey
+|  row-size=16B cardinality=1.56M
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
+   row-size=8B cardinality=6.00M
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/distinct.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/distinct.test b/testdata/workloads/functional-planner/queries/PlannerTest/distinct.test
index 6e66e30..3e671da 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/distinct.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/distinct.test
@@ -6,9 +6,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -16,14 +18,17 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 02:EXCHANGE [HASH(functional.testtbl.id,functional.testtbl.name,functional.testtbl.zip)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ====
 # distinct w/ explicit select list
 select distinct id, zip
@@ -33,9 +38,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  group by: id, zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=12B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -43,14 +50,17 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  group by: id, zip
+|  row-size=12B cardinality=0
 |
 02:EXCHANGE [HASH(id,zip)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: id, zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # count(distinct)
 select count(distinct id, zip)
@@ -60,33 +70,41 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(if(id IS NULL, NULL, zip))
+|  row-size=8B cardinality=0
 |
 01:AGGREGATE
 |  group by: id, zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=12B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(if(id IS NULL, NULL, zip))
+|  row-size=8B cardinality=0
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(if(id IS NULL, NULL, zip))
+|  row-size=8B cardinality=0
 |
 04:AGGREGATE
 |  group by: id, zip
+|  row-size=12B cardinality=0
 |
 03:EXCHANGE [HASH(id,zip)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: id, zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # count(distinct) w/ grouping
 select tinyint_col, count(distinct int_col, bigint_col)
@@ -98,12 +116,15 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(if(int_col IS NULL, NULL, bigint_col))
 |  group by: tinyint_col
+|  row-size=9B cardinality=9
 |
 01:AGGREGATE
 |  group by: tinyint_col, int_col, bigint_col
+|  row-size=13B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=13B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -112,23 +133,28 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(if(int_col IS NULL, NULL, bigint_col))
 |  group by: tinyint_col
+|  row-size=9B cardinality=9
 |
 05:EXCHANGE [HASH(tinyint_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(if(int_col IS NULL, NULL, bigint_col))
 |  group by: tinyint_col
+|  row-size=9B cardinality=9
 |
 04:AGGREGATE
 |  group by: tinyint_col, int_col, bigint_col
+|  row-size=13B cardinality=11.00K
 |
 03:EXCHANGE [HASH(tinyint_col,int_col,bigint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: tinyint_col, int_col, bigint_col
+|  row-size=13B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=13B cardinality=11.00K
 ====
 # count(distinct) and sum(distinct) w/ grouping
 select tinyint_col, count(distinct int_col), sum(distinct int_col)
@@ -140,12 +166,15 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col), sum(int_col)
 |  group by: tinyint_col
+|  row-size=17B cardinality=9
 |
 01:AGGREGATE
 |  group by: tinyint_col, int_col
+|  row-size=5B cardinality=8.61K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=5B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -154,23 +183,28 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col), sum:merge(int_col)
 |  group by: tinyint_col
+|  row-size=17B cardinality=9
 |
 05:EXCHANGE [HASH(tinyint_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(int_col), sum(int_col)
 |  group by: tinyint_col
+|  row-size=17B cardinality=9
 |
 04:AGGREGATE
 |  group by: tinyint_col, int_col
+|  row-size=5B cardinality=8.61K
 |
 03:EXCHANGE [HASH(tinyint_col,int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: tinyint_col, int_col
+|  row-size=5B cardinality=8.61K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=5B cardinality=11.00K
 ====
 # sum(distinct) w/o grouping
 select sum(distinct int_col)
@@ -180,33 +214,41 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: sum(int_col)
+|  row-size=8B cardinality=1
 |
 01:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=957
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: sum:merge(int_col)
+|  row-size=8B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: sum(int_col)
+|  row-size=8B cardinality=1
 |
 04:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=957
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=957
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====
 # count(distinct) and sum(distinct) w/ grouping; distinct in min() and max()
 # is ignored
@@ -219,13 +261,16 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col), min:merge(smallint_col), max:merge(string_col)
 |  group by: tinyint_col
+|  row-size=23B cardinality=9
 |
 01:AGGREGATE
 |  output: min(smallint_col), max(string_col)
 |  group by: tinyint_col, int_col
+|  row-size=19B cardinality=8.61K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=22B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -234,25 +279,30 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col), min:merge(smallint_col), max:merge(string_col)
 |  group by: tinyint_col
+|  row-size=23B cardinality=9
 |
 05:EXCHANGE [HASH(tinyint_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(int_col), min:merge(smallint_col), max:merge(string_col)
 |  group by: tinyint_col
+|  row-size=23B cardinality=9
 |
 04:AGGREGATE
 |  output: min:merge(smallint_col), max:merge(string_col)
 |  group by: tinyint_col, int_col
+|  row-size=19B cardinality=8.61K
 |
 03:EXCHANGE [HASH(tinyint_col,int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(smallint_col), max(string_col)
 |  group by: tinyint_col, int_col
+|  row-size=19B cardinality=8.61K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=22B cardinality=11.00K
 ====
 # aggregate fns with and without distinct
 select tinyint_col, count(distinct int_col), count(*), sum(distinct int_col),
@@ -264,13 +314,16 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col), sum(int_col), count:merge(*), sum:merge(int_col), min:merge(smallint_col), max:merge(bigint_col)
 |  group by: tinyint_col
+|  row-size=43B cardinality=9
 |
 01:AGGREGATE
 |  output: count(*), sum(int_col), min(smallint_col), max(bigint_col)
 |  group by: tinyint_col, int_col
+|  row-size=31B cardinality=8.61K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -279,25 +332,30 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col), sum:merge(int_col), count:merge(*), sum:merge(int_col), min:merge(smallint_col), max:merge(bigint_col)
 |  group by: tinyint_col
+|  row-size=43B cardinality=9
 |
 05:EXCHANGE [HASH(tinyint_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(int_col), sum(int_col), count:merge(*), sum:merge(int_col), min:merge(smallint_col), max:merge(bigint_col)
 |  group by: tinyint_col
+|  row-size=43B cardinality=9
 |
 04:AGGREGATE
 |  output: count:merge(*), sum:merge(int_col), min:merge(smallint_col), max:merge(bigint_col)
 |  group by: tinyint_col, int_col
+|  row-size=31B cardinality=8.61K
 |
 03:EXCHANGE [HASH(tinyint_col,int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*), sum(int_col), min(smallint_col), max(bigint_col)
 |  group by: tinyint_col, int_col
+|  row-size=31B cardinality=8.61K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ====
 # test join on inline views containing distinct aggregates to make sure
 # the aggregation info reports the correct tuple ids (from the 2nd phase
@@ -310,69 +368,87 @@ PLAN-ROOT SINK
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: count(int_col) = count(bigint_col)
+|  row-size=16B cardinality=1
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: count(bigint_col)
+|  |  row-size=8B cardinality=1
 |  |
 |  04:AGGREGATE
 |  |  group by: bigint_col
+|  |  row-size=8B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col)
+|  row-size=8B cardinality=1
 |
 01:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: count(int_col) = count(bigint_col)
+|  row-size=16B cardinality=1
 |
 |--15:EXCHANGE [UNPARTITIONED]
 |  |
 |  14:AGGREGATE [FINALIZE]
 |  |  output: count:merge(bigint_col)
+|  |  row-size=8B cardinality=1
 |  |
 |  13:EXCHANGE [UNPARTITIONED]
 |  |
 |  05:AGGREGATE
 |  |  output: count(bigint_col)
+|  |  row-size=8B cardinality=1
 |  |
 |  12:AGGREGATE
 |  |  group by: bigint_col
+|  |  row-size=8B cardinality=2
 |  |
 |  11:EXCHANGE [HASH(bigint_col)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  group by: bigint_col
+|  |  row-size=8B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 10:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
+|  row-size=8B cardinality=1
 |
 09:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(int_col)
+|  row-size=8B cardinality=1
 |
 08:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=2
 |
 07:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ====
 # Test placement of having predicate into 2nd phase merge agg for
 # distinct + non-distinct aggregates without group by (IMPALA-845).
@@ -388,37 +464,45 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(tinyint_col), count:merge(bigint_col)
 |  having: zeroifnull(count(bigint_col)) > 0
+|  row-size=16B cardinality=0
 |
 01:AGGREGATE
 |  output: count(bigint_col)
 |  group by: tinyint_col
+|  row-size=9B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(tinyint_col), count:merge(bigint_col)
 |  having: zeroifnull(count(bigint_col)) > 0
+|  row-size=16B cardinality=0
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(tinyint_col), count:merge(bigint_col)
+|  row-size=16B cardinality=0
 |
 04:AGGREGATE
 |  output: count:merge(bigint_col)
 |  group by: tinyint_col
+|  row-size=9B cardinality=10
 |
 03:EXCHANGE [HASH(tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(bigint_col)
 |  group by: tinyint_col
+|  row-size=9B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ====
 # test slot materialization on a distinct agg inside an inline view
 # triggered by a predicate in an outer query block (IMPALA-861)
@@ -431,34 +515,42 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(1)
 |  having: count(1) IS NOT NULL
+|  row-size=8B cardinality=0
 |
 01:AGGREGATE
 |  group by: 1
+|  row-size=1B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(1)
 |  having: count(1) IS NOT NULL
+|  row-size=8B cardinality=0
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(1)
+|  row-size=8B cardinality=0
 |
 04:AGGREGATE
 |  group by: 1
+|  row-size=1B cardinality=1
 |
 03:EXCHANGE [HASH(1)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: 1
+|  row-size=1B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # test slot materialization on a distinct agg inside an inline view
 # triggered by a predicate in an outer query block (IMPALA-861)
@@ -471,37 +563,45 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(1), count:merge(*)
 |  having: count(1) > 0, zeroifnull(count(*)) > 1, count(1) + zeroifnull(count(*)) > 10
+|  row-size=16B cardinality=0
 |
 01:AGGREGATE
 |  output: count(*)
 |  group by: 1
+|  row-size=9B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(1), count:merge(*)
 |  having: count(1) > 0, zeroifnull(count(*)) > 1, count(1) + zeroifnull(count(*)) > 10
+|  row-size=16B cardinality=0
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(1), count:merge(*)
+|  row-size=16B cardinality=0
 |
 04:AGGREGATE
 |  output: count:merge(*)
 |  group by: 1
+|  row-size=9B cardinality=1
 |
 03:EXCHANGE [HASH(1)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: 1
+|  row-size=9B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # IMPALA-2266: Test non-grouping distinct aggregation inside an inline view.
 select * from (select count(distinct int_col) cd from functional.alltypes) v
@@ -510,22 +610,27 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
+|  row-size=8B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(int_col)
+|  row-size=8B cardinality=1
 |
 04:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-2266: Test grouping distinct aggregation inside an inline view.
 select * from (select count(distinct int_col) cd from functional.alltypes group by bool_col) v
@@ -537,23 +642,28 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
 |  group by: bool_col
+|  row-size=9B cardinality=2
 |
 05:EXCHANGE [HASH(bool_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(int_col)
 |  group by: bool_col
+|  row-size=9B cardinality=2
 |
 04:AGGREGATE
 |  group by: bool_col, int_col
+|  row-size=5B cardinality=20
 |
 03:EXCHANGE [HASH(bool_col,int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: bool_col, int_col
+|  row-size=5B cardinality=20
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=5B cardinality=7.30K
 ====
 # IMPALA-4042: count(distinct NULL) fails on a view
 select count(distinct null) from functional.alltypes_view
@@ -562,22 +672,27 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(NULL)
+|  row-size=8B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(NULL)
+|  row-size=8B cardinality=1
 |
 04:AGGREGATE
 |  group by: NULL
+|  row-size=1B cardinality=1
 |
 03:EXCHANGE [HASH(NULL)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: NULL
+|  row-size=1B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Query block with a single distinct and multiple non-distinct aggs simplifies to a
 # non-grouping aggregation plan.
@@ -592,35 +707,43 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: min:merge(string_col), max:merge(string_col)
 |  having: min(string_col) < '9', min(string_col) < max(string_col)
+|  row-size=24B cardinality=0
 |
 01:AGGREGATE
 |  output: min(string_col), max(string_col)
 |  group by: smallint_col
+|  row-size=26B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: min:merge(string_col), max:merge(string_col)
 |  having: min(string_col) < '9', min(string_col) < max(string_col)
+|  row-size=24B cardinality=0
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: min:merge(string_col), max:merge(string_col)
+|  row-size=24B cardinality=0
 |
 04:AGGREGATE
 |  output: min:merge(string_col), max:merge(string_col)
 |  group by: smallint_col
+|  row-size=26B cardinality=10
 |
 03:EXCHANGE [HASH(smallint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(string_col), max(string_col)
 |  group by: smallint_col
+|  row-size=26B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/empty.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/empty.test b/testdata/workloads/functional-planner/queries/PlannerTest/empty.test
index 43d1fcf..964b7f9 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/empty.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/empty.test
@@ -47,6 +47,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(int_col), avg(double_col), count(*)
+|  row-size=24B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -61,6 +62,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -93,12 +95,14 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: f.id = t1.id
 |  runtime filters: RF000 <- t1.id
+|  row-size=8B cardinality=0
 |
 |--01:EMPTYSET
 |
 00:SCAN HDFS [functional.alltypessmall f]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> f.id
+   row-size=4B cardinality=100
 ====
 # Constant conjunct causes union operand to be dropped.
 select * from functional.alltypessmall
@@ -111,12 +115,15 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=108
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant conjunct turns union into an empty-set node.
 select *
@@ -133,11 +140,13 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = id
+|  row-size=178B cardinality=7.30K
 |
 |--01:EMPTYSET
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Constant conjunct in the ON-clause of an outer join is
 # assigned to the join.
@@ -151,12 +160,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = b.id
 |  other join predicates: FALSE
+|  row-size=178B cardinality=100
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant conjunct in the ON-clause of an outer join is
 # assigned to the join.
@@ -171,13 +183,16 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id
 |  other join predicates: FALSE
 |  runtime filters: RF000 <- b.id
+|  row-size=178B cardinality=9
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=100
 ====
 # Constant conjunct in the ON-clause of an outer join is
 # assigned to the join.
@@ -191,12 +206,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = b.id
 |  other join predicates: NULL
+|  row-size=178B cardinality=108
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Limit 0 turns query block into an empty-set node.
 select t1.id, t2.id
@@ -234,12 +252,14 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: f.id = t1.id
 |  runtime filters: RF000 <- t1.id
+|  row-size=8B cardinality=0
 |
 |--01:EMPTYSET
 |
 00:SCAN HDFS [functional.alltypessmall f]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> f.id
+   row-size=4B cardinality=100
 ====
 # Limit 0 causes union operand to be dropped.
 select * from functional.alltypessmall
@@ -252,12 +272,15 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=108
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Limit 0 causes empty-set union.
 select * from functional.alltypessmall
@@ -282,12 +305,15 @@ select int_col from functional.alltypesagg
 PLAN-ROOT SINK
 |
 00:UNION
+|  row-size=8B cardinality=11.00K
 |
 |--03:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=4B cardinality=11.00K
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(1)
+|  row-size=8B cardinality=0
 |
 01:EMPTYSET
 ====
@@ -307,6 +333,7 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -320,6 +347,7 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -343,6 +371,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(id), count(int_col)
+|  row-size=16B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -358,6 +387,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(id + int_col)
+|  row-size=8B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -373,9 +403,11 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(T1.int_col)
+|  row-size=8B cardinality=0
 |
 01:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=0
 |
 00:EMPTYSET
 ====
@@ -396,11 +428,14 @@ PLAN-ROOT SINK
 |
 02:UNION
 |  pass-through-operands: all
+|  row-size=2B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  group by: lead(-496, 81, NULL) OVER(...)
+|  row-size=2B cardinality=0
 |
 00:UNION
+   row-size=2B cardinality=0
 ====
 # IMPALA-2088: Test empty union operands with analytic functions.
 select lead(-496, 81) over (order by t1.double_col desc, t1.id asc)
@@ -421,14 +456,18 @@ PLAN-ROOT SINK
 02:UNION
 |  constant-operands=1
 |  pass-through-operands: 01
+|  row-size=2B cardinality=9
 |
 |--03:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 01:AGGREGATE [FINALIZE]
 |  group by: lead(-496, 81, NULL) OVER(...)
+|  row-size=2B cardinality=0
 |
 00:UNION
+   row-size=2B cardinality=0
 ====
 # IMPALA-2216: Make sure the final output exprs are substituted, even
 # if the resulting plan is an EmptySetNode.
@@ -471,42 +510,55 @@ where c_custkey < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=48B cardinality=15.00K
 |
 |--16:NESTED LOOP JOIN [LEFT OUTER JOIN]
+|  |  row-size=48B cardinality=1
 |  |
 |  |--12:AGGREGATE [FINALIZE]
 |  |  |  output: count(*)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  08:SUBPLAN
+|  |  |  row-size=0B cardinality=10
 |  |  |
 |  |  |--11:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  |  |  row-size=0B cardinality=1
 |  |  |  |
 |  |  |  |--09:SINGULAR ROW SRC
+|  |  |  |     row-size=12B cardinality=1
 |  |  |  |
 |  |  |  10:EMPTYSET
 |  |  |
 |  |  07:UNNEST [c.c_orders o]
+|  |     row-size=0B cardinality=10
 |  |
 |  15:NESTED LOOP JOIN [LEFT OUTER JOIN]
+|  |  row-size=40B cardinality=1
 |  |
 |  |--06:EMPTYSET
 |  |
 |  14:NESTED LOOP JOIN [LEFT OUTER JOIN]
+|  |  row-size=36B cardinality=1
 |  |
 |  |--05:EMPTYSET
 |  |
 |  13:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  row-size=28B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=56B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: count(*)
+|  |  row-size=8B cardinality=0
 |  |
 |  03:EMPTYSET
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=292.36MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_custkey < 10
+   row-size=56B cardinality=15.00K
 ====
 # IMPALA-2539: Test empty union operands containing relative table refs.
 select c_custkey, o_orderkey
@@ -524,23 +576,31 @@ where c_custkey = 1
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=28B cardinality=10
 |
 |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=28B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=44B cardinality=1
 |  |
 |  06:UNION
+|  |  row-size=8B cardinality=10
 |  |
 |  05:AGGREGATE [FINALIZE]
 |  |  group by: o_orderkey
+|  |  row-size=8B cardinality=10
 |  |
 |  03:UNION
+|  |  row-size=8B cardinality=10
 |  |
 |  04:UNNEST [c.c_orders o1]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=292.36MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_custkey = 1
+   row-size=44B cardinality=1
 ====
 # IMPALA-2215: Having clause without aggregation.
 select 1 from (select 1) v having 1 > 1
@@ -564,11 +624,14 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=1B cardinality=0
 |
 |--01:EMPTYSET
 |
 00:SCAN HDFS [functional.alltypes x]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/fk-pk-join-detection.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/fk-pk-join-detection.test b/testdata/workloads/functional-planner/queries/PlannerTest/fk-pk-join-detection.test
index 8b71f11..5383858 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/fk-pk-join-detection.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/fk-pk-join-detection.test
@@ -14,7 +14,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: ss_customer_sk = c_customer_sk
 |  runtime filters: RF000[bloom] <- c_customer_sk
 |  mem-estimate=4.75MB mem-reservation=4.75MB spill-buffer=256.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=319B cardinality=529700
+|  tuple-ids=0,1 row-size=319B cardinality=529.70K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpcds.customer]
@@ -25,7 +25,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=100000
 |     mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=219B cardinality=16667
+|     tuple-ids=1 row-size=219B cardinality=16.67K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpcds.store_sales]
@@ -37,7 +37,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=130093
    mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=100B cardinality=2880404
+   tuple-ids=0 row-size=100B cardinality=2.88M
    in pipelines: 00(GETNEXT)
 ====
 # Single-column FK/PK join detection on left outer join. The join cardinality
@@ -57,7 +57,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: ss_customer_sk = c_customer_sk
 |  other predicates: c_salutation = 'Mrs.'
 |  mem-estimate=4.75MB mem-reservation=4.75MB spill-buffer=256.00KB thread-reservation=0
-|  tuple-ids=0,1N row-size=319B cardinality=2880404
+|  tuple-ids=0,1N row-size=319B cardinality=2.88M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpcds.customer]
@@ -68,7 +68,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=100000
 |     mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=219B cardinality=16667
+|     tuple-ids=1 row-size=219B cardinality=16.67K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpcds.store_sales]
@@ -79,7 +79,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=130093
    mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=100B cardinality=2880404
+   tuple-ids=0 row-size=100B cardinality=2.88M
    in pipelines: 00(GETNEXT)
 ====
 # Single-column FK/PK join detection on right outer join. The join cardinality
@@ -99,7 +99,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: ss_customer_sk = c_customer_sk
 |  runtime filters: RF000[bloom] <- c_customer_sk
 |  mem-estimate=4.75MB mem-reservation=4.75MB spill-buffer=256.00KB thread-reservation=0
-|  tuple-ids=0N,1 row-size=319B cardinality=529700
+|  tuple-ids=0N,1 row-size=319B cardinality=529.70K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpcds.customer]
@@ -110,7 +110,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=100000
 |     mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=219B cardinality=16667
+|     tuple-ids=1 row-size=219B cardinality=16.67K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpcds.store_sales]
@@ -122,7 +122,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=130093
    mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=100B cardinality=2880404
+   tuple-ids=0 row-size=100B cardinality=2.88M
    in pipelines: 00(GETNEXT)
 ====
 # Multi-column FK/PK join detection
@@ -141,7 +141,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: ss_item_sk = sr_item_sk, ss_ticket_number = sr_ticket_number
 |  runtime filters: RF000[bloom] <- sr_item_sk, RF001[bloom] <- sr_ticket_number
 |  mem-estimate=4.75MB mem-reservation=4.75MB spill-buffer=256.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=188B cardinality=211838
+|  tuple-ids=0,1 row-size=188B cardinality=211.84K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpcds.store_returns]
@@ -152,7 +152,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=287514
 |     mem-estimate=80.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=88B cardinality=28751
+|     tuple-ids=1 row-size=88B cardinality=28.75K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpcds.store_sales]
@@ -164,7 +164,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=130093
    mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=100B cardinality=2880404
+   tuple-ids=0 row-size=100B cardinality=2.88M
    in pipelines: 00(GETNEXT)
 ====
 # Many-to-many join detection.
@@ -182,7 +182,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: none
 |  runtime filters: RF000[bloom] <- ws_sold_time_sk
 |  mem-estimate=108.67MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=244B cardinality=44136418
+|  tuple-ids=0,1 row-size=244B cardinality=44.14M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpcds.web_sales]
@@ -192,7 +192,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=657377
 |     mem-estimate=160.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=144B cardinality=719384
+|     tuple-ids=1 row-size=144B cardinality=719.38K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpcds.store_sales]
@@ -204,7 +204,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=130093
    mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=100B cardinality=2880404
+   tuple-ids=0 row-size=100B cardinality=2.88M
    in pipelines: 00(GETNEXT)
 ====
 # PK/PK join is detected as FK/PK.
@@ -223,7 +223,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: b.d_date_sk = a.d_date_sk
 |  runtime filters: RF000[bloom] <- a.d_date_sk
 |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  tuple-ids=1,0 row-size=510B cardinality=36525
+|  tuple-ids=1,0 row-size=510B cardinality=36.52K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 |--00:SCAN HDFS [tpcds.date_dim a]
@@ -234,7 +234,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=73049
 |     mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=0 row-size=255B cardinality=36525
+|     tuple-ids=0 row-size=255B cardinality=36.52K
 |     in pipelines: 00(GETNEXT)
 |
 01:SCAN HDFS [tpcds.date_dim b]
@@ -245,7 +245,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=73049
    mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=1 row-size=255B cardinality=73049
+   tuple-ids=1 row-size=255B cardinality=73.05K
    in pipelines: 01(GETNEXT)
 ====
 # Single query with various join types combined.
@@ -268,7 +268,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: none
 |  runtime filters: RF000[bloom] <- c_current_addr_sk
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=1,0,3,4,2 row-size=60B cardinality=19358
+|  tuple-ids=1,0,3,4,2 row-size=60B cardinality=19.36K
 |  in pipelines: 01(GETNEXT), 02(OPEN)
 |
 |--02:SCAN HDFS [tpcds.customer]
@@ -278,7 +278,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=100000
 |     mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=2 row-size=4B cardinality=100000
+|     tuple-ids=2 row-size=4B cardinality=100.00K
 |     in pipelines: 02(GETNEXT)
 |
 07:HASH JOIN [INNER JOIN]
@@ -286,7 +286,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: sr_returned_date_sk = d2.d_date_sk
 |  runtime filters: RF002[bloom] <- d2.d_date_sk
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=1,0,3,4 row-size=56B cardinality=8131
+|  tuple-ids=1,0,3,4 row-size=56B cardinality=8.13K
 |  in pipelines: 01(GETNEXT), 04(OPEN)
 |
 |--04:SCAN HDFS [tpcds.date_dim d2]
@@ -296,7 +296,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=73049
 |     mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=4 row-size=4B cardinality=73049
+|     tuple-ids=4 row-size=4B cardinality=73.05K
 |     in pipelines: 04(GETNEXT)
 |
 06:HASH JOIN [INNER JOIN]
@@ -304,7 +304,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: sr_item_sk = ss_item_sk, sr_ticket_number = ss_ticket_number
 |  runtime filters: RF004[bloom] <- ss_item_sk, RF005[bloom] <- ss_ticket_number
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=1,0,3 row-size=52B cardinality=8131
+|  tuple-ids=1,0,3 row-size=52B cardinality=8.13K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 |--05:HASH JOIN [INNER JOIN]
@@ -312,7 +312,7 @@ PLAN-ROOT SINK
 |  |  fk/pk conjuncts: ss_sold_date_sk = d1.d_date_sk
 |  |  runtime filters: RF008[bloom] <- d1.d_date_sk
 |  |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  |  tuple-ids=0,3 row-size=32B cardinality=11055
+|  |  tuple-ids=0,3 row-size=32B cardinality=11.05K
 |  |  in pipelines: 00(GETNEXT), 03(OPEN)
 |  |
 |  |--03:SCAN HDFS [tpcds.date_dim d1]
@@ -335,7 +335,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=130093
 |     mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=0 row-size=24B cardinality=2880404
+|     tuple-ids=0 row-size=24B cardinality=2.88M
 |     in pipelines: 00(GETNEXT)
 |
 01:SCAN HDFS [tpcds.store_returns]
@@ -346,7 +346,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=287514
    mem-estimate=80.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=1 row-size=20B cardinality=287514
+   tuple-ids=1 row-size=20B cardinality=287.51K
    in pipelines: 01(GETNEXT)
 ====
 # Assumed FK/PK join becasue of non-trivial equi-join exprs.
@@ -364,7 +364,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: assumed fk/pk
 |  runtime filters: RF000[bloom] <- c_customer_sk / 100
 |  mem-estimate=34.00MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=319B cardinality=2880404
+|  tuple-ids=0,1 row-size=319B cardinality=2.88M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpcds.customer]
@@ -374,7 +374,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=100000
 |     mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=219B cardinality=100000
+|     tuple-ids=1 row-size=219B cardinality=100.00K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpcds.store_sales]
@@ -386,7 +386,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=130093
    mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=100B cardinality=2880404
+   tuple-ids=0 row-size=100B cardinality=2.88M
    in pipelines: 00(GETNEXT)
 ====
 # Assumed FK/PK join due to missing stats on the rhs. Join cardinality is equal to
@@ -405,7 +405,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: assumed fk/pk
 |  runtime filters: RF000[bloom] <- c_customer_sk
 |  mem-estimate=2.00GB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=8B cardinality=2880404
+|  tuple-ids=0,1 row-size=8B cardinality=2.88M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpcds_seq_snap.customer]
@@ -427,7 +427,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=130093
    mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=4B cardinality=2880404
+   tuple-ids=0 row-size=4B cardinality=2.88M
    in pipelines: 00(GETNEXT)
 ====
 # Assumed FK/PK join due to missing stats on the lhs. Join cardinality is unknown.
@@ -455,7 +455,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=100000
 |     mem-estimate=48.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=4B cardinality=100000
+|     tuple-ids=1 row-size=4B cardinality=100.00K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpcds_seq_snap.store_sales]
@@ -487,13 +487,13 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: none
 |  runtime filters: RF000[bloom] <- ws_sold_time_sk
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,2 row-size=104B cardinality=2440073
+|  tuple-ids=0,2 row-size=104B cardinality=2.44M
 |  in pipelines: 00(GETNEXT), 02(OPEN)
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  group by: ws_sold_time_sk
 |  |  mem-estimate=10.00MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  |  tuple-ids=2 row-size=4B cardinality=39771
+|  |  tuple-ids=2 row-size=4B cardinality=39.77K
 |  |  in pipelines: 02(GETNEXT), 01(OPEN)
 |  |
 |  01:SCAN HDFS [tpcds.web_sales]
@@ -503,7 +503,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=657377
 |     mem-estimate=160.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=4B cardinality=719384
+|     tuple-ids=1 row-size=4B cardinality=719.38K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpcds.store_sales]
@@ -515,6 +515,6 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=130093
    mem-estimate=128.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=100B cardinality=2880404
+   tuple-ids=0 row-size=100B cardinality=2.88M
    in pipelines: 00(GETNEXT)
 ====


[15/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/order.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/order.test b/testdata/workloads/functional-planner/queries/PlannerTest/order.test
index cd60a3a..28d6aa8 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/order.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/order.test
@@ -6,9 +6,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: name ASC
+|  row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=16B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -18,9 +20,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: name ASC
+|  row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select name, zip
 from functional.testtbl
@@ -30,9 +34,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: name ASC
+|  row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=16B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -41,9 +47,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: name ASC
+|  row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select zip, count(*)
 from functional.testtbl
@@ -55,14 +63,17 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: count(*) DESC
+|  row-size=12B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
    predicates: name LIKE 'm%'
+   row-size=16B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -71,20 +82,24 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: count(*) DESC
+|  row-size=12B cardinality=0
 |
 04:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 03:EXCHANGE [HASH(zip)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
    predicates: name LIKE 'm%'
+   row-size=16B cardinality=0
 ====
 select int_col, sum(float_col)
 from functional_hbase.alltypessmall
@@ -96,13 +111,16 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: sum(float_col) ASC
+|  row-size=12B cardinality=5
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(float_col)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: id < 5
+   row-size=12B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -111,19 +129,23 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: sum(float_col) ASC
+|  row-size=12B cardinality=5
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum:merge(float_col)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: sum(float_col)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: id < 5
+   row-size=12B cardinality=5
 ====
 select int_col, sum(float_col), min(float_col)
 from functional_hbase.alltypessmall
@@ -134,12 +156,15 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: sum(float_col) ASC, min(float_col) DESC
+|  row-size=16B cardinality=10
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(float_col), min(float_col)
 |  group by: int_col
+|  row-size=16B cardinality=10
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
+   row-size=8B cardinality=50
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -148,18 +173,22 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: sum(float_col) ASC, min(float_col) DESC
+|  row-size=16B cardinality=10
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum:merge(float_col), min:merge(float_col)
 |  group by: int_col
+|  row-size=16B cardinality=10
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: sum(float_col), min(float_col)
 |  group by: int_col
+|  row-size=16B cardinality=10
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
+   row-size=8B cardinality=50
 ====
 # Test that the sort is on int_col and not on the id column
 select int_col as id from functional.alltypessmall order by id
@@ -168,9 +197,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: int_col ASC
+|  row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=4B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -179,9 +210,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: int_col ASC
+|  row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=4B cardinality=100
 ====
 # Test that the sort is on id and not on int_col
 select int_col as id from functional.alltypessmall order by functional.alltypessmall.id
@@ -190,9 +223,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: id ASC
+|  row-size=8B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=8B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -201,9 +236,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: id ASC
+|  row-size=8B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=8B cardinality=100
 ====
 # test distributed sort over a union
 select int_col, bigint_col from
@@ -216,15 +253,19 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: int_col DESC
+|  row-size=12B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=12B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -234,15 +275,19 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: int_col DESC
+|  row-size=12B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=12B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # test distributed sort over a union distinct
 select int_col, bigint_col from
@@ -255,18 +300,23 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: int_col DESC
+|  row-size=12B cardinality=7.40K
 |
 03:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -276,23 +326,29 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: int_col DESC
+|  row-size=12B cardinality=7.40K
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 05:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # simple join
 select j.*, d.* from functional.JoinTbl j full outer join functional.DimTbl d
@@ -303,15 +359,19 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: test_id ASC, test_name ASC, test_zip ASC, alltypes_id ASC, name ASC
+|  row-size=63B cardinality=29
 |
 02:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: j.test_id = d.id
+|  row-size=63B cardinality=29
 |
 |--01:SCAN HDFS [functional.dimtbl d]
 |     partitions=1/1 files=1 size=171B
+|     row-size=29B cardinality=10
 |
 00:SCAN HDFS [functional.jointbl j]
    partitions=1/1 files=1 size=433B
+   row-size=33B cardinality=19
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -320,19 +380,23 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: test_id ASC, test_name ASC, test_zip ASC, alltypes_id ASC, name ASC
+|  row-size=63B cardinality=29
 |
 02:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: j.test_id = d.id
+|  row-size=63B cardinality=29
 |
 |--05:EXCHANGE [HASH(d.id)]
 |  |
 |  01:SCAN HDFS [functional.dimtbl d]
 |     partitions=1/1 files=1 size=171B
+|     row-size=29B cardinality=10
 |
 04:EXCHANGE [HASH(j.test_id)]
 |
 00:SCAN HDFS [functional.jointbl j]
    partitions=1/1 files=1 size=433B
+   row-size=33B cardinality=19
 ====
 # more joins
 select a.smallint_col, b.id, a.tinyint_col, c.id, a.int_col, b.float_col, c.string_col
@@ -351,29 +415,36 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: string_col DESC, smallint_col ASC
+|  row-size=32B cardinality=11
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: c.id = a.tinyint_col
 |  other predicates: a.int_col + b.float_col + CAST(c.string_col AS FLOAT) < 1000
 |  runtime filters: RF000 <- a.tinyint_col
+|  row-size=32B cardinality=11
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.smallint_col = b.id
 |  |  runtime filters: RF002 <- b.id
+|  |  row-size=15B cardinality=11
 |  |
 |  |--01:SCAN HDFS [functional.alltypessmall b]
 |  |     partitions=4/4 files=4 size=6.32KB
 |  |     predicates: b.float_col > 4.5
+|  |     row-size=8B cardinality=10
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
+|     partition predicates: a.month = 1, a.day = 1
 |     partitions=1/11 files=1 size=73.39KB
 |     predicates: a.int_col > 899
 |     runtime filters: RF002 -> a.smallint_col
+|     row-size=7B cardinality=100
 |
 02:SCAN HDFS [functional.alltypessmall c]
    partitions=4/4 files=4 size=6.32KB
    predicates: c.string_col < '7'
    runtime filters: RF000 -> c.id
+   row-size=17B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -382,23 +453,28 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: string_col DESC, smallint_col ASC
+|  row-size=32B cardinality=11
 |
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: c.id = a.tinyint_col
 |  other predicates: a.int_col + b.float_col + CAST(c.string_col AS FLOAT) < 1000
 |  runtime filters: RF000 <- a.tinyint_col
+|  row-size=32B cardinality=11
 |
 |--09:EXCHANGE [HASH(a.tinyint_col)]
 |  |
 |  03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  |  hash predicates: b.id = a.smallint_col
 |  |  runtime filters: RF002 <- a.smallint_col
+|  |  row-size=15B cardinality=11
 |  |
 |  |--07:EXCHANGE [HASH(a.smallint_col)]
 |  |  |
 |  |  00:SCAN HDFS [functional.alltypesagg a]
+|  |     partition predicates: a.month = 1, a.day = 1
 |  |     partitions=1/11 files=1 size=73.39KB
 |  |     predicates: a.int_col > 899
+|  |     row-size=7B cardinality=100
 |  |
 |  06:EXCHANGE [HASH(b.id)]
 |  |
@@ -406,6 +482,7 @@ PLAN-ROOT SINK
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: b.float_col > 4.5
 |     runtime filters: RF002 -> b.id
+|     row-size=8B cardinality=10
 |
 08:EXCHANGE [HASH(c.id)]
 |
@@ -413,6 +490,7 @@ PLAN-ROOT SINK
    partitions=4/4 files=4 size=6.32KB
    predicates: c.string_col < '7'
    runtime filters: RF000 -> c.id
+   row-size=17B cardinality=10
 ====
 # agg in ordering
 select int_col, count(*), avg(tinyint_col)
@@ -424,13 +502,16 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: avg(tinyint_col) ASC
+|  row-size=20B cardinality=957
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*), avg(tinyint_col)
 |  group by: int_col
+|  row-size=20B cardinality=957
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=5B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -439,19 +520,23 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: avg(tinyint_col) ASC
+|  row-size=20B cardinality=957
 |
 04:AGGREGATE [FINALIZE]
 |  output: count:merge(*), avg:merge(tinyint_col)
 |  group by: int_col
+|  row-size=20B cardinality=957
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*), avg(tinyint_col)
 |  group by: int_col
+|  row-size=20B cardinality=957
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=5B cardinality=11.00K
 ====
 # outer join
 select t1.id, t1.int_col, t2.id, t2.int_col
@@ -464,15 +549,19 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: id ASC, id ASC
+|  row-size=16B cardinality=11.00K
 |
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.int_col = t2.int_col
+|  row-size=16B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=8B cardinality=100
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
+   row-size=8B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -481,17 +570,21 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: id ASC, id ASC
+|  row-size=16B cardinality=11.00K
 |
 02:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash predicates: t1.int_col = t2.int_col
+|  row-size=16B cardinality=11.00K
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=8B cardinality=100
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
+   row-size=8B cardinality=11.00K
 ====
 # cross join
 select t1.id, t2.id from functional.alltypestiny t1 cross join functional.alltypestiny t2
@@ -502,16 +595,20 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: id ASC, id ASC
+|  row-size=8B cardinality=1
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=8B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t2.id < 3
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    predicates: t1.id < 3
+   row-size=4B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -520,18 +617,22 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: id ASC, id ASC
+|  row-size=8B cardinality=1
 |
 02:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=8B cardinality=1
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t2.id < 3
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    predicates: t1.id < 3
+   row-size=4B cardinality=1
 ====
 # union queries with mutiple operands/union types.
 select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month from functional.alltypestiny where year=2009 and month=1
@@ -547,24 +648,35 @@ PLAN-ROOT SINK
 |
 06:SORT
 |  order by: id ASC
+|  row-size=89B cardinality=8
 |
 05:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -573,29 +685,41 @@ PLAN-ROOT SINK
 |
 06:SORT
 |  order by: id ASC
+|  row-size=89B cardinality=8
 |
 08:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 07:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 05:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month from functional.alltypestiny where year=2009 and month=1
 union distinct
@@ -610,27 +734,39 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: id ASC, bool_col ASC
+|  row-size=89B cardinality=8
 |
 04:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--06:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -639,32 +775,45 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: id ASC, bool_col ASC
+|  row-size=89B cardinality=8
 |
 04:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--09:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  08:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  03:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--06:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month from functional.alltypestiny where year=2009 and month=1
 union distinct
@@ -691,54 +840,80 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: year ASC, month ASC, id ASC
+|  row-size=89B cardinality=15
 |
 09:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=15
 |
 |--08:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=9
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=9
 |  |
 |  |--07:TOP-N [LIMIT=3]
 |  |  |  order by: id ASC
+|  |  |  row-size=89B cardinality=3
 |  |  |
 |  |  04:UNION
 |  |  |  pass-through-operands: all
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--06:SCAN HDFS [functional.alltypestiny]
+|  |  |     partition predicates: year = 2009, month = 3
 |  |  |     partitions=1/4 files=1 size=115B
+|  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  05:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--15:TOP-N [LIMIT=3]
 |  |  order by: id ASC
+|  |  row-size=89B cardinality=2
 |  |
 |  12:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=2
 |  |
 |  |--14:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 5
 |  |     partitions=0/4 files=0 size=0B
+|  |     row-size=89B cardinality=0
 |  |
 |  13:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 4
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--11:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 4
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 10:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 3
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -747,20 +922,25 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: year ASC, month ASC, id ASC
+|  row-size=89B cardinality=15
 |
 09:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=15
 |
 |--21:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=9
 |  |
 |  20:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  08:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=9
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=9
 |  |
 |  |--19:EXCHANGE [RANDOM]
 |  |  |
@@ -770,24 +950,36 @@ PLAN-ROOT SINK
 |  |  |
 |  |  07:TOP-N [LIMIT=3]
 |  |  |  order by: id ASC
+|  |  |  row-size=89B cardinality=3
 |  |  |
 |  |  04:UNION
 |  |  |  pass-through-operands: all
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--06:SCAN HDFS [functional.alltypestiny]
+|  |  |     partition predicates: year = 2009, month = 3
 |  |  |     partitions=1/4 files=1 size=115B
+|  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  05:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--22:EXCHANGE [RANDOM]
 |  |
@@ -797,21 +989,31 @@ PLAN-ROOT SINK
 |  |
 |  15:TOP-N [LIMIT=3]
 |  |  order by: id ASC
+|  |  row-size=89B cardinality=2
 |  |
 |  12:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=2
 |  |
 |  |--14:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 5
 |  |     partitions=0/4 files=0 size=0B
+|  |     row-size=89B cardinality=0
 |  |
 |  13:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 4
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--11:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 4
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 10:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 3
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 # Sort over top-n
 select * from (select * from functional.alltypes order by bigint_col limit 10) t
@@ -821,17 +1023,21 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: int_col ASC
+|  row-size=89B cardinality=10
 |
 01:TOP-N [LIMIT=10]
 |  order by: bigint_col ASC
+|  row-size=89B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 02:SORT
 |  order by: int_col ASC
+|  row-size=89B cardinality=10
 |
 03:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: bigint_col ASC
@@ -839,9 +1045,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: bigint_col ASC
+|  row-size=89B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Sort over top-n over union
 select * from
@@ -854,23 +1062,29 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: int_col ASC
+|  row-size=89B cardinality=10
 |
 03:TOP-N [LIMIT=10]
 |  order by: bigint_col ASC
+|  row-size=89B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:SORT
 |  order by: int_col ASC
+|  row-size=89B cardinality=10
 |
 05:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: bigint_col ASC
@@ -878,15 +1092,19 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=10]
 |  order by: bigint_col ASC
+|  row-size=89B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # ignore order by with no limit in subqueries and with clause
 with B as (select * from functional.alltypes order by int_col)
@@ -899,13 +1117,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.string_col = functional.alltypes.string_col
 |  runtime filters: RF000 <- functional.alltypes.string_col
+|  row-size=178B cardinality=5.33M
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.string_col
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -914,17 +1135,20 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.string_col = functional.alltypes.string_col
 |  runtime filters: RF000 <- functional.alltypes.string_col
+|  row-size=178B cardinality=5.33M
 |
 |--04:EXCHANGE [HASH(functional.alltypes.string_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:EXCHANGE [HASH(functional.alltypes.string_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.string_col
+   row-size=89B cardinality=7.30K
 ====
 # ignore order by with no limit in insert
 insert into functional.alltypes partition(year, month)
@@ -935,20 +1159,24 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 01:EXCHANGE [HASH(functional.alltypes.year,functional.alltypes.month)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-5293: test that noclustered hint prevents sort node when order by is used without
 # limit in insert.
@@ -960,6 +1188,7 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(functional.
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(functional.alltypes.year,functional.alltypes.month)]
 |  partitions=24
@@ -968,6 +1197,7 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(functional.
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # ignore order by with no limit in CTAS
 create table functional.alltypescopy as
@@ -978,12 +1208,14 @@ WRITE TO HDFS [functional.alltypescopy, OVERWRITE=false]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypescopy, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # ignore order by with no limit in union operand
 select * from functional.alltypes
@@ -994,12 +1226,15 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1007,12 +1242,15 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Do not ignore sort over union output
 select * from functional.alltypes
@@ -1023,15 +1261,19 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: bigint_col ASC
+|  row-size=89B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1040,15 +1282,19 @@ PLAN-ROOT SINK
 |
 03:SORT
 |  order by: bigint_col ASC
+|  row-size=89B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Multiple union operands with valid order-by clauses
 select int_col from functional.alltypes order by int_col
@@ -1060,27 +1306,35 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: int_col ASC
+|  row-size=4B cardinality=30
 |
 06:AGGREGATE [FINALIZE]
 |  group by: int_col
+|  row-size=4B cardinality=30
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=4B cardinality=7.32K
 |
 |--05:TOP-N [LIMIT=10 OFFSET=5]
 |  |  order by: int_col ASC
+|  |  row-size=4B cardinality=10
 |  |
 |  04:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 |--03:TOP-N [LIMIT=10]
 |  |  order by: int_col ASC
+|  |  row-size=4B cardinality=10
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1090,17 +1344,21 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: int_col ASC
+|  row-size=4B cardinality=30
 |
 13:AGGREGATE [FINALIZE]
 |  group by: int_col
+|  row-size=4B cardinality=30
 |
 12:EXCHANGE [HASH(int_col)]
 |
 06:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=30
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=4B cardinality=7.32K
 |
 |--11:EXCHANGE [RANDOM]
 |  |
@@ -1111,9 +1369,11 @@ PLAN-ROOT SINK
 |  |
 |  05:TOP-N [LIMIT=15]
 |  |  order by: int_col ASC
+|  |  row-size=4B cardinality=15
 |  |
 |  04:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 |--10:EXCHANGE [RANDOM]
 |  |
@@ -1123,12 +1383,15 @@ PLAN-ROOT SINK
 |  |
 |  03:TOP-N [LIMIT=10]
 |  |  order by: int_col ASC
+|  |  row-size=4B cardinality=10
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # Test slot materialization
 select * from
@@ -1152,66 +1415,85 @@ PLAN-ROOT SINK
 |
 11:SORT
 |  order by: x ASC
+|  row-size=9B cardinality=1
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum(float_col)
 |  group by: bigint_col
 |  having: sum(float_col) > 10
+|  row-size=16B cardinality=1
 |
 09:SELECT
 |  predicates: id < 10
+|  row-size=20B cardinality=1
 |
 08:TOP-N [LIMIT=100]
 |  order by: int_col ASC
+|  row-size=20B cardinality=10
 |
 07:SELECT
 |  predicates: smallint_col < 5
+|  row-size=22B cardinality=10
 |
 06:TOP-N [LIMIT=100]
 |  order by: bigint_col ASC
+|  row-size=22B cardinality=100
 |
 00:UNION
 |  pass-through-operands: 05
+|  row-size=22B cardinality=200
 |
 |--02:TOP-N [LIMIT=100]
 |  |  order by: tinyint_col ASC
+|  |  row-size=23B cardinality=100
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=23B cardinality=7.30K
 |
 05:TOP-N [LIMIT=100]
 |  order by: int_col ASC
+|  row-size=22B cardinality=100
 |
 04:AGGREGATE [FINALIZE]
 |  group by: float_col, bigint_col, int_col, tinyint_col, smallint_col, id
+|  row-size=23B cardinality=7.30K
 |
 03:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=23B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 11:SORT
 |  order by: x ASC
+|  row-size=9B cardinality=1
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum(float_col)
 |  group by: bigint_col
 |  having: sum(float_col) > 10
+|  row-size=16B cardinality=1
 |
 09:SELECT
 |  predicates: id < 10
+|  row-size=20B cardinality=1
 |
 08:TOP-N [LIMIT=100]
 |  order by: int_col ASC
+|  row-size=20B cardinality=10
 |
 07:SELECT
 |  predicates: smallint_col < 5
+|  row-size=22B cardinality=10
 |
 06:TOP-N [LIMIT=100]
 |  order by: bigint_col ASC
+|  row-size=22B cardinality=100
 |
 00:UNION
 |  pass-through-operands: 14
+|  row-size=22B cardinality=200
 |
 |--15:MERGING-EXCHANGE [UNPARTITIONED]
 |  |  order by: tinyint_col ASC
@@ -1219,9 +1501,11 @@ PLAN-ROOT SINK
 |  |
 |  02:TOP-N [LIMIT=100]
 |  |  order by: tinyint_col ASC
+|  |  row-size=23B cardinality=100
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=23B cardinality=7.30K
 |
 14:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: int_col ASC
@@ -1229,17 +1513,21 @@ PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=100]
 |  order by: int_col ASC
+|  row-size=22B cardinality=100
 |
 13:AGGREGATE [FINALIZE]
 |  group by: float_col, bigint_col, int_col, tinyint_col, smallint_col, id
+|  row-size=23B cardinality=7.30K
 |
 12:EXCHANGE [HASH(float_col,bigint_col,int_col,tinyint_col,smallint_col,id)]
 |
 04:AGGREGATE [STREAMING]
 |  group by: float_col, bigint_col, int_col, tinyint_col, smallint_col, id
+|  row-size=23B cardinality=7.30K
 |
 03:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=23B cardinality=7.30K
 ====
 # Test slot materialization IMPALA-1006
 select int_col from
@@ -1253,15 +1541,19 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=10]
 |  order by: bigint_col ASC
+|  row-size=12B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=12B cardinality=22.00K
 |
 |--02:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=12B cardinality=11.00K
 |
 01:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=12B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1271,15 +1563,19 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=10]
 |  order by: bigint_col ASC
+|  row-size=12B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=12B cardinality=22.00K
 |
 |--02:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=12B cardinality=11.00K
 |
 01:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=12B cardinality=11.00K
 ====
 # Sort node is unnecessary (IMPALA-1148).
 select 1 from functional.alltypes order by 1
@@ -1288,6 +1584,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Sort node is unnecessary (IMPALA-1148).
 select a from
@@ -1299,4 +1596,5 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    limit: 1
+   row-size=0B cardinality=1
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/outer-joins.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/outer-joins.test b/testdata/workloads/functional-planner/queries/PlannerTest/outer-joins.test
index d593dd0..5841f8d 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/outer-joins.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/outer-joins.test
@@ -33,23 +33,28 @@ PLAN-ROOT SINK
 |  hash predicates: t1.id = t3.id
 |  other join predicates: t2.id = 15, t1.id - t2.id = 0
 |  other predicates: t3.id IS NOT NULL, t1.zip + t2.zip + t3.zip = 20
+|  row-size=72B cardinality=0
 |
 |--02:SCAN HDFS [functional.testtbl t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.id IS NOT NULL, t3.id > 0, t3.zip = 94720
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.id - 1 = t2.id + 1
 |  other join predicates: t1.zip = 94611
 |  other predicates: t2.id IS NULL, t1.zip + t2.zip = 10
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.zip = 94104
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.id > 0
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -59,27 +64,32 @@ PLAN-ROOT SINK
 |  hash predicates: t1.id = t3.id
 |  other join predicates: t2.id = 15, t1.id - t2.id = 0
 |  other predicates: t3.id IS NOT NULL, t1.zip + t2.zip + t3.zip = 20
+|  row-size=72B cardinality=0
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.testtbl t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.id IS NOT NULL, t3.id > 0, t3.zip = 94720
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash predicates: t1.id - 1 = t2.id + 1
 |  other join predicates: t1.zip = 94611
 |  other predicates: t2.id IS NULL, t1.zip + t2.zip = 10
+|  row-size=48B cardinality=0
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.zip = 94104
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.id > 0
+   row-size=24B cardinality=0
 ====
 # the same thing with subqueries; should produce the same result
 select *
@@ -97,23 +107,28 @@ PLAN-ROOT SINK
 |  hash predicates: a1.id = a3.id
 |  other join predicates: a2.id = 15, a1.id - a2.id = 0
 |  other predicates: a3.id IS NOT NULL, a1.zip + a2.zip + a3.zip = 20
+|  row-size=72B cardinality=0
 |
 |--02:SCAN HDFS [functional.testtbl a3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: a3.id IS NOT NULL, a3.id > 0, a3.zip = 94720
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a1.id - 1 = a2.id + 1
 |  other join predicates: a1.zip = 94611
 |  other predicates: a2.id IS NULL, a1.zip + a2.zip = 10
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl a2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: a2.zip = 94104
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl a1]
    partitions=1/1 files=0 size=0B
    predicates: a1.id > 0
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -123,27 +138,32 @@ PLAN-ROOT SINK
 |  hash predicates: a1.id = a3.id
 |  other join predicates: a2.id = 15, a1.id - a2.id = 0
 |  other predicates: a3.id IS NOT NULL, a1.zip + a2.zip + a3.zip = 20
+|  row-size=72B cardinality=0
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.testtbl a3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: a3.id IS NOT NULL, a3.id > 0, a3.zip = 94720
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash predicates: a1.id - 1 = a2.id + 1
 |  other join predicates: a1.zip = 94611
 |  other predicates: a2.id IS NULL, a1.zip + a2.zip = 10
+|  row-size=48B cardinality=0
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl a2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: a2.zip = 94104
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl a1]
    partitions=1/1 files=0 size=0B
    predicates: a1.id > 0
+   row-size=24B cardinality=0
 ====
 # correct propagation of scan predicates in OJ On clauses:
 # id = 17 must not be applied by the t1 and t3 scans
@@ -158,22 +178,27 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=72B cardinality=0
 |
 |--02:SCAN HDFS [functional.testtbl t3]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.id = t2.id
 |  other join predicates: t1.id = 17
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 17
 |     runtime filters: RF000 -> t2.id
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    runtime filters: RF000 -> t1.id
+   row-size=24B cardinality=0
 ====
 # correct placement of predicates with right outer joins; t1 and t2 are nullable
 select *
@@ -212,24 +237,29 @@ PLAN-ROOT SINK
 |  other join predicates: t3.zip = 94720, t1.id - t2.id = 0
 |  other predicates: t2.id IS NULL, t1.id > 0, t1.zip + t2.zip = 10, t1.zip + t2.zip + t3.zip = 20
 |  runtime filters: RF000 <- t3.id
+|  row-size=72B cardinality=0
 |
 |--02:SCAN HDFS [functional.testtbl t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.id IS NOT NULL
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: t1.id - 1 = t2.id + 1
 |  other join predicates: t1.zip = 94611, t2.zip = 94104
 |  runtime filters: RF002 <- t2.id + 1
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 15
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.id IS NOT NULL, t1.id > 0
    runtime filters: RF000 -> t1.id, RF002 -> t1.id - 1
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -240,12 +270,14 @@ PLAN-ROOT SINK
 |  other join predicates: t3.zip = 94720, t1.id - t2.id = 0
 |  other predicates: t2.id IS NULL, t1.id > 0, t1.zip + t2.zip = 10, t1.zip + t2.zip + t3.zip = 20
 |  runtime filters: RF000 <- t3.id
+|  row-size=72B cardinality=0
 |
 |--08:EXCHANGE [HASH(t3.id)]
 |  |
 |  02:SCAN HDFS [functional.testtbl t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.id IS NOT NULL
+|     row-size=24B cardinality=0
 |
 07:EXCHANGE [HASH(t1.id)]
 |
@@ -253,12 +285,14 @@ PLAN-ROOT SINK
 |  hash predicates: t1.id - 1 = t2.id + 1
 |  other join predicates: t1.zip = 94611, t2.zip = 94104
 |  runtime filters: RF002 <- t2.id + 1
+|  row-size=48B cardinality=0
 |
 |--06:EXCHANGE [HASH(t2.id + 1)]
 |  |
 |  01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 15
+|     row-size=24B cardinality=0
 |
 05:EXCHANGE [HASH(t1.id - 1)]
 |
@@ -266,6 +300,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=0 size=0B
    predicates: t1.id IS NOT NULL, t1.id > 0
    runtime filters: RF000 -> t1.id, RF002 -> t1.id - 1
+   row-size=24B cardinality=0
 ====
 # the same thing with subqueries; should produce the same result
 select *
@@ -283,24 +318,29 @@ PLAN-ROOT SINK
 |  other join predicates: a3.zip = 94720, a1.id - a2.id = 0
 |  other predicates: a2.id IS NULL, a1.id > 0, a1.zip + a2.zip = 10, a1.zip + a2.zip + a3.zip = 20
 |  runtime filters: RF000 <- a3.id
+|  row-size=72B cardinality=0
 |
 |--02:SCAN HDFS [functional.testtbl a3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: a3.id IS NOT NULL
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: a1.id - 1 = a2.id + 1
 |  other join predicates: a1.zip = 94611, a2.zip = 94104
 |  runtime filters: RF002 <- a2.id + 1
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl a2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: a2.id = 15
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl a1]
    partitions=1/1 files=0 size=0B
    predicates: a1.id IS NOT NULL, a1.id > 0
    runtime filters: RF000 -> a1.id, RF002 -> a1.id - 1
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -311,12 +351,14 @@ PLAN-ROOT SINK
 |  other join predicates: a3.zip = 94720, a1.id - a2.id = 0
 |  other predicates: a2.id IS NULL, a1.id > 0, a1.zip + a2.zip = 10, a1.zip + a2.zip + a3.zip = 20
 |  runtime filters: RF000 <- a3.id
+|  row-size=72B cardinality=0
 |
 |--08:EXCHANGE [HASH(a3.id)]
 |  |
 |  02:SCAN HDFS [functional.testtbl a3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: a3.id IS NOT NULL
+|     row-size=24B cardinality=0
 |
 07:EXCHANGE [HASH(a1.id)]
 |
@@ -324,12 +366,14 @@ PLAN-ROOT SINK
 |  hash predicates: a1.id - 1 = a2.id + 1
 |  other join predicates: a1.zip = 94611, a2.zip = 94104
 |  runtime filters: RF002 <- a2.id + 1
+|  row-size=48B cardinality=0
 |
 |--06:EXCHANGE [HASH(a2.id + 1)]
 |  |
 |  01:SCAN HDFS [functional.testtbl a2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: a2.id = 15
+|     row-size=24B cardinality=0
 |
 05:EXCHANGE [HASH(a1.id - 1)]
 |
@@ -337,6 +381,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=0 size=0B
    predicates: a1.id IS NOT NULL, a1.id > 0
    runtime filters: RF000 -> a1.id, RF002 -> a1.id - 1
+   row-size=24B cardinality=0
 ====
 # right outer join requires the join op to be partitioned, otherwise non-matches cause
 # duplicates
@@ -351,13 +396,16 @@ PLAN-ROOT SINK
 |  hash predicates: a.tinyint_col = b.id
 |  other predicates: a.tinyint_col IS NULL
 |  runtime filters: RF000 <- b.id
+|  row-size=5B cardinality=9.78K
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.tinyint_col
+   row-size=1B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -367,17 +415,20 @@ PLAN-ROOT SINK
 |  hash predicates: a.tinyint_col = b.id
 |  other predicates: a.tinyint_col IS NULL
 |  runtime filters: RF000 <- b.id
+|  row-size=5B cardinality=9.78K
 |
 |--04:EXCHANGE [HASH(b.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:EXCHANGE [HASH(a.tinyint_col)]
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.tinyint_col
+   row-size=1B cardinality=11.00K
 ====
 # same for full outer joins
 select a.tinyint_col, b.id
@@ -390,12 +441,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.tinyint_col = b.id
 |  other predicates: a.tinyint_col IS NULL
+|  row-size=5B cardinality=11.01K
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -404,16 +458,19 @@ PLAN-ROOT SINK
 02:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: a.tinyint_col = b.id
 |  other predicates: a.tinyint_col IS NULL
+|  row-size=5B cardinality=11.01K
 |
 |--04:EXCHANGE [HASH(b.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:EXCHANGE [HASH(a.tinyint_col)]
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ====
 # Predicate assignment when query has a full outer join (IMPALA-1371)
 select *
@@ -430,13 +487,16 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id
 |  other join predicates: a.int_col < 10, b.tinyint_col != 5
 |  other predicates: b.bigint_col > 10
+|  row-size=178B cardinality=8.03K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: b.bigint_col > 10
+|     row-size=89B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Predicate assignment when query has a full outer followed by an inner join
 select *
@@ -454,22 +514,27 @@ PLAN-ROOT SINK
 |  hash predicates: c.int_col = a.int_col
 |  other predicates: a.tinyint_col < 10
 |  runtime filters: RF000 <- a.int_col
+|  row-size=267B cardinality=1.07M
 |
 |--04:HASH JOIN [FULL OUTER JOIN]
 |  |  hash predicates: a.id = b.id
 |  |  other predicates: b.tinyint_col > 20
+|  |  row-size=178B cardinality=1.46K
 |  |
 |  |--01:SCAN HDFS [functional.alltypes b]
 |  |     partitions=24/24 files=24 size=478.45KB
 |  |     predicates: b.tinyint_col > 20
+|  |     row-size=89B cardinality=730
 |  |
 |  00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: a.tinyint_col < 10
+|     row-size=89B cardinality=730
 |
 02:SCAN HDFS [functional.alltypes c]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> c.int_col
+   row-size=89B cardinality=7.30K
 ====
 # Predicate assignment when query has an inner join followed by a full outer join
 select *
@@ -489,21 +554,26 @@ PLAN-ROOT SINK
 |  hash predicates: c.int_col = a.int_col
 |  other join predicates: a.bigint_col < 10, a.tinyint_col < b.tinyint_col
 |  other predicates: a.smallint_col = 100, a.float_col > b.float_col
+|  row-size=267B cardinality=532.90K
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: b.id = a.id
 |  |  runtime filters: RF000 <- a.id
+|  |  row-size=178B cardinality=730
 |  |
 |  |--00:SCAN HDFS [functional.alltypes a]
 |  |     partitions=24/24 files=24 size=478.45KB
 |  |     predicates: a.smallint_col = 100
+|  |     row-size=89B cardinality=730
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id
+|     row-size=89B cardinality=7.30K
 |
 02:SCAN HDFS [functional.alltypes c]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Predicate assignment when query has a sequence of full outer joins interleaved with
 # an inner join
@@ -525,30 +595,37 @@ PLAN-ROOT SINK
 |  hash predicates: a.tinyint_col = d.tinyint_col
 |  other join predicates: b.int_col < 20
 |  other predicates: a.float_col < b.float_col, a.bool_col = FALSE
+|  row-size=356B cardinality=2.33G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.tinyint_col = c.tinyint_col
 |  other predicates: b.int_col > 10
 |  runtime filters: RF000 <- c.tinyint_col
+|  row-size=267B cardinality=3.20M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 05:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = b.id
 |  other join predicates: a.int_col < 10
+|  row-size=178B cardinality=4.38K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: b.int_col > 10
+|     row-size=89B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.bool_col = FALSE
    runtime filters: RF000 -> a.tinyint_col
+   row-size=89B cardinality=3.65K
 ====
 # Predicate assignment through an inline-view that has a full outer join
 select 1
@@ -576,28 +653,35 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = d.id
 |  other join predicates: a.bigint_col > b.bigint_col
 |  other predicates: a.tinyint_col != b.tinyint_col
+|  row-size=46B cardinality=5.87M
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: b.smallint_col = c.smallint_col
 |  other predicates: a.int_col < b.int_col, b.id < 10
 |  runtime filters: RF000 <- c.smallint_col
+|  row-size=42B cardinality=5.86M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=6B cardinality=7.30K
 |
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = b.id
+|  row-size=36B cardinality=8.03K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: b.id < 10
 |     runtime filters: RF000 -> b.smallint_col
+|     row-size=19B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # Predicate assignment when query has an inner join followed by full outer join
 select 1
@@ -616,20 +700,25 @@ PLAN-ROOT SINK
 |  hash predicates: b.int_col = c.int_col
 |  other join predicates: c.int_col < 10
 |  other predicates: a.bigint_col = b.bigint_col, a.tinyint_col < b.tinyint_col
+|  row-size=34B cardinality=5.33M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=30B cardinality=7.30K
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=13B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.id
+   row-size=17B cardinality=7.30K
 ====
 # Predicate assignment when query has a sequence of inner, outer and full
 # outer joins
@@ -649,31 +738,38 @@ PLAN-ROOT SINK
 |  hash predicates: b.string_col = d.string_col
 |  other join predicates: a.tinyint_col < b.tinyint_col
 |  other predicates: b.smallint_col = 1, a.float_col = b.float_col, d.tinyint_col < 10
+|  row-size=56B cardinality=38.90M
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: d.tinyint_col < 10
+|     row-size=14B cardinality=730
 |
 05:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: c.int_col = a.int_col
 |  other join predicates: c.tinyint_col = 10
 |  runtime filters: RF000 <- a.int_col
+|  row-size=42B cardinality=532.90K
 |
 |--04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = b.id
 |  |  runtime filters: RF002 <- b.id
+|  |  row-size=37B cardinality=730
 |  |
 |  |--01:SCAN HDFS [functional.alltypes b]
 |  |     partitions=24/24 files=24 size=478.45KB
 |  |     predicates: b.smallint_col = 1
+|  |     row-size=24B cardinality=730
 |  |
 |  00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF002 -> a.id
+|     row-size=13B cardinality=7.30K
 |
 02:SCAN HDFS [functional.alltypes c]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> c.int_col
+   row-size=5B cardinality=7.30K
 ====
 # IMPALA-2144: Test correct assignment of Having-clause predicates
 # referencing a grouping column coming from an outer-joined table ref.
@@ -690,17 +786,21 @@ PLAN-ROOT SINK
 |  output: sum(b.double_col)
 |  group by: a.bool_col, a.int_col, b.bool_col, b.int_col
 |  having: b.bool_col IS NULL, b.int_col IS NOT NULL
+|  row-size=18B cardinality=8
 |
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = b.id
+|  row-size=26B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
 |     predicates: b.int_col IS NOT NULL
+|     row-size=17B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.bool_col IS NULL, a.int_col IS NOT NULL
+   row-size=9B cardinality=730
 ====
 # IMPALA-2144: Same as above but with a full outer join.
 select b.int_col, sum(b.double_col)
@@ -716,17 +816,21 @@ PLAN-ROOT SINK
 |  output: sum(b.double_col)
 |  group by: a.bool_col, a.int_col, b.bool_col, b.int_col
 |  having: a.bool_col IS NULL, a.int_col IS NOT NULL, b.bool_col IS NULL, b.int_col IS NOT NULL
+|  row-size=18B cardinality=8
 |
 02:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = b.id
+|  row-size=26B cardinality=731
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
 |     predicates: b.int_col IS NOT NULL
+|     row-size=17B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col IS NOT NULL
+   row-size=9B cardinality=730
 ====
 # IMPALA-2765: Predicate assignment when outer joins have case expressions as predicates
 select *
@@ -745,14 +849,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: b.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=178B cardinality=730
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: (CASE WHEN a.id > 100 THEN a.timestamp_col ELSE a.timestamp_col END) >= TIMESTAMP '2001-01-01 00:00:00'
+|     row-size=89B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.id
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-3071: Test correct assignment of non-join conjuncts belonging to the On-clause
 # of an inner join that follows an outer join.
@@ -768,21 +875,26 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = b.id
 |  other predicates: b.int_col < 0
 |  runtime filters: RF000 <- b.id
+|  row-size=20B cardinality=1
 |
 |--04:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: a.id = b.id
+|  |  row-size=16B cardinality=1
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: b.int_col < 0
+|  |     row-size=8B cardinality=1
 |  |
 |  00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
 |     predicates: a.int_col > 10
+|     row-size=8B cardinality=1
 |
 02:SCAN HDFS [functional.alltypestiny c]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> c.id
+   row-size=4B cardinality=8
 ====
 # IMPALA-3071: Same as above but with a right outer join.
 select 1 from functional.alltypestiny a
@@ -797,23 +909,28 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = b.id
 |  other predicates: a.int_col > 10
 |  runtime filters: RF000 <- b.id
+|  row-size=20B cardinality=1
 |
 |--04:HASH JOIN [RIGHT OUTER JOIN]
 |  |  hash predicates: a.id = b.id
 |  |  runtime filters: RF002 <- b.id
+|  |  row-size=16B cardinality=1
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: b.int_col < 0
+|  |     row-size=8B cardinality=1
 |  |
 |  00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
 |     predicates: a.int_col > 10
 |     runtime filters: RF002 -> a.id
+|     row-size=8B cardinality=1
 |
 02:SCAN HDFS [functional.alltypestiny c]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> c.id
+   row-size=4B cardinality=8
 ====
 # IMPALA-3071: Same as above but with a full outer join.
 select 1 from functional.alltypestiny a
@@ -828,21 +945,26 @@ PLAN-ROOT SINK
 |  hash predicates: b.id = c.id
 |  other predicates: a.int_col > 10, b.int_col < 0
 |  runtime filters: RF000 <- c.id
+|  row-size=20B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny c]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = b.id
+|  row-size=16B cardinality=2
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
 |     predicates: b.int_col < 0
 |     runtime filters: RF000 -> b.id
+|     row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
    predicates: a.int_col > 10
+   row-size=8B cardinality=1
 ====
 # IMPALA-3071: Test that the inner join On-clause predicate is placed
 # precisely at the correct outer join (not before or after).
@@ -860,34 +982,43 @@ PLAN-ROOT SINK
 |
 09:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: e.id = d.id
+|  row-size=28B cardinality=9
 |
 |--08:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: a.int_col > 10, b.int_col < 0
+|  |  row-size=24B cardinality=1
 |  |
 |  |--07:HASH JOIN [RIGHT OUTER JOIN]
 |  |  |  hash predicates: c.id = b.id
 |  |  |  runtime filters: RF000 <- b.id
+|  |  |  row-size=20B cardinality=1
 |  |  |
 |  |  |--06:HASH JOIN [LEFT OUTER JOIN]
 |  |  |  |  hash predicates: a.id = b.id
+|  |  |  |  row-size=16B cardinality=1
 |  |  |  |
 |  |  |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |  |  |     partitions=4/4 files=4 size=460B
 |  |  |  |     predicates: b.int_col < 0
+|  |  |  |     row-size=8B cardinality=1
 |  |  |  |
 |  |  |  00:SCAN HDFS [functional.alltypestiny a]
 |  |  |     partitions=4/4 files=4 size=460B
 |  |  |     predicates: a.int_col > 10
+|  |  |     row-size=8B cardinality=1
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypestiny c]
 |  |     partitions=4/4 files=4 size=460B
 |  |     runtime filters: RF000 -> c.id
+|  |     row-size=4B cardinality=8
 |  |
 |  03:SCAN HDFS [functional.alltypestiny d]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 04:SCAN HDFS [functional.alltypestiny e]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ====
 # IMPALA-3125: Test that the On-clause predicates from an outer join are assigned to the
 # corresponding outer-join node, even if the predicates do not reference the join rhs.
@@ -903,20 +1034,25 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: b.bigint_col = c.bigint_col
 |  other join predicates: a.id = b.id
+|  row-size=32B cardinality=3.89G
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: b.int_col = a.int_col
 |  runtime filters: RF000 <- a.int_col
+|  row-size=24B cardinality=5.33M
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.int_col
+   row-size=16B cardinality=7.30K
 ====
 # IMPALA-3167: Test correct assignment of a WHERE-clause predicate through an inline view
 # that has a grouping aggregation and an outer join. The predicate can be assigned at the
@@ -936,16 +1072,20 @@ PLAN-ROOT SINK
 |  output: sum(bigint_col)
 |  group by: t2.id
 |  having: v1.id < 10
+|  row-size=12B cardinality=10
 |
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.int_col = t2.int_col
+|  row-size=20B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: t2.id < 10
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # IMPALA-3126: Test assignment of an inner join On-clause predicate. The predicate
 # may not be assigned below the join materializing 'd'.
@@ -961,26 +1101,33 @@ PLAN-ROOT SINK
 |
 06:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: a.int_col = b.int_col
+|  row-size=20B cardinality=7.30K
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 05:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: b.id = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=20B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = b.id
+|  row-size=16B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====
 # IMPALA-3126: Same as above but with a semi join at the end.
 select 1 from functional.alltypes a
@@ -995,26 +1142,33 @@ PLAN-ROOT SINK
 |
 06:NESTED LOOP JOIN [LEFT SEMI JOIN]
 |  join predicates: a.int_col = b.int_col
+|  row-size=20B cardinality=7.30K
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 05:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: b.id = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=20B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = b.id
+|  row-size=16B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====
 # IMPALA-5856: Test correct assignment of a join predicate with the following properties:
 # - from the On-clause of a left outer join
@@ -1034,20 +1188,25 @@ PLAN-ROOT SINK
 |  hash predicates: coalesce(t1.id, t2.id) = t3.id
 |  other join predicates: t1.bigint_col > 10, t2.bigint_col > 30, coalesce(t1.int_col, t2.int_col) = 2
 |  other predicates: t3.string_col = 'test2'
+|  row-size=267B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t3.string_col = 'test2'
+|     row-size=89B cardinality=4
 |
 03:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: t1.id = t2.id
 |  other predicates: concat(t1.string_col, t2.string_col) = 'test1'
+|  row-size=178B cardinality=7.40K
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-6123: Test the partition compatibility check with consecutive outer joins.
 # In this query the output partition of the right outer join should be the rhs partition
@@ -1070,37 +1229,44 @@ PLAN-ROOT SINK
 11:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: t2.id
+|  row-size=12B cardinality=99
 |
 10:EXCHANGE [HASH(t2.id)]
 |
 05:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: t2.id
+|  row-size=12B cardinality=99
 |
 04:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: t2.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=16B cardinality=5.90K
 |
 |--09:EXCHANGE [HASH(t3.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 08:EXCHANGE [HASH(t2.id)]
 |
 03:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: t1.int_col = t2.int_col
+|  row-size=12B cardinality=73.00K
 |
 |--07:EXCHANGE [HASH(t2.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF000 -> t2.id
+|     row-size=8B cardinality=100
 |
 06:EXCHANGE [HASH(t1.int_col)]
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-6123: Test the partition compatibility check with consecutive outer joins.
 # In this query the output partition of the full outer join should be random. There should
@@ -1122,35 +1288,42 @@ PLAN-ROOT SINK
 11:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: t1.int_col
+|  row-size=12B cardinality=10
 |
 10:EXCHANGE [HASH(t1.int_col)]
 |
 05:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: t1.int_col
+|  row-size=12B cardinality=10
 |
 04:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: t1.int_col = t3.int_col
+|  row-size=12B cardinality=3.89G
 |
 |--09:EXCHANGE [HASH(t3.int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 08:EXCHANGE [HASH(t1.int_col)]
 |
 03:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: t1.smallint_col = t2.smallint_col
 |  runtime filters: RF000 <- t2.smallint_col
+|  row-size=8B cardinality=5.33M
 |
 |--07:EXCHANGE [HASH(t2.smallint_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=2B cardinality=7.30K
 |
 06:EXCHANGE [HASH(t1.smallint_col)]
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.smallint_col
+   row-size=6B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test
index 166ba26..d518039 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test
@@ -20,7 +20,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=188.29KB
+   partitions=24/24 files=24 size=189.28KB
    predicates: int_col IS NULL, int_col > CAST(1 AS INT), int_col > CAST(tinyint_col AS INT), CAST(int_col AS DOUBLE) * rand() > CAST(50 AS DOUBLE)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -57,7 +57,8 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=22/24 files=22 size=172.28KB
+   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partitions=22/24 files=22 size=173.16KB
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > '1993-10-01'
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -87,7 +88,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=188.29KB
+   partitions=24/24 files=24 size=189.28KB
    predicates: id IN (int_col), id NOT IN (CAST(0 AS INT), CAST(1 AS INT), CAST(2 AS INT)), int_col % CAST(50 AS INT) IN (CAST(0 AS INT), CAST(1 AS INT)), string_col IN ('aaaa', 'bbbb', 'cccc', NULL)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -168,7 +169,7 @@ PLAN-ROOT SINK
 ====
 # Parquet predicates to be skipped at each level:
 # parquet statistics predicates on c_custkey, o.o_orderkey & l.l_partkey
-# parquet dictionary predicates on c_custkey, o.o_orderkey & l.l_partkey 
+# parquet dictionary predicates on c_custkey, o.o_orderkey & l.l_partkey
 select c_custkey from tpch_nested_parquet.customer c, c.c_orders o,
 o.o_lineitems l where c_custkey > 0 and o.o_orderkey > 0 and l.l_partkey > 0;
 ---- PLAN
@@ -179,7 +180,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=48B cardinality=1500000
+|  tuple-ids=2,1,0 row-size=48B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
@@ -222,16 +223,16 @@ PLAN-ROOT SINK
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.98MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_custkey > CAST(0 AS BIGINT), !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o.o_orderkey > CAST(0 AS BIGINT)
    predicates on l: l.l_partkey > CAST(0 AS BIGINT)
    stored statistics:
-     table: rows=150000 size=288.96MB
+     table: rows=150000 size=288.99MB
      columns missing stats: c_orders
-   extrapolated-rows=disabled max-scan-range-rows=44226
+   extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=264.00MB mem-reservation=16.00MB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=15000
+   tuple-ids=0 row-size=20B cardinality=15.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet filtering to be skipped on multiple collections at the same nested level:
@@ -252,7 +253,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=126B cardinality=15000000
+|  tuple-ids=2,1,0 row-size=126B cardinality=15.00M
 |  in pipelines: 00(GETNEXT)
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
@@ -295,16 +296,16 @@ PLAN-ROOT SINK
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems)
    predicates on l: l.l_shipdate = '1994-08-19', l.l_receiptdate = '1994-08-24', l.l_shipmode = 'RAIL', l.l_returnflag = 'R', l.l_comment IS NULL
    stored statistics:
-     table: rows=150000 size=288.96MB
+     table: rows=150000 size=288.99MB
      columns missing stats: c_orders
-   extrapolated-rows=disabled max-scan-range-rows=44226
+   extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=616.00MB mem-reservation=32.00MB thread-reservation=1
-   tuple-ids=0 row-size=42B cardinality=150000
+   tuple-ids=0 row-size=42B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet filtering to be skipped on a mixed file format table:
@@ -333,7 +334,8 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypesmixedformat]
-   partitions=4/4 files=4 size=66.61KB
+   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partitions=4/4 files=4 size=66.12KB
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > '1993-10-01'
    stored statistics:
      table: rows=unavailable size=unavailable

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test
index 3ebd0aa..6e1f249 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test
@@ -19,7 +19,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    predicates: int_col IS NULL, int_col > CAST(1 AS INT), int_col > CAST(tinyint_col AS INT), CAST(int_col AS DOUBLE) * rand() > CAST(50 AS DOUBLE)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -81,7 +81,8 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=22/24 files=22 size=173.06KB
+   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partitions=22/24 files=22 size=173.16KB
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > '1993-10-01'
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -115,6 +116,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
    partitions=22/24 files=22 size=437.72KB
    predicates: id = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > '1993-10-01'
    stored statistics:
@@ -148,7 +150,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    predicates: id IN (int_col), id NOT IN (CAST(0 AS INT), CAST(1 AS INT), CAST(2 AS INT)), int_col % CAST(50 AS INT) IN (CAST(0 AS INT), CAST(1 AS INT)), string_col IN ('aaaa', 'bbbb', 'cccc', NULL)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -377,7 +379,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=48B cardinality=1500000
+|  tuple-ids=2,1,0 row-size=48B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
@@ -435,7 +437,7 @@ PLAN-ROOT SINK
    parquet dictionary predicates on o: o.o_orderkey > CAST(0 AS BIGINT)
    parquet dictionary predicates on l: l.l_partkey > CAST(0 AS BIGINT)
    mem-estimate=264.00MB mem-reservation=16.00MB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=15000
+   tuple-ids=0 row-size=20B cardinality=15.00K
    in pipelines: 00(GETNEXT)
 ====
 # Test collections in a way that would incorrectly apply a min-max
@@ -502,7 +504,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=126B cardinality=15000000
+|  tuple-ids=2,1,0 row-size=126B cardinality=15.00M
 |  in pipelines: 00(GETNEXT)
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
@@ -556,7 +558,7 @@ PLAN-ROOT SINK
    parquet statistics predicates on l: l.l_shipdate = '1994-08-19', l.l_receiptdate = '1994-08-24', l.l_shipmode = 'RAIL', l.l_returnflag = 'R'
    parquet dictionary predicates on l: l.l_shipdate = '1994-08-19', l.l_receiptdate = '1994-08-24', l.l_shipmode = 'RAIL', l.l_returnflag = 'R'
    mem-estimate=616.00MB mem-reservation=32.00MB thread-reservation=1
-   tuple-ids=0 row-size=42B cardinality=150000
+   tuple-ids=0 row-size=42B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Test a variety of predicates on a mixed format table.
@@ -581,7 +583,8 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypesmixedformat]
-   partitions=4/4 files=4 size=66.09KB
+   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partitions=4/4 files=4 size=66.12KB
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > '1993-10-01'
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -616,6 +619,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypesmixedformat]
+   partition predicates: year != CAST(2009 AS INT), month != CAST(4 AS INT)
    partitions=0/4 files=0 size=0B
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > '1993-10-01'
    stored statistics:


[13/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test b/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
index 7f4a12f..60c9f6d 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
@@ -11,13 +11,13 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=40.00MB Threads=3
@@ -31,19 +31,19 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=231B cardinality=6001215
+|  tuple-ids=0 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=2
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=80.00MB Threads=3
@@ -57,19 +57,19 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.38MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=231B cardinality=6001215
+|  tuple-ids=0 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
 Per-Host Resources: mem-estimate=160.00MB mem-reservation=80.00MB thread-reservation=2
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Single column parquet scan - memory reservation is reduced compared to multi-column
@@ -86,13 +86,13 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=38B cardinality=6001215
+   tuple-ids=0 row-size=38B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=4.00MB Threads=3
@@ -106,19 +106,19 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.12MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=38B cardinality=6001215
+|  tuple-ids=0 row-size=38B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=2
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=38B cardinality=6001215
+   tuple-ids=0 row-size=38B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=8.00MB Threads=3
@@ -132,19 +132,19 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.25MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=38B cardinality=6001215
+|  tuple-ids=0 row-size=38B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
 Per-Host Resources: mem-estimate=160.00MB mem-reservation=8.00MB thread-reservation=2
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=0
-   tuple-ids=0 row-size=38B cardinality=6001215
+   tuple-ids=0 row-size=38B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Small parquet files - memory reservation is reduced because of small file size.
@@ -162,7 +162,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -191,7 +191,7 @@ PLAN-ROOT SINK
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=16.00MB mem-reservation=16.00KB thread-reservation=2
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -220,7 +220,7 @@ PLAN-ROOT SINK
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
 Per-Host Resources: mem-estimate=32.00MB mem-reservation=32.00KB thread-reservation=2
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -247,7 +247,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -277,7 +277,7 @@ PLAN-ROOT SINK
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=16.00MB mem-reservation=24.00KB thread-reservation=2
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -307,7 +307,7 @@ PLAN-ROOT SINK
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
 Per-Host Resources: mem-estimate=32.00MB mem-reservation=48.00KB thread-reservation=2
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -333,7 +333,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -362,7 +362,7 @@ PLAN-ROOT SINK
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=1.00MB mem-reservation=16.00KB thread-reservation=2
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -391,7 +391,7 @@ PLAN-ROOT SINK
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
 Per-Host Resources: mem-estimate=32.00MB mem-reservation=32.00KB thread-reservation=2
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -421,7 +421,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=150000
    mem-estimate=24.00MB mem-reservation=128.00KB thread-reservation=1
-   tuple-ids=0 row-size=2B cardinality=150000
+   tuple-ids=0 row-size=2B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet scan - reservation estimate based on uncompressed size reduces reservation
@@ -444,7 +444,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=150000
    mem-estimate=24.00MB mem-reservation=2.00MB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=150000
+   tuple-ids=0 row-size=8B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet scan - reservation estimate is not reduced based on column stats when they
@@ -494,7 +494,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=24.00MB thread-reservation=1
-   tuple-ids=0 row-size=64B cardinality=1500000
+   tuple-ids=0 row-size=64B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ====
 # Parquet nested types, unnested in scan - don't reserve extra memory for "pos" virtual
@@ -519,7 +519,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=16B cardinality=1500000
+   tuple-ids=0 row-size=16B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ====
 # Parquet nested types, unnested in scan - reserve memory for "pos" virtual column if it
@@ -543,7 +543,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=1500000
+   tuple-ids=0 row-size=8B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ====
 # Parquet nested types, left nested in scan - should reserve memory for each scalar
@@ -567,7 +567,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=1,0 row-size=84B cardinality=1500000
+|  tuple-ids=1,0 row-size=84B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
@@ -595,7 +595,7 @@ PLAN-ROOT SINK
      columns missing stats: c_orders
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=32.00MB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=150000
+   tuple-ids=0 row-size=20B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet nested types, left nested in scan - should reserve memory for each scalar
@@ -617,7 +617,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=1,0 row-size=36B cardinality=1500000
+|  tuple-ids=1,0 row-size=36B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
@@ -645,7 +645,7 @@ PLAN-ROOT SINK
      columns missing stats: c_orders
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=150000
+   tuple-ids=0 row-size=20B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet nested types, left nested in scan - should reserve memory for virtual
@@ -668,7 +668,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=1,0 row-size=28B cardinality=1500000
+|  tuple-ids=1,0 row-size=28B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
@@ -696,7 +696,7 @@ PLAN-ROOT SINK
      columns missing stats: c_orders
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=150000
+   tuple-ids=0 row-size=20B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet nested types, left nested in scan - should reserve memory for nested column
@@ -718,7 +718,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=1,0 row-size=20B cardinality=1500000
+|  tuple-ids=1,0 row-size=20B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
@@ -746,7 +746,7 @@ PLAN-ROOT SINK
      columns missing stats: c_orders
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=150000
+   tuple-ids=0 row-size=20B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet nested types, left nested in scan - should reserve memory for nested column
@@ -768,7 +768,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=1,0 row-size=20B cardinality=1500000
+|  tuple-ids=1,0 row-size=20B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
@@ -796,7 +796,7 @@ PLAN-ROOT SINK
      columns: unavailable
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=12B cardinality=150000
+   tuple-ids=0 row-size=12B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Parquet nested types with two levels of nesting materialized in scan. Should
@@ -818,7 +818,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=52B cardinality=15000000
+|  tuple-ids=2,1,0 row-size=52B cardinality=15.00M
 |  in pipelines: 00(GETNEXT)
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
@@ -869,7 +869,7 @@ PLAN-ROOT SINK
      columns missing stats: c_orders
    extrapolated-rows=disabled max-scan-range-rows=44225
    mem-estimate=88.00MB mem-reservation=16.00MB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=150000
+   tuple-ids=0 row-size=20B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Text scan
@@ -891,7 +891,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=8.00MB Threads=3
@@ -905,7 +905,7 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=231B cardinality=6001215
+|  tuple-ids=0 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -917,7 +917,7 @@ Per-Host Resources: mem-estimate=88.00MB mem-reservation=8.00MB thread-reservati
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=16.00MB Threads=3
@@ -931,7 +931,7 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.38MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=231B cardinality=6001215
+|  tuple-ids=0 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -943,7 +943,7 @@ Per-Host Resources: mem-estimate=176.00MB mem-reservation=16.00MB thread-reserva
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Single column text scan - memory reservation is same as multi-column scan.
@@ -965,7 +965,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=38B cardinality=6001215
+   tuple-ids=0 row-size=38B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=8.00MB Threads=3
@@ -979,7 +979,7 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.12MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=38B cardinality=6001215
+|  tuple-ids=0 row-size=38B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -991,7 +991,7 @@ Per-Host Resources: mem-estimate=88.00MB mem-reservation=8.00MB thread-reservati
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=38B cardinality=6001215
+   tuple-ids=0 row-size=38B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=16.00MB Threads=3
@@ -1005,7 +1005,7 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.25MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=38B cardinality=6001215
+|  tuple-ids=0 row-size=38B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -1017,7 +1017,7 @@ Per-Host Resources: mem-estimate=176.00MB mem-reservation=16.00MB thread-reserva
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-   tuple-ids=0 row-size=38B cardinality=6001215
+   tuple-ids=0 row-size=38B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Text scan on small files - memory reservation is reduced.
@@ -1041,7 +1041,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=16.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=32.00KB Threads=3
@@ -1056,7 +1056,7 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=490.49KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=89B cardinality=7300
+|  tuple-ids=0 row-size=89B cardinality=7.30K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -1069,7 +1069,7 @@ Per-Host Resources: mem-estimate=16.00MB mem-reservation=32.00KB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=16.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=64.00KB Threads=3
@@ -1084,7 +1084,7 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=769.49KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=89B cardinality=7300
+|  tuple-ids=0 row-size=89B cardinality=7.30K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -1097,7 +1097,7 @@ Per-Host Resources: mem-estimate=32.00MB mem-reservation=64.00KB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=16.00MB mem-reservation=32.00KB thread-reservation=0
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Avro scan.
@@ -1359,7 +1359,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1075682
    mem-estimate=40.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Single column ORC scan - memory reservation is same as multi-column scan.
@@ -1381,7 +1381,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1075682
    mem-estimate=40.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=38B cardinality=6001215
+   tuple-ids=0 row-size=38B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # ORC scan on small files - memory reservation is reduced.
@@ -1424,7 +1424,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.alltypesmixedformat]
-   partitions=4/4 files=4 size=66.09KB
+   partitions=4/4 files=4 size=66.12KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/4 rows=unavailable
@@ -1453,7 +1453,7 @@ PLAN-ROOT SINK
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=16.00MB mem-reservation=88.00KB thread-reservation=2
 00:SCAN HDFS [functional.alltypesmixedformat, RANDOM]
-   partitions=4/4 files=4 size=66.09KB
+   partitions=4/4 files=4 size=66.12KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/4 rows=unavailable
@@ -1482,7 +1482,7 @@ PLAN-ROOT SINK
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
 Per-Host Resources: mem-estimate=32.00MB mem-reservation=176.00KB thread-reservation=4
 00:SCAN HDFS [functional.alltypesmixedformat, RANDOM]
-   partitions=4/4 files=4 size=66.09KB
+   partitions=4/4 files=4 size=66.12KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/4 rows=unavailable
@@ -1511,7 +1511,7 @@ PLAN-ROOT SINK
      table: rows=unavailable
      columns: unavailable
    mem-estimate=256.00KB mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=80B cardinality=14298
+   tuple-ids=0 row-size=80B cardinality=14.30K
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=0B Threads=2
@@ -1527,7 +1527,7 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=1.17MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=80B cardinality=14298
+|  tuple-ids=0 row-size=80B cardinality=14.30K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
@@ -1537,7 +1537,7 @@ Per-Host Resources: mem-estimate=256.00KB mem-reservation=0B thread-reservation=
      table: rows=unavailable
      columns: unavailable
    mem-estimate=256.00KB mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=80B cardinality=14298
+   tuple-ids=0 row-size=80B cardinality=14.30K
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=0B Threads=3
@@ -1553,7 +1553,7 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=1.25MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=80B cardinality=14298
+|  tuple-ids=0 row-size=80B cardinality=14.30K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
@@ -1563,7 +1563,7 @@ Per-Host Resources: mem-estimate=512.00KB mem-reservation=0B thread-reservation=
      table: rows=unavailable
      columns: unavailable
    mem-estimate=256.00KB mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=80B cardinality=14298
+   tuple-ids=0 row-size=80B cardinality=14.30K
    in pipelines: 00(GETNEXT)
 ====
 # HBase scan on table with stats.
@@ -1653,7 +1653,7 @@ PLAN-ROOT SINK
 |
 00:SCAN DATA SOURCE [functional.alltypes_datasource]
    mem-estimate=1.00GB mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=112B cardinality=5000
+   tuple-ids=0 row-size=112B cardinality=5.00K
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=0B Threads=2
@@ -1669,14 +1669,14 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=662.88KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=112B cardinality=5000
+|  tuple-ids=0 row-size=112B cardinality=5.00K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
 Per-Host Resources: mem-estimate=1.00GB mem-reservation=0B thread-reservation=1
 00:SCAN DATA SOURCE [functional.alltypes_datasource]
    mem-estimate=1.00GB mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=112B cardinality=5000
+   tuple-ids=0 row-size=112B cardinality=5.00K
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=0B Threads=3
@@ -1692,14 +1692,14 @@ PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=778.88KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=112B cardinality=5000
+|  tuple-ids=0 row-size=112B cardinality=5.00K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
 Per-Host Resources: mem-estimate=2.00GB mem-reservation=0B thread-reservation=2
 00:SCAN DATA SOURCE [functional.alltypes_datasource]
    mem-estimate=1.00GB mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=112B cardinality=5000
+   tuple-ids=0 row-size=112B cardinality=5.00K
    in pipelines: 00(GETNEXT)
 ====
 # Union
@@ -1720,7 +1720,7 @@ PLAN-ROOT SINK
 00:UNION
 |  pass-through-operands: all
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=231B cardinality=12002430
+|  tuple-ids=2 row-size=231B cardinality=12.00M
 |  in pipelines: 01(GETNEXT), 02(GETNEXT)
 |
 |--02:SCAN HDFS [tpch.lineitem]
@@ -1730,7 +1730,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1068457
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=231B cardinality=6001215
+|     tuple-ids=1 row-size=231B cardinality=6.00M
 |     in pipelines: 02(GETNEXT)
 |
 01:SCAN HDFS [tpch.lineitem]
@@ -1740,7 +1740,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 01(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=8.00MB Threads=3
@@ -1755,7 +1755,7 @@ PLAN-ROOT SINK
 |
 03:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=231B cardinality=12002430
+|  tuple-ids=2 row-size=231B cardinality=12.00M
 |  in pipelines: 01(GETNEXT), 02(GETNEXT)
 |
 F02:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -1763,7 +1763,7 @@ Per-Host Resources: mem-estimate=88.00MB mem-reservation=8.00MB thread-reservati
 00:UNION
 |  pass-through-operands: all
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=231B cardinality=12002430
+|  tuple-ids=2 row-size=231B cardinality=12.00M
 |  in pipelines: 01(GETNEXT), 02(GETNEXT)
 |
 |--02:SCAN HDFS [tpch.lineitem, RANDOM]
@@ -1773,7 +1773,7 @@ Per-Host Resources: mem-estimate=88.00MB mem-reservation=8.00MB thread-reservati
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1068457
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=231B cardinality=6001215
+|     tuple-ids=1 row-size=231B cardinality=6.00M
 |     in pipelines: 02(GETNEXT)
 |
 01:SCAN HDFS [tpch.lineitem, RANDOM]
@@ -1783,7 +1783,7 @@ Per-Host Resources: mem-estimate=88.00MB mem-reservation=8.00MB thread-reservati
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 01(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=16.00MB Threads=3
@@ -1798,7 +1798,7 @@ PLAN-ROOT SINK
 |
 03:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.38MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=231B cardinality=12002430
+|  tuple-ids=2 row-size=231B cardinality=12.00M
 |  in pipelines: 01(GETNEXT), 02(GETNEXT)
 |
 F02:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -1806,7 +1806,7 @@ Per-Host Resources: mem-estimate=176.00MB mem-reservation=16.00MB thread-reserva
 00:UNION
 |  pass-through-operands: all
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=231B cardinality=12002430
+|  tuple-ids=2 row-size=231B cardinality=12.00M
 |  in pipelines: 01(GETNEXT), 02(GETNEXT)
 |
 |--02:SCAN HDFS [tpch.lineitem, RANDOM]
@@ -1816,7 +1816,7 @@ Per-Host Resources: mem-estimate=176.00MB mem-reservation=16.00MB thread-reserva
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1068457
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-|     tuple-ids=1 row-size=231B cardinality=6001215
+|     tuple-ids=1 row-size=231B cardinality=6.00M
 |     in pipelines: 02(GETNEXT)
 |
 01:SCAN HDFS [tpch.lineitem, RANDOM]
@@ -1826,7 +1826,7 @@ Per-Host Resources: mem-estimate=176.00MB mem-reservation=16.00MB thread-reserva
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 01(GETNEXT)
 ====
 # Grouping aggregation
@@ -1848,17 +1848,17 @@ PLAN-ROOT SINK
 |  output: count(*)
 |  group by: l_orderkey
 |  mem-estimate=34.00MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=46.50MB Threads=4
@@ -1873,7 +1873,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=8.01MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 03(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(l_orderkey)] hosts=3 instances=3
@@ -1882,12 +1882,12 @@ Per-Host Resources: mem-estimate=18.01MB mem-reservation=8.50MB thread-reservati
 |  output: count:merge(*)
 |  group by: l_orderkey
 |  mem-estimate=10.00MB mem-reservation=8.50MB spill-buffer=512.00KB thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 03(GETNEXT), 00(OPEN)
 |
 02:EXCHANGE [HASH(l_orderkey)]
 |  mem-estimate=8.01MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -1896,17 +1896,17 @@ Per-Host Resources: mem-estimate=114.00MB mem-reservation=38.00MB thread-reserva
 |  output: count(*)
 |  group by: l_orderkey
 |  mem-estimate=34.00MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=85.50MB Threads=5
@@ -1921,7 +1921,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=8.07MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 03(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(l_orderkey)] hosts=3 instances=6
@@ -1930,12 +1930,12 @@ Per-Host Resources: mem-estimate=36.14MB mem-reservation=9.50MB thread-reservati
 |  output: count:merge(*)
 |  group by: l_orderkey
 |  mem-estimate=10.00MB mem-reservation=4.75MB spill-buffer=256.00KB thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 03(GETNEXT), 00(OPEN)
 |
 02:EXCHANGE [HASH(l_orderkey)]
 |  mem-estimate=8.07MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -1944,17 +1944,17 @@ Per-Host Resources: mem-estimate=228.00MB mem-reservation=76.00MB thread-reserva
 |  output: count(*)
 |  group by: l_orderkey
 |  mem-estimate=34.00MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=1563438
+|  tuple-ids=1 row-size=16B cardinality=1.56M
 |  in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=0
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Non-grouping aggregation with zero-slot parquet scan
@@ -1976,13 +1976,13 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=1.00MB mem-reservation=128.00KB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=128.00KB Threads=3
@@ -2014,13 +2014,13 @@ Per-Host Resources: mem-estimate=11.00MB mem-reservation=128.00KB thread-reserva
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=1.00MB mem-reservation=128.00KB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=256.00KB Threads=3
@@ -2052,13 +2052,13 @@ Per-Host Resources: mem-estimate=180.00MB mem-reservation=256.00KB thread-reserv
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=128.00KB thread-reservation=0
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Sort
@@ -2078,17 +2078,17 @@ PLAN-ROOT SINK
 01:SORT
 |  order by: l_comment ASC
 |  mem-estimate=38.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=52.00MB Threads=3
@@ -2103,7 +2103,7 @@ PLAN-ROOT SINK
 02:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: l_comment ASC
 |  mem-estimate=30.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 01(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -2111,17 +2111,17 @@ Per-Host Resources: mem-estimate=118.00MB mem-reservation=52.00MB thread-reserva
 01:SORT
 |  order by: l_comment ASC
 |  mem-estimate=38.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=104.00MB Threads=3
@@ -2136,7 +2136,7 @@ PLAN-ROOT SINK
 02:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: l_comment ASC
 |  mem-estimate=61.38MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 01(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -2144,17 +2144,17 @@ Per-Host Resources: mem-estimate=236.00MB mem-reservation=104.00MB thread-reserv
 01:SORT
 |  order by: l_comment ASC
 |  mem-estimate=38.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # TOP-N
@@ -2180,13 +2180,13 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=40.00MB Threads=3
@@ -2215,13 +2215,13 @@ Per-Host Resources: mem-estimate=80.02MB mem-reservation=40.00MB thread-reservat
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=80.00MB Threads=3
@@ -2250,13 +2250,13 @@ Per-Host Resources: mem-estimate=160.04MB mem-reservation=80.00MB thread-reserva
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Broadcast Hash Join
@@ -2278,7 +2278,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=268.94MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpch.orders]
@@ -2288,7 +2288,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch.lineitem]
@@ -2299,7 +2299,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=51.00MB Threads=5
@@ -2314,7 +2314,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.20MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -2324,12 +2324,12 @@ Per-Host Resources: mem-estimate=368.29MB mem-reservation=43.00MB thread-reserva
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=268.94MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.34MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -2341,7 +2341,7 @@ Per-Host Resources: mem-estimate=368.29MB mem-reservation=43.00MB thread-reserva
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch.lineitem, RANDOM]
@@ -2352,7 +2352,7 @@ Per-Host Resources: mem-estimate=368.29MB mem-reservation=43.00MB thread-reserva
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=102.00MB Threads=5
@@ -2367,7 +2367,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=12.40MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -2378,7 +2378,7 @@ Per-Host Resources: mem-estimate=715.89MB mem-reservation=86.00MB thread-reserva
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=268.94MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -2390,7 +2390,7 @@ Per-Host Resources: mem-estimate=715.89MB mem-reservation=86.00MB thread-reserva
 |  |
 |  03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.68MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -2402,7 +2402,7 @@ Per-Host Resources: mem-estimate=715.89MB mem-reservation=86.00MB thread-reserva
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch.lineitem, RANDOM]
@@ -2413,7 +2413,7 @@ Per-Host Resources: mem-estimate=715.89MB mem-reservation=86.00MB thread-reserva
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Shuffle Hash Join
@@ -2436,7 +2436,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=268.94MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpch.orders]
@@ -2446,7 +2446,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch.lineitem]
@@ -2457,7 +2457,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=52.00MB Threads=6
@@ -2473,7 +2473,7 @@ PLAN-ROOT SINK
 |
 05:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.20MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT)
 |
 F02:PLAN FRAGMENT [HASH(l_orderkey)] hosts=3 instances=3
@@ -2483,12 +2483,12 @@ Per-Host Resources: mem-estimate=111.68MB mem-reservation=35.00MB thread-reserva
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=89.65MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--04:EXCHANGE [HASH(o_orderkey)]
 |  |  mem-estimate=10.34MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -2500,12 +2500,12 @@ Per-Host Resources: mem-estimate=111.68MB mem-reservation=35.00MB thread-reserva
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 03:EXCHANGE [HASH(l_orderkey)]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=231B cardinality=6001215
+|  tuple-ids=0 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -2518,7 +2518,7 @@ Per-Host Resources: mem-estimate=89.00MB mem-reservation=9.00MB thread-reservati
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=104.00MB Threads=7
@@ -2534,7 +2534,7 @@ PLAN-ROOT SINK
 |
 05:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=12.40MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT)
 |
 F02:PLAN FRAGMENT [HASH(l_orderkey)] hosts=3 instances=6
@@ -2545,7 +2545,7 @@ Per-Host Resources: mem-estimate=114.40MB mem-reservation=70.00MB thread-reserva
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=44.82MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=5757710
+|  tuple-ids=0,1 row-size=402B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--F04:PLAN FRAGMENT [HASH(l_orderkey)] hosts=2 instances=4
@@ -2557,7 +2557,7 @@ Per-Host Resources: mem-estimate=114.40MB mem-reservation=70.00MB thread-reserva
 |  |
 |  04:EXCHANGE [HASH(o_orderkey)]
 |  |  mem-estimate=10.68MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -2569,12 +2569,12 @@ Per-Host Resources: mem-estimate=114.40MB mem-reservation=70.00MB thread-reserva
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 03:EXCHANGE [HASH(l_orderkey)]
 |  mem-estimate=11.38MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=231B cardinality=6001215
+|  tuple-ids=0 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -2587,7 +2587,7 @@ Per-Host Resources: mem-estimate=178.00MB mem-reservation=18.00MB thread-reserva
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Nested loop join
@@ -2605,7 +2605,7 @@ PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
 |  mem-estimate=244.49MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=9001822500000
+|  tuple-ids=0,1 row-size=402B cardinality=9.00T
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [tpch.orders]
@@ -2615,7 +2615,7 @@ PLAN-ROOT SINK
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch.lineitem]
@@ -2625,7 +2625,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=16.00MB Threads=5
@@ -2639,19 +2639,19 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.20MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=9001822500000
+|  tuple-ids=0,1 row-size=402B cardinality=9.00T
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=342.84MB mem-reservation=8.00MB thread-reservation=2
 02:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
 |  mem-estimate=244.49MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=9001822500000
+|  tuple-ids=0,1 row-size=402B cardinality=9.00T
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.34MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -2663,7 +2663,7 @@ Per-Host Resources: mem-estimate=342.84MB mem-reservation=8.00MB thread-reservat
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch.lineitem, RANDOM]
@@ -2673,7 +2673,7 @@ Per-Host Resources: mem-estimate=342.84MB mem-reservation=8.00MB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=32.00MB Threads=5
@@ -2687,7 +2687,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=12.40MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=9001822500000
+|  tuple-ids=0,1 row-size=402B cardinality=9.00T
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -2695,7 +2695,7 @@ Per-Host Resources: mem-estimate=686.35MB mem-reservation=16.00MB thread-reserva
 02:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
 |  join table id: 00
 |  mem-estimate=244.49MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=402B cardinality=9001822500000
+|  tuple-ids=0,1 row-size=402B cardinality=9.00T
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -2706,7 +2706,7 @@ Per-Host Resources: mem-estimate=686.35MB mem-reservation=16.00MB thread-reserva
 |  |
 |  03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.68MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -2718,7 +2718,7 @@ Per-Host Resources: mem-estimate=686.35MB mem-reservation=16.00MB thread-reserva
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=1181132
 |     mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch.lineitem, RANDOM]
@@ -2728,7 +2728,7 @@ Per-Host Resources: mem-estimate=686.35MB mem-reservation=16.00MB thread-reserva
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1068457
    mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Empty set node
@@ -2746,7 +2746,7 @@ PLAN-ROOT SINK
 |
 00:EMPTYSET
    mem-estimate=0B mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=0B cardinality=0
+   tuple-ids=0
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=0B Threads=1
@@ -2761,7 +2761,7 @@ PLAN-ROOT SINK
 |
 00:EMPTYSET
    mem-estimate=0B mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=0B cardinality=0
+   tuple-ids=0
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=0B Threads=1
@@ -2776,7 +2776,7 @@ PLAN-ROOT SINK
 |
 00:EMPTYSET
    mem-estimate=0B mem-reservation=0B thread-reservation=0
-   tuple-ids=0 row-size=0B cardinality=0
+   tuple-ids=0
    in pipelines: 00(GETNEXT)
 ====
 # Analytic function
@@ -2798,13 +2798,13 @@ PLAN-ROOT SINK
 |  functions: max(tinyint_col)
 |  partition by: int_col
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3,2 row-size=6B cardinality=7300
+|  tuple-ids=3,2 row-size=6B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST
 |  mem-estimate=6.00MB mem-reservation=6.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3 row-size=5B cardinality=7300
+|  tuple-ids=3 row-size=5B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -2815,7 +2815,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=16.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=5B cardinality=7300
+   tuple-ids=0 row-size=5B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=10.03MB Threads=4
@@ -2831,7 +2831,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=56.26KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=3,2 row-size=6B cardinality=7300
+|  tuple-ids=3,2 row-size=6B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(int_col)] hosts=3 instances=3
@@ -2840,18 +2840,18 @@ Per-Host Resources: mem-estimate=10.04MB mem-reservation=10.00MB thread-reservat
 |  functions: max(tinyint_col)
 |  partition by: int_col
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3,2 row-size=6B cardinality=7300
+|  tuple-ids=3,2 row-size=6B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST
 |  mem-estimate=6.00MB mem-reservation=6.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3 row-size=5B cardinality=7300
+|  tuple-ids=3 row-size=5B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 03:EXCHANGE [HASH(int_col)]
 |  mem-estimate=38.88KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=5B cardinality=7300
+|  tuple-ids=0 row-size=5B cardinality=7.30K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -2864,7 +2864,7 @@ Per-Host Resources: mem-estimate=16.00MB mem-reservation=32.00KB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=16.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=5B cardinality=7300
+   tuple-ids=0 row-size=5B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=20.06MB Threads=5
@@ -2880,7 +2880,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=98.26KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=3,2 row-size=6B cardinality=7300
+|  tuple-ids=3,2 row-size=6B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(int_col)] hosts=3 instances=6
@@ -2889,18 +2889,18 @@ Per-Host Resources: mem-estimate=20.13MB mem-reservation=20.00MB thread-reservat
 |  functions: max(tinyint_col)
 |  partition by: int_col
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3,2 row-size=6B cardinality=7300
+|  tuple-ids=3,2 row-size=6B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST
 |  mem-estimate=6.00MB mem-reservation=6.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3 row-size=5B cardinality=7300
+|  tuple-ids=3 row-size=5B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 03:EXCHANGE [HASH(int_col)]
 |  mem-estimate=65.88KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=5B cardinality=7300
+|  tuple-ids=0 row-size=5B cardinality=7.30K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -2913,7 +2913,7 @@ Per-Host Resources: mem-estimate=32.00MB mem-reservation=64.00KB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=16.00MB mem-reservation=32.00KB thread-reservation=0
-   tuple-ids=0 row-size=5B cardinality=7300
+   tuple-ids=0 row-size=5B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Pipeline of blocking operators from analytic fns. Blocking operators break
@@ -2939,13 +2939,13 @@ PLAN-ROOT SINK
 |  order by: o_orderpriority ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=10,5 row-size=195B cardinality=1500000
+|  tuple-ids=10,5 row-size=195B cardinality=1.50M
 |  in pipelines: 05(GETNEXT)
 |
 05:SORT
 |  order by: o_orderpriority ASC
 |  mem-estimate=18.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=10 row-size=187B cardinality=1500000
+|  tuple-ids=10 row-size=187B cardinality=1.50M
 |  in pipelines: 05(GETNEXT), 03(OPEN)
 |
 04:ANALYTIC
@@ -2953,13 +2953,13 @@ PLAN-ROOT SINK
 |  order by: o_orderdate ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=8,4 row-size=187B cardinality=1500000
+|  tuple-ids=8,4 row-size=187B cardinality=1.50M
 |  in pipelines: 03(GETNEXT)
 |
 03:SORT
 |  order by: o_orderdate ASC
 |  mem-estimate=16.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=8 row-size=179B cardinality=1500000
+|  tuple-ids=8 row-size=179B cardinality=1.50M
 |  in pipelines: 03(GETNEXT), 01(OPEN)
 |
 02:ANALYTIC
@@ -2967,13 +2967,13 @@ PLAN-ROOT SINK
 |  order by: o_totalprice ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=6,3 row-size=179B cardinality=1500000
+|  tuple-ids=6,3 row-size=179B cardinality=1.50M
 |  in pipelines: 01(GETNEXT)
 |
 01:SORT
 |  order by: o_totalprice ASC
 |  mem-estimate=16.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=6 row-size=171B cardinality=1500000
+|  tuple-ids=6 row-size=171B cardinality=1.50M
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.orders]
@@ -2981,9 +2981,9 @@ PLAN-ROOT SINK
    stored statistics:
      table: rows=1500000 size=54.07MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=1177135
+   extrapolated-rows=disabled max-scan-range-rows=1177136
    mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=1
-   tuple-ids=0 row-size=171B cardinality=1500000
+   tuple-ids=0 row-size=171B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=68.00MB Threads=3
@@ -3002,13 +3002,13 @@ PLAN-ROOT SINK
 |  order by: o_orderpriority ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=10,5 row-size=195B cardinality=1500000
+|  tuple-ids=10,5 row-size=195B cardinality=1.50M
 |  in pipelines: 05(GETNEXT)
 |
 05:SORT
 |  order by: o_orderpriority ASC
 |  mem-estimate=18.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=10 row-size=187B cardinality=1500000
+|  tuple-ids=10 row-size=187B cardinality=1.50M
 |  in pipelines: 05(GETNEXT), 03(OPEN)
 |
 04:ANALYTIC
@@ -3016,13 +3016,13 @@ PLAN-ROOT SINK
 |  order by: o_orderdate ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=8,4 row-size=187B cardinality=1500000
+|  tuple-ids=8,4 row-size=187B cardinality=1.50M
 |  in pipelines: 03(GETNEXT)
 |
 03:SORT
 |  order by: o_orderdate ASC
 |  mem-estimate=16.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=8 row-size=179B cardinality=1500000
+|  tuple-ids=8 row-size=179B cardinality=1.50M
 |  in pipelines: 03(GETNEXT), 01(OPEN)
 |
 02:ANALYTIC
@@ -3030,13 +3030,13 @@ PLAN-ROOT SINK
 |  order by: o_totalprice ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=6,3 row-size=179B cardinality=1500000
+|  tuple-ids=6,3 row-size=179B cardinality=1.50M
 |  in pipelines: 01(GETNEXT)
 |
 07:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: o_totalprice ASC
 |  mem-estimate=20.34MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=6 row-size=171B cardinality=1500000
+|  tuple-ids=6 row-size=171B cardinality=1.50M
 |  in pipelines: 01(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -3044,7 +3044,7 @@ Per-Host Resources: mem-estimate=56.00MB mem-reservation=36.00MB thread-reservat
 01:SORT
 |  order by: o_totalprice ASC
 |  mem-estimate=16.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=6 row-size=171B cardinality=1500000
+|  tuple-ids=6 row-size=171B cardinality=1.50M
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.orders, RANDOM]
@@ -3052,9 +3052,9 @@ Per-Host Resources: mem-estimate=56.00MB mem-reservation=36.00MB thread-reservat
    stored statistics:
      table: rows=1500000 size=54.07MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=1177135
+   extrapolated-rows=disabled max-scan-range-rows=1177136
    mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=1
-   tuple-ids=0 row-size=171B cardinality=1500000
+   tuple-ids=0 row-size=171B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=104.00MB Threads=3
@@ -3073,13 +3073,13 @@ PLAN-ROOT SINK
 |  order by: o_orderpriority ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=10,5 row-size=195B cardinality=1500000
+|  tuple-ids=10,5 row-size=195B cardinality=1.50M
 |  in pipelines: 05(GETNEXT)
 |
 05:SORT
 |  order by: o_orderpriority ASC
 |  mem-estimate=18.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=10 row-size=187B cardinality=1500000
+|  tuple-ids=10 row-size=187B cardinality=1.50M
 |  in pipelines: 05(GETNEXT), 03(OPEN)
 |
 04:ANALYTIC
@@ -3087,13 +3087,13 @@ PLAN-ROOT SINK
 |  order by: o_orderdate ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=8,4 row-size=187B cardinality=1500000
+|  tuple-ids=8,4 row-size=187B cardinality=1.50M
 |  in pipelines: 03(GETNEXT)
 |
 03:SORT
 |  order by: o_orderdate ASC
 |  mem-estimate=16.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=8 row-size=179B cardinality=1500000
+|  tuple-ids=8 row-size=179B cardinality=1.50M
 |  in pipelines: 03(GETNEXT), 01(OPEN)
 |
 02:ANALYTIC
@@ -3101,13 +3101,13 @@ PLAN-ROOT SINK
 |  order by: o_totalprice ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=6,3 row-size=179B cardinality=1500000
+|  tuple-ids=6,3 row-size=179B cardinality=1.50M
 |  in pipelines: 01(GETNEXT)
 |
 07:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: o_totalprice ASC
 |  mem-estimate=40.68MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=6 row-size=171B cardinality=1500000
+|  tuple-ids=6 row-size=171B cardinality=1.50M
 |  in pipelines: 01(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -3115,7 +3115,7 @@ Per-Host Resources: mem-estimate=112.00MB mem-reservation=72.00MB thread-reserva
 01:SORT
 |  order by: o_totalprice ASC
 |  mem-estimate=16.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=6 row-size=171B cardinality=1500000
+|  tuple-ids=6 row-size=171B cardinality=1.50M
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [tpch_parquet.orders, RANDOM]
@@ -3123,9 +3123,9 @@ Per-Host Resources: mem-estimate=112.00MB mem-reservation=72.00MB thread-reserva
    stored statistics:
      table: rows=1500000 size=54.07MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=1177135
+   extrapolated-rows=disabled max-scan-range-rows=1177136
    mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=0
-   tuple-ids=0 row-size=171B cardinality=1500000
+   tuple-ids=0 row-size=171B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ====
 # Union with non-trivial branches: each branch executes sequentially within fragment.
@@ -3160,7 +3160,7 @@ PLAN-ROOT SINK
 00:UNION
 |  pass-through-operands: 04
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=7 row-size=66B cardinality=2549844
+|  tuple-ids=7 row-size=66B cardinality=2.55M
 |  in pipelines: 04(GETNEXT), 05(GETNEXT), 08(GETNEXT)
 |
 |--10:HASH JOIN [INNER JOIN]
@@ -3168,7 +3168,7 @@ PLAN-ROOT SINK
 |  |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  |  runtime filters: RF004[bloom] <- o_orderkey
 |  |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  |  tuple-ids=5,6 row-size=91B cardinality=822530
+|  |  tuple-ids=5,6 row-size=91B cardinality=822.53K
 |  |  in pipelines: 08(GETNEXT), 09(OPEN)
 |  |
 |  |--09:SCAN HDFS [tpch_parquet.orders]
@@ -3176,23 +3176,23 @@ PLAN-ROOT SINK
 |  |     stored statistics:
 |  |       table: rows=1500000 size=54.07MB
 |  |       columns: all
-|  |     extrapolated-rows=disabled max-scan-range-rows=1177135
+|  |     extrapolated-rows=disabled max-scan-range-rows=1177136
 |  |     mem-estimate=40.00MB mem-reservation=4.00MB thread-reservation=1
-|  |     tuple-ids=6 row-size=8B cardinality=1500000
+|  |     tuple-ids=6 row-size=8B cardinality=1.50M
 |  |     in pipelines: 09(GETNEXT)
 |  |
 |  08:SCAN HDFS [tpch_parquet.lineitem]
-|     partitions=1/1 files=3 size=193.72MB
+|     partitions=1/1 files=3 size=193.60MB
 |     predicates: l_shipmode = 'F'
 |     runtime filters: RF004[bloom] -> l_orderkey
 |     stored statistics:
-|       table: rows=6001215 size=193.72MB
+|       table: rows=6001215 size=193.60MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=2141609
+|     extrapolated-rows=disabled max-scan-range-rows=2141702
 |     parquet statistics predicates: l_shipmode = 'F'
 |     parquet dictionary predicates: l_shipmode = 'F'
 |     mem-estimate=80.00MB mem-reservation=24.00MB thread-reservation=1
-|     tuple-ids=5 row-size=83B cardinality=857316
+|     tuple-ids=5 row-size=83B cardinality=857.32K
 |     in pipelines: 08(GETNEXT)
 |
 |--07:HASH JOIN [INNER JOIN]
@@ -3200,7 +3200,7 @@ PLAN-ROOT SINK
 |  |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  |  runtime filters: RF002[bloom] <- o_orderkey
 |  |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  |  tuple-ids=3,4 row-size=95B cardinality=1151542
+|  |  tuple-ids=3,4 row-size=95B cardinality=1.15M
 |  |  in pipelines: 05(GETNEXT), 06(OPEN)
 |  |
 |  |--06:SCAN HDFS [tpch_parquet.orders]
@@ -3209,28 +3209,28 @@ PLAN-ROOT SINK
 |  |     stored statistics:
 |  |       table: rows=1500000 size=54.07MB
 |  |       columns: all
-|  |     extrapolated-rows=disabled max-scan-range-rows=1177135
+|  |     extrapolated-rows=disabled max-scan-range-rows=1177136
 |  |     parquet statistics predicates: o_orderpriority = '2-HIGH'
 |  |     parquet dictionary predicates: o_orderpriority = '2-HIGH'
 |  |     mem-estimate=40.00MB mem-reservation=8.00MB thread-reservation=1
-|  |     tuple-ids=4 row-size=28B cardinality=300000
+|  |     tuple-ids=4 row-size=28B cardinality=300.00K
 |  |     in pipelines: 06(GETNEXT)
 |  |
 |  05:SCAN HDFS [tpch_parquet.lineitem]
-|     partitions=1/1 files=3 size=193.72MB
+|     partitions=1/1 files=3 size=193.60MB
 |     runtime filters: RF002[bloom] -> l_orderkey
 |     stored statistics:
-|       table: rows=6001215 size=193.72MB
+|       table: rows=6001215 size=193.60MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=2141609
+|     extrapolated-rows=disabled max-scan-range-rows=2141702
 |     mem-estimate=80.00MB mem-reservation=24.00MB thread-reservation=1
-|     tuple-ids=3 row-size=66B cardinality=6001215
+|     tuple-ids=3 row-size=66B cardinality=6.00M
 |     in pipelines: 05(GETNEXT)
 |
 04:AGGREGATE [FINALIZE]
 |  group by: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_comment
 |  mem-estimate=40.16MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=2 row-size=66B cardinality=575772
+|  tuple-ids=2 row-size=66B cardinality=575.77K
 |  in pipelines: 04(GETNEXT), 01(OPEN)
 |
 03:HASH JOIN [INNER JOIN]
@@ -3238,7 +3238,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=82B cardinality=575772
+|  tuple-ids=0,1 row-size=82B cardinality=575.77K
 |  in pipelines: 01(GETNEXT), 02(OPEN)
 |
 |--02:SCAN HDFS [tpch_parquet.orders]
@@ -3246,23 +3246,23 @@ PLAN-ROOT SINK
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=4.00MB thread-reservation=1
-|     tuple-ids=1 row-size=8B cardinality=1500000
+|     tuple-ids=1 row-size=8B cardinality=1.50M
 |     in pipelines: 02(GETNEXT)
 |
 01:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    predicates: l_tax > CAST(10 AS DECIMAL(3,0))
    runtime filters: RF000[bloom] -> l_orderkey
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    parquet statistics predicates: l_tax > CAST(10 AS DECIMAL(3,0))
    parquet dictionary predicates: l_tax > CAST(10 AS DECIMAL(3,0))
    mem-estimate=80.00MB mem-reservation=24.00MB thread-reservation=1
-   tuple-ids=0 row-size=74B cardinality=600122
+   tuple-ids=0 row-size=74B cardinality=600.12K
    in pipelines: 01(GETNEXT)
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=123.75MB Threads=12
@@ -3283,7 +3283,7 @@ PLAN-ROOT SINK
 |
 17:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.21MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=7 row-size=66B cardinality=2549844
+|  tuple-ids=7 row-size=66B cardinality=2.55M
 |  in pipelines: 14(GETNEXT), 05(GETNEXT), 08(GETNEXT)
 |
 F08:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -3291,7 +3291,7 @@ Per-Host Resources: mem-estimate=109.02MB mem-reservation=43.00MB thread-reserva
 00:UNION
 |  pass-through-operands: 14
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=7 row-size=66B cardinality=2549844
+|  tuple-ids=7 row-size=66B cardinality=2.55M
 |  in pipelines: 14(GETNEXT), 05(GETNEXT), 08(GETNEXT)
 |
 |--10:HASH JOIN [INNER JOIN, BROADCAST]
@@ -3299,12 +3299,12 @@ Per-Host Resources: mem-estimate=109.02MB mem-reservation=43.00MB thread-reserva
 |  |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  |  runtime filters: RF004[bloom] <- o_orderkey
 |  |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  |  tuple-ids=5,6 row-size=91B cardinality=822530
+|  |  tuple-ids=5,6 row-size=91B cardinality=822.53K
 |  |  in pipelines: 08(GETNEXT), 09(OPEN)
 |  |
 |  |--16:EXCHANGE [BROADCAST]
 |  |  |  mem-estimate=10.02MB mem-reservation=0B thread-reservation=0
-|  |  |  tuple-ids=6 row-size=8B cardinality=1500000
+|  |  |  tuple-ids=6 row-size=8B cardinality=1.50M
 |  |  |  in pipelines: 09(GETNEXT)
 |  |  |
 |  |  F07:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -3314,23 +3314,23 @@ Per-Host Resources: mem-estimate=109.02MB mem-reservation=43.00MB thread-reserva
 |  |     stored statistics:
 |  |       table: rows=1500000 size=54.07MB
 |  |       columns: all
-|  |     extrapolated-rows=disabled max-scan-range-rows=1177135
+|  |     extrapolated-rows=disabled max-scan-range-rows=1177136
 |  |     mem-estimate=40.00MB mem-reservation=4.00MB thread-reservation=1
-|  |     tuple-ids=6 row-size=8B cardinality=1500000
+|  |     tuple-ids=6 row-size=8B cardinality=1.50M
 |  |     in pipelines: 09(GETNEXT)
 |  |
 |  08:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-|     partitions=1/1 files=3 size=193.72MB
+|     partitions=1/1 files=3 size=193.60MB
 |     predicates: l_shipmode = 'F'
 |     runtime filters: RF004[bloom] -> l_orderkey
 |     stored statistics:
-|       table: rows=6001215 size=193.72MB
+|       table: rows=6001215 size=193.60MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=2141609
+|     extrapolated-rows=disabled max-scan-range-rows=2141702
 |     parquet statistics predicates: l_shipmode = 'F'
 |     parquet dictionary predicates: l_shipmode = 'F'
 |     mem-estimate=80.00MB mem-reservation=24.00MB thread-reservation=1
-|     tuple-ids=5 row-size=83B cardinality=857316
+|     tuple-ids=5 row-size=83B cardinality=857.32K
 |     in pipelines: 08(GETNEXT)
 |
 |--07:HASH JOIN [INNER JOIN, BROADCAST]
@@ -3338,12 +3338,12 @@ Per-Host Resources: mem-estimate=109.02MB mem-reservation=43.00MB thread-reserva
 |  |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  |  runtime filters: RF002[bloom] <- o_orderkey
 |  |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  |  tuple-ids=3,4 row-size=95B cardinality=1151542
+|  |  tuple-ids=3,4 row-size=95B cardinality=1.15M
 |  |  in pipelines: 05(GETNEXT), 06(OPEN)
 |  |
 |  |--15:EXCHANGE [BROADCAST]
 |  |  |  mem-estimate=8.19MB mem-reservation=0B thread-reservation=0
-|  |  |  tuple-ids=4 row-size=28B cardinality=300000
+|  |  |  tuple-ids=4 row-size=28B cardinality=300.00K
 |  |  |  in pipelines: 06(GETNEXT)
 |  |  |
 |  |  F05:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -3354,33 +3354,33 @@ Per-Host Resources: mem-estimate=109.02MB mem-reservation=43.00MB thread-reserva
 |  |     stored statistics:
 |  |       table: rows=1500000 size=54.07MB
 |  |       columns: all
-|  |     extrapolated-rows=disabled max-scan-range-rows=1177135
+|  |     extrapolated-rows=disabled max-scan-range-rows=1177136
 |  |     parquet statistics predicates: o_orderpriority = '2-HIGH'
 |  |     parquet dictionary predicates: o_orderpriority = '2-HIGH'
 |  |     mem-estimate=40.00MB mem-reservation=8.00MB thread-reservation=1
-|  |     tuple-ids=4 row-size=28B cardinality=300000
+|  |     tuple-ids=4 row-size=28B cardinality=300.00K
 |  |     in pipelines: 06(GETNEXT)
 |  |
 |  05:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-|     partitions=1/1 files=3 size=193.72MB
+|     partitions=1/1 files=3 size=193.60MB
 |     runtime filters: RF002[bloom] -> l_orderkey
 |     stored statistics:
-|       table: rows=6001215 size=193.72MB
+|       table: rows=6001215 size=193.60MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=2141609
+|     extrapolated-rows=disabled max-scan-range-rows=2141702
 |     mem-estimate=80.00MB mem-reservation=24.00MB thread-reservation=1
-|     tuple-ids=3 row-size=66B cardinality=6001215
+|     tuple-ids=3 row-size=66B cardinality=6.00M
 |     in pipelines: 05(GETNEXT)
 |
 14:AGGREGATE [FINALIZE]
 |  group by: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_comment
 |  mem-estimate=34.00MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=2 row-size=66B cardinality=575772
+|  tuple-ids=2 row-size=66B cardinality=575.77K
 |  in pipelines: 14(GETNEXT), 01(OPEN)
 |
 13:EXCHANGE [HASH(l_orderkey,l_partkey,l_suppkey,l_linenumber,l_comment)]
 |  mem-estimate=10.21MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=66B cardinality=575772
+|  tuple-ids=2 row-size=66B cardinality=575.77K
 |  in pipelines: 01(GETNEXT)
 |
 F02:PLAN FRAGMENT [HASH(l_orderkey)] hosts=3 instances=3
@@ -3388,7 +3388,7 @@ Per-Host Resources: mem-estimate=55.73MB mem-reservation=39.75MB thread-reservat
 04:AGGREGATE [STREAMING]
 |  group by: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_comment
 |  mem-estimate=34.00MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=2 row-size=66B cardinality=575772
+|  tuple-ids=2 row-size=66B cardinality=575.77K
 |  in pipelines: 01(GETNEXT)
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
@@ -3396,12 +3396,12 @@ Per-Host Resources: mem-estimate=55.73MB mem-reservation=39.75MB thread-reservat
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=4.75MB mem-reservation=4.75MB spill-buffer=256.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=82B cardinality=575772
+|  tuple-ids=0,1 row-size=82B cardinality=575.77K
 |  in pipelines: 01(GETNEXT), 02(OPEN)
 |
 |--12:EXCHANGE [HASH(o_orderkey)]
 |  |  mem-estimate=5.75MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=8B cardinality=1500000
+|  |  tuple-ids=1 row-size=8B cardinality=1.50M
 |  |  in pipelines: 02(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -3411,30 +3411,30 @@ Per-Host Resources: mem-estimate=55.73MB mem-reservation=39.75MB thread-reservat
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=4.00MB thread-reservation=1
-|     tuple-ids=1 row-size=8B cardinality=1500000
+|     tuple-ids=1 row-size=8B cardinality=1.50M
 |     in pipelines: 02(GETNEXT)
 |
 11:EXCHANGE [HASH(l_orderkey)]
 |  mem-estimate=10.23MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=74B cardinality=600122
+|  tuple-ids=0 row-size=74B cardinality=600.12K
 |  in pipelines: 01(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=81.00MB mem-reservation=25.00MB thread-reservation=2 runtime-filters-memory=1.00MB
 01:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    predicates: l_tax > CAST(10 AS DECIMAL(3,0))
    runtime filters: RF000[bloom] -> l_orderkey
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    parquet statistics predicates: l_tax > CAST(10 AS DECIMAL(3,0))
    parquet dictionary predicates: l_tax > CAST(10 AS DECIMAL(3,0))
    mem-estimate=80.00MB mem-reservation=24.00MB thread-reservation=1
-   tuple-ids=0 row-size=74B cardinality=600122
+   tuple-ids=0 row-size=74B cardinality=600.12K
    in pipelines: 01(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=209.75MB Threads=13
@@ -3455,7 +3455,7 @@ PLAN-ROOT SINK
 |
 17:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.41MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=7 row-size=66B cardinality=2549844
+|  tuple-ids=7 row-size=66B cardinality=2.55M
 |  in pipelines: 14(GETNEXT), 05(GETNEXT), 08(GETNEXT)
 |
 F08:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -3463,7 +3463,7 @@ Per-Host Resources: mem-estimate=198.00MB mem-reservation=86.00MB thread-reserva
 00:UNION
 |  pass-through-operands: 14
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=7 row-size=66B cardinality=2549844
+|  tuple-ids=7 row-size=66B cardinality=2.55M
 |  in pipelines: 14(GETNEXT), 05(GETNEXT), 08(GETNEXT)
 |
 |--10:HASH JOIN [INNER JOIN, BROADCAST]
@@ -3472,7 +3472,7 @@ Per-Host Resources: mem-estimate=198.00MB mem-reservation=86.00MB thread-reserva
 |  |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  |  runtime filters: RF004[bloom] <- o_orderkey
 |  |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  |  tuple-ids=5,6 row-size=91B cardinality=822530
+|  |  tuple-ids=5,6 row-size=91B cardinality=822.53K
 |  |  in pipelines: 08(GETNEXT), 09(OPEN)
 |  |
 |  |--F11:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -3484,7 +3484,7 @@ Per-Host Resources: mem-estimate=198.00MB mem-reservation=86.00MB thread-reserva
 |  |  |
 |  |  16:EXCHANGE

<TRUNCATED>

[21/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/inline-view-limit.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/inline-view-limit.test b/testdata/workloads/functional-planner/queries/PlannerTest/inline-view-limit.test
index c75c02d..266d528 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/inline-view-limit.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/inline-view-limit.test
@@ -6,6 +6,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    predicates: functional.alltypessmall.id < 5
+   row-size=89B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -14,6 +15,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    predicates: functional.alltypessmall.id < 5
+   row-size=89B cardinality=10
 ====
 # predicate pushdown is prevented in presence of limit clause
 select * from (select * from functional.alltypessmall limit 10) a where id < 5 limit 5
@@ -23,16 +25,19 @@ PLAN-ROOT SINK
 01:SELECT
 |  predicates: functional.alltypessmall.id < 5
 |  limit: 5
+|  row-size=89B cardinality=1
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    limit: 10
+   row-size=89B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:SELECT
 |  predicates: functional.alltypessmall.id < 5
 |  limit: 5
+|  row-size=89B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |  limit: 10
@@ -40,6 +45,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    limit: 10
+   row-size=89B cardinality=10
 ====
 # predicate pushdown is prevented in presence of order by/limit clause;
 # top-n is distributed
@@ -52,18 +58,22 @@ PLAN-ROOT SINK
 02:SELECT
 |  predicates: id < 5
 |  limit: 5
+|  row-size=89B cardinality=1
 |
 01:TOP-N [LIMIT=10]
 |  order by: id ASC
+|  row-size=89B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 02:SELECT
 |  predicates: id < 5
 |  limit: 5
+|  row-size=89B cardinality=1
 |
 03:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: id ASC
@@ -71,9 +81,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: id ASC
+|  row-size=89B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # top-n is not distributed because it depends on the output of the aggregation
 select *
@@ -90,21 +102,26 @@ PLAN-ROOT SINK
 |  hash predicates: functional.alltypes.id = id
 |  runtime filters: RF000 <- id
 |  limit: 5
+|  row-size=101B cardinality=1
 |
 |--03:TOP-N [LIMIT=5]
 |  |  order by: count(*) ASC
+|  |  row-size=12B cardinality=5
 |  |
 |  02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: id
+|  |  row-size=12B cardinality=7.30K
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 5
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -115,6 +132,7 @@ PLAN-ROOT SINK
 |  hash predicates: functional.alltypes.id = id
 |  runtime filters: RF000 <- id
 |  limit: 5
+|  row-size=101B cardinality=1
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
@@ -124,24 +142,29 @@ PLAN-ROOT SINK
 |  |
 |  03:TOP-N [LIMIT=5]
 |  |  order by: count(*) ASC
+|  |  row-size=12B cardinality=5
 |  |
 |  06:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
 |  |  group by: id
+|  |  row-size=12B cardinality=7.30K
 |  |
 |  05:EXCHANGE [HASH(id)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: id
+|  |  row-size=12B cardinality=7.30K
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 5
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ====
 # predicate pushdown is prevented in presence of limit clause; variant w/ join
 select *
@@ -156,24 +179,29 @@ PLAN-ROOT SINK
 03:SELECT
 |  predicates: a.id < 5
 |  limit: 5
+|  row-size=93B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = functional.alltypessmall.id
 |  runtime filters: RF000 <- functional.alltypessmall.id
 |  limit: 10
+|  row-size=93B cardinality=10
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:SELECT
 |  predicates: a.id < 5
 |  limit: 5
+|  row-size=93B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |  limit: 10
@@ -182,15 +210,18 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = functional.alltypessmall.id
 |  runtime filters: RF000 <- functional.alltypessmall.id
 |  limit: 10
+|  row-size=93B cardinality=10
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=100
 ====
 # predicate pushdown is prevented in presence of order by/limit clause
 select *
@@ -203,21 +234,26 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=5]
 |  order by: id ASC
+|  row-size=89B cardinality=1
 |
 01:SELECT
 |  predicates: functional.alltypessmall.id < 5
+|  row-size=89B cardinality=1
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    limit: 10
+   row-size=89B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=5]
 |  order by: id ASC
+|  row-size=89B cardinality=1
 |
 01:SELECT
 |  predicates: functional.alltypessmall.id < 5
+|  row-size=89B cardinality=1
 |
 03:EXCHANGE [UNPARTITIONED]
 |  limit: 10
@@ -225,6 +261,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    limit: 10
+   row-size=89B cardinality=10
 ====
 # predicate pushdown is prevented in presence of order by/limit clause; variant w/ join
 select *
@@ -241,29 +278,36 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=5]
 |  order by: id ASC
+|  row-size=89B cardinality=1
 |
 03:SELECT
 |  predicates: a.id < 5
+|  row-size=93B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = functional.alltypessmall.id
 |  runtime filters: RF000 <- functional.alltypessmall.id
 |  limit: 10
+|  row-size=93B cardinality=10
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=5]
 |  order by: id ASC
+|  row-size=89B cardinality=1
 |
 03:SELECT
 |  predicates: a.id < 5
+|  row-size=93B cardinality=1
 |
 06:EXCHANGE [UNPARTITIONED]
 |  limit: 10
@@ -272,15 +316,18 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = functional.alltypessmall.id
 |  runtime filters: RF000 <- functional.alltypessmall.id
 |  limit: 10
+|  row-size=93B cardinality=10
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=100
 ====
 # join against subquery with limit creates a merge fragment that applies the limit
 select *
@@ -292,14 +339,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = id
 |  runtime filters: RF000 <- id
+|  row-size=93B cardinality=10
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     limit: 10
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -308,6 +358,7 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypes.id = id
 |  runtime filters: RF000 <- id
+|  row-size=93B cardinality=10
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
@@ -317,10 +368,12 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     limit: 10
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ====
 # join against subquery with limit creates a merge fragment that applies the limit;
 # topn is distributed
@@ -333,16 +386,20 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = id
 |  runtime filters: RF000 <- id
+|  row-size=93B cardinality=10
 |
 |--02:TOP-N [LIMIT=10]
 |  |  order by: id ASC
+|  |  row-size=4B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -351,6 +408,7 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypes.id = id
 |  runtime filters: RF000 <- id
+|  row-size=93B cardinality=10
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
@@ -360,13 +418,16 @@ PLAN-ROOT SINK
 |  |
 |  02:TOP-N [LIMIT=10]
 |  |  order by: id ASC
+|  |  row-size=4B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ====
 # join against subquery with limit;
 # predicate pushdown is prevented in presence of order by/limit clause; variant w/ join
@@ -384,27 +445,33 @@ PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=5]
 |  order by: id ASC
+|  row-size=93B cardinality=1
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=97B cardinality=1
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = functional.alltypestiny.id
 |  |  runtime filters: RF002 <- functional.alltypestiny.id
 |  |  limit: 10
+|  |  row-size=8B cardinality=9
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall a]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF002 -> a.id
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 5
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -414,10 +481,12 @@ PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=5]
 |  order by: id ASC
+|  row-size=93B cardinality=1
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypes.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=97B cardinality=1
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
@@ -428,20 +497,24 @@ PLAN-ROOT SINK
 |  |  hash predicates: a.id = functional.alltypestiny.id
 |  |  runtime filters: RF002 <- functional.alltypestiny.id
 |  |  limit: 10
+|  |  row-size=8B cardinality=9
 |  |
 |  |--06:EXCHANGE [BROADCAST]
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall a]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF002 -> a.id
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 5
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ====
 # join against subquery with order by/limit;
 # predicate pushdown is prevented in presence of order by/limit clause; variant w/ join
@@ -460,29 +533,36 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=5]
 |  order by: id ASC
+|  row-size=93B cardinality=1
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = id
 |  runtime filters: RF000 <- id
+|  row-size=97B cardinality=1
 |
 |--04:TOP-N [LIMIT=10]
 |  |  order by: int_col ASC
+|  |  row-size=8B cardinality=9
 |  |
 |  03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = functional.alltypestiny.id
 |  |  runtime filters: RF002 <- functional.alltypestiny.id
+|  |  row-size=12B cardinality=9
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall a]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF002 -> a.id
+|     row-size=8B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 5
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -492,10 +572,12 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=5]
 |  order by: id ASC
+|  row-size=93B cardinality=1
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypes.id = id
 |  runtime filters: RF000 <- id
+|  row-size=97B cardinality=1
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
@@ -505,24 +587,29 @@ PLAN-ROOT SINK
 |  |
 |  04:TOP-N [LIMIT=10]
 |  |  order by: int_col ASC
+|  |  row-size=8B cardinality=9
 |  |
 |  03:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: a.id = functional.alltypestiny.id
 |  |  runtime filters: RF002 <- functional.alltypestiny.id
+|  |  row-size=12B cardinality=9
 |  |
 |  |--07:EXCHANGE [BROADCAST]
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall a]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF002 -> a.id
+|     row-size=8B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 5
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ====
 # Subquery containing limit and offset
 select x.id from (
@@ -534,17 +621,21 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=100 OFFSET=4]
 |  order by: id ASC
+|  row-size=4B cardinality=5
 |
 01:TOP-N [LIMIT=5 OFFSET=5]
 |  order by: id ASC
+|  row-size=4B cardinality=5
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=100 OFFSET=4]
 |  order by: id ASC
+|  row-size=4B cardinality=5
 |
 03:MERGING-EXCHANGE [UNPARTITIONED]
 |  offset: 5
@@ -553,9 +644,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: id ASC
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====
 # Test value transfers for outer-joined inline views with a limit.
 # Value transfer a.id->b.id is illegal due to the limit in b.
@@ -572,15 +665,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: id = id
 |  other predicates: id > 20
+|  row-size=8B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: id != 2
 |     limit: 10
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id > 10, id != 1
+   row-size=4B cardinality=730
 ====
 # Test value transfers for outer-joined inline views with a limit.
 # Value transfer a.id->b.id is legal.
@@ -598,19 +694,23 @@ PLAN-ROOT SINK
 |  hash predicates: id = id
 |  other predicates: id > 20
 |  runtime filters: RF000 <- id
+|  row-size=8B cardinality=1
 |
 |--01:SELECT
 |  |  predicates: id > 10
+|  |  row-size=4B cardinality=1
 |  |
 |  00:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: id != 1
 |     limit: 10
+|     row-size=4B cardinality=10
 |
 02:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    predicates: functional.alltypessmall.id != 1, functional.alltypessmall.id > 10, functional.alltypessmall.id > 20, id != 2
    runtime filters: RF000 -> id
+   row-size=4B cardinality=10
 ====
 # Test value transfers for outer-joined inline views with a limit.
 # Value transfer b.id->a.id is illegal due to the limit in a.
@@ -628,16 +728,19 @@ PLAN-ROOT SINK
 |  hash predicates: id = id
 |  other predicates: id > 10
 |  runtime filters: RF000 <- id
+|  row-size=8B cardinality=10
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: functional.alltypessmall.id > 20, id != 2
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: id != 1
    runtime filters: RF000 -> id
    limit: 10
+   row-size=4B cardinality=10
 ====
 # Test value transfers for outer-joined inline views with a limit.
 # Value transfer b.id->a.id is legal.
@@ -655,19 +758,23 @@ PLAN-ROOT SINK
 |  hash predicates: id = id
 |  other predicates: id > 10
 |  runtime filters: RF000 <- id
+|  row-size=8B cardinality=1
 |
 |--02:SELECT
 |  |  predicates: id > 20
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: id != 2
 |     limit: 10
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id != 2, functional.alltypes.id > 10, functional.alltypes.id > 20, id != 1
    runtime filters: RF000 -> id
+   row-size=4B cardinality=730
 ====
 # IMPALA-3450: limits on select nodes are reflected in cardinality estimates. The test for
 # this is embedded in PlannerTestBase.java and is not visible in these plans, as they only
@@ -679,8 +786,10 @@ PLAN-ROOT SINK
 01:SELECT
 |  predicates: functional.alltypes.id < 10
 |  limit: 1
+|  row-size=89B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    limit: 100
+   row-size=89B cardinality=100
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test b/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test
index 55660cb..aa47074 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test
@@ -17,21 +17,28 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t2.int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=101B cardinality=620
 |
 |--02:TOP-N [LIMIT=5]
 |  |  order by: count(*) DESC
+|  |  row-size=12B cardinality=1
 |  |
 |  01:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: int_col
 |  |  having: count(*) > 1
+|  |  row-size=12B cardinality=1
 |  |
 |  00:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: month = 1
 |     partitions=1/4 files=1 size=1.57KB
+|     row-size=4B cardinality=25
 |
 03:SCAN HDFS [functional.alltypes t2]
+   partition predicates: month = 1
    partitions=2/24 files=2 size=40.32KB
    runtime filters: RF000 -> t2.int_col
+   row-size=89B cardinality=620
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -40,6 +47,7 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t2.int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=101B cardinality=620
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
@@ -49,24 +57,31 @@ PLAN-ROOT SINK
 |  |
 |  02:TOP-N [LIMIT=5]
 |  |  order by: count(*) DESC
+|  |  row-size=12B cardinality=1
 |  |
 |  06:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
 |  |  group by: int_col
 |  |  having: count(*) > 1
+|  |  row-size=12B cardinality=1
 |  |
 |  05:EXCHANGE [HASH(int_col)]
 |  |
 |  01:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: int_col
+|  |  row-size=12B cardinality=10
 |  |
 |  00:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: month = 1
 |     partitions=1/4 files=1 size=1.57KB
+|     row-size=4B cardinality=25
 |
 03:SCAN HDFS [functional.alltypes t2]
+   partition predicates: month = 1
    partitions=2/24 files=2 size=40.32KB
    runtime filters: RF000 -> t2.int_col
+   row-size=89B cardinality=620
 ====
 # simple full scan subquery
 select * from (select y x from (select id y from functional_hbase.alltypessmall) a) b
@@ -74,12 +89,14 @@ select * from (select y x from (select id y from functional_hbase.alltypessmall)
 PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
+   row-size=4B cardinality=50
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
+   row-size=4B cardinality=50
 ====
 # subquery doing join
 select * from (select t2.*
@@ -91,14 +108,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=36B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
    runtime filters: RF000 -> t1.id
+   row-size=12B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -107,16 +127,19 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=36B cardinality=0
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
    runtime filters: RF000 -> t1.id
+   row-size=12B cardinality=0
 ====
 # subquery doing join
 # multiple join predicates;
@@ -139,15 +162,20 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  other predicates: a.tinyint_col = 15, a.day >= 6, a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
+|  row-size=117B cardinality=5
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
+|     row-size=22B cardinality=5
 |
 00:SCAN HDFS [functional.alltypesagg a]
+   partition predicates: a.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=95B cardinality=556
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypesagg/year=2010/month=1/day=10/100110.txt 0:76263
@@ -167,19 +195,24 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  other predicates: a.tinyint_col = 15, a.day >= 6, a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
+|  row-size=117B cardinality=5
 |
 |--04:EXCHANGE [HASH(b.id,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
+|     row-size=22B cardinality=5
 |
 03:EXCHANGE [HASH(a.id,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypesagg a]
+   partition predicates: a.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=95B cardinality=556
 ====
 # predicate pushdown
 select * from (select * from functional_hbase.alltypessmall) a where id < 5
@@ -188,6 +221,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: functional_hbase.alltypessmall.id < 5
+   row-size=89B cardinality=5
 ====
 # subquery join
 # multiple join predicates;
@@ -212,15 +246,20 @@ PLAN-ROOT SINK
 |  hash predicates: id = id, int_col = int_col
 |  other predicates: tinyint_col = 15, day >= 6, tinyint_col + tinyint_col < 15
 |  runtime filters: RF000 <- id, RF001 <- int_col
+|  row-size=39B cardinality=2
 |
 |--01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: functional.alltypessmall.string_col = '15', functional.alltypessmall.id + 15 = 27
+|     row-size=26B cardinality=2
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: functional.alltypesagg.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: functional.alltypesagg.tinyint_col = 15, functional.alltypesagg.id + 15 = 27
    runtime filters: RF000 -> id, RF001 -> int_col
+   row-size=13B cardinality=167
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -230,19 +269,24 @@ PLAN-ROOT SINK
 |  hash predicates: id = id, int_col = int_col
 |  other predicates: tinyint_col = 15, day >= 6, tinyint_col + tinyint_col < 15
 |  runtime filters: RF000 <- id, RF001 <- int_col
+|  row-size=39B cardinality=2
 |
 |--04:EXCHANGE [HASH(id,int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: functional.alltypessmall.string_col = '15', functional.alltypessmall.id + 15 = 27
+|     row-size=26B cardinality=2
 |
 03:EXCHANGE [HASH(id,int_col)]
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: functional.alltypesagg.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: functional.alltypesagg.tinyint_col = 15, functional.alltypesagg.id + 15 = 27
    runtime filters: RF000 -> id, RF001 -> int_col
+   row-size=13B cardinality=167
 ====
 # subquery join
 # multiple join predicates;
@@ -268,15 +312,20 @@ PLAN-ROOT SINK
 |  hash predicates: id = id, int_col = int_col
 |  other predicates: tinyint_col = 15, tinyint_col + tinyint_col < 15
 |  runtime filters: RF000 <- id, RF001 <- int_col
+|  row-size=39B cardinality=5
 |
 |--01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: functional.alltypessmall.string_col = '15'
+|     row-size=26B cardinality=5
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: functional.alltypesagg.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: functional.alltypesagg.tinyint_col = 15
    runtime filters: RF000 -> id, RF001 -> int_col
+   row-size=13B cardinality=556
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypesagg/year=2010/month=1/day=10/100110.txt 0:76263
@@ -296,19 +345,24 @@ PLAN-ROOT SINK
 |  hash predicates: id = id, int_col = int_col
 |  other predicates: tinyint_col = 15, tinyint_col + tinyint_col < 15
 |  runtime filters: RF000 <- id, RF001 <- int_col
+|  row-size=39B cardinality=5
 |
 |--04:EXCHANGE [HASH(id,int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: functional.alltypessmall.string_col = '15'
+|     row-size=26B cardinality=5
 |
 03:EXCHANGE [HASH(id,int_col)]
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: functional.alltypesagg.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: functional.alltypesagg.tinyint_col = 15
    runtime filters: RF000 -> id, RF001 -> int_col
+   row-size=13B cardinality=556
 ====
 # complex join, having joined subquery on the rhs, and predicate
 # at multiple subquery level. This tests that both sides of a join
@@ -333,24 +387,30 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = a.tinyint_col
 |  other predicates: a.int_col + b.float_col + CAST(c.string_col AS FLOAT) < 1000
 |  runtime filters: RF000 <- a.tinyint_col
+|  row-size=32B cardinality=11
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.smallint_col = b.id
 |  |  runtime filters: RF002 <- b.id
+|  |  row-size=15B cardinality=11
 |  |
 |  |--02:SCAN HDFS [functional.alltypessmall b]
 |  |     partitions=4/4 files=4 size=6.32KB
 |  |     predicates: b.float_col > 4.5
+|  |     row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypesagg a]
+|     partition predicates: month = 1, a.day = 1
 |     partitions=1/11 files=1 size=73.39KB
 |     predicates: a.int_col > 899
 |     runtime filters: RF002 -> a.smallint_col
+|     row-size=7B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall c]
    partitions=4/4 files=4 size=6.32KB
    predicates: c.string_col < '7'
    runtime filters: RF000 -> c.id
+   row-size=17B cardinality=10
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypessmall/year=2009/month=1/090101.txt 0:1610
@@ -373,18 +433,22 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = a.tinyint_col
 |  other predicates: a.int_col + b.float_col + CAST(c.string_col AS FLOAT) < 1000
 |  runtime filters: RF000 <- a.tinyint_col
+|  row-size=32B cardinality=11
 |
 |--08:EXCHANGE [HASH(a.tinyint_col)]
 |  |
 |  03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  |  hash predicates: b.id = a.smallint_col
 |  |  runtime filters: RF002 <- a.smallint_col
+|  |  row-size=15B cardinality=11
 |  |
 |  |--06:EXCHANGE [HASH(a.smallint_col)]
 |  |  |
 |  |  01:SCAN HDFS [functional.alltypesagg a]
+|  |     partition predicates: month = 1, a.day = 1
 |  |     partitions=1/11 files=1 size=73.39KB
 |  |     predicates: a.int_col > 899
+|  |     row-size=7B cardinality=100
 |  |
 |  05:EXCHANGE [HASH(b.id)]
 |  |
@@ -392,6 +456,7 @@ PLAN-ROOT SINK
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: b.float_col > 4.5
 |     runtime filters: RF002 -> b.id
+|     row-size=8B cardinality=10
 |
 07:EXCHANGE [HASH(c.id)]
 |
@@ -399,6 +464,7 @@ PLAN-ROOT SINK
    partitions=4/4 files=4 size=6.32KB
    predicates: c.string_col < '7'
    runtime filters: RF000 -> c.id
+   row-size=17B cardinality=10
 ====
 # with grouping
 select tinyint_col, count(*), min(tinyint_col), max(tinyint_col), sum(tinyint_col),
@@ -411,9 +477,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*), min(functional.alltypesagg.tinyint_col), max(functional.alltypesagg.tinyint_col), sum(functional.alltypesagg.tinyint_col), avg(functional.alltypesagg.tinyint_col)
 |  group by: functional.alltypesagg.tinyint_col
+|  row-size=27B cardinality=9
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypesagg/year=2010/month=1/day=1/100101.txt 0:75153
@@ -435,15 +503,18 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*), min:merge(tinyint_col), max:merge(tinyint_col), sum:merge(tinyint_col), avg:merge(tinyint_col)
 |  group by: tinyint_col
+|  row-size=27B cardinality=9
 |
 02:EXCHANGE [HASH(tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*), min(functional.alltypesagg.tinyint_col), max(functional.alltypesagg.tinyint_col), sum(functional.alltypesagg.tinyint_col), avg(functional.alltypesagg.tinyint_col)
 |  group by: functional.alltypesagg.tinyint_col
+|  row-size=27B cardinality=9
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ====
 # with grouping
 select * from (
@@ -458,9 +529,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*), min(tinyint_col), max(tinyint_col), sum(tinyint_col), avg(tinyint_col)
 |  group by: tinyint_col
+|  row-size=27B cardinality=9
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -469,15 +542,18 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*), min:merge(tinyint_col), max:merge(tinyint_col), sum:merge(tinyint_col), avg:merge(tinyint_col)
 |  group by: tinyint_col
+|  row-size=27B cardinality=9
 |
 02:EXCHANGE [HASH(tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*), min(tinyint_col), max(tinyint_col), sum(tinyint_col), avg(tinyint_col)
 |  group by: tinyint_col
+|  row-size=27B cardinality=9
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ====
 select c1, c2, c3
 from
@@ -494,12 +570,15 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=5]
 |  order by: c2 ASC, c3 DESC
+|  row-size=16B cardinality=5
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(float_col), min(float_col)
 |  group by: int_col
+|  row-size=16B cardinality=10
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
+   row-size=8B cardinality=50
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -509,18 +588,22 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=5]
 |  order by: c2 ASC, c3 DESC
+|  row-size=16B cardinality=5
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum:merge(float_col), min:merge(float_col)
 |  group by: int_col
+|  row-size=16B cardinality=10
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: sum(float_col), min(float_col)
 |  group by: int_col
+|  row-size=16B cardinality=10
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
+   row-size=8B cardinality=50
 ====
 select c1, x2
 from (
@@ -556,12 +639,15 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -569,17 +655,21 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 04:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 03:EXCHANGE [HASH(functional.testtbl.id,functional.testtbl.name,functional.testtbl.zip)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ====
 # distinct w/ explicit select list
 select distinct id, zip
@@ -589,12 +679,15 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.zip
+|  row-size=12B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -602,22 +695,27 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, zip
+|  row-size=12B cardinality=0
 |
 05:EXCHANGE [HASH(id,zip)]
 |
 02:AGGREGATE [STREAMING]
 |  group by: functional.testtbl.id, functional.testtbl.zip
+|  row-size=12B cardinality=0
 |
 04:AGGREGATE [FINALIZE]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 03:EXCHANGE [HASH(functional.testtbl.id,functional.testtbl.name,functional.testtbl.zip)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: functional.testtbl.id, functional.testtbl.name, functional.testtbl.zip
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ====
 # aggregate with group-by, having
 select *
@@ -638,9 +736,11 @@ PLAN-ROOT SINK
 |  output: count(*), avg(functional.alltypesagg.int_col)
 |  group by: functional.alltypesagg.int_col % 7
 |  having: int_col % 7 IS NOT NULL, count(*) > 10, avg(int_col) > 500 OR count(*) = 10
+|  row-size=20B cardinality=96
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -650,15 +750,18 @@ PLAN-ROOT SINK
 |  output: count:merge(*), avg:merge(int_col)
 |  group by: int_col % 7
 |  having: int_col % 7 IS NOT NULL, count(*) > 10, avg(int_col) > 500 OR count(*) = 10
+|  row-size=20B cardinality=96
 |
 02:EXCHANGE [HASH(int_col % 7)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*), avg(functional.alltypesagg.int_col)
 |  group by: functional.alltypesagg.int_col % 7
+|  row-size=20B cardinality=957
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====
 # subquery with left outer join
 select j.*, d.*
@@ -679,14 +782,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: b.name = a.test_name
 |  runtime filters: RF000 <- a.test_name
+|  row-size=63B cardinality=2
 |
 |--00:SCAN HDFS [functional.jointbl a]
 |     partitions=1/1 files=1 size=433B
 |     predicates: a.test_id <= 1006
+|     row-size=33B cardinality=2
 |
 01:SCAN HDFS [functional.dimtbl b]
    partitions=1/1 files=1 size=171B
    runtime filters: RF000 -> b.name
+   row-size=29B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -695,18 +801,21 @@ PLAN-ROOT SINK
 02:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: b.name = a.test_name
 |  runtime filters: RF000 <- a.test_name
+|  row-size=63B cardinality=2
 |
 |--04:EXCHANGE [HASH(a.test_name)]
 |  |
 |  00:SCAN HDFS [functional.jointbl a]
 |     partitions=1/1 files=1 size=433B
 |     predicates: a.test_id <= 1006
+|     row-size=33B cardinality=2
 |
 03:EXCHANGE [HASH(b.name)]
 |
 01:SCAN HDFS [functional.dimtbl b]
    partitions=1/1 files=1 size=171B
    runtime filters: RF000 -> b.name
+   row-size=29B cardinality=10
 ====
 # complex join, having joined subquery on the rhs, and predicate
 # at multiple subquery level
@@ -732,24 +841,30 @@ PLAN-ROOT SINK
 05:AGGREGATE [FINALIZE]
 |  output: count(b.id)
 |  group by: a.smallint_col
+|  row-size=10B cardinality=97
 |
 04:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: a.tinyint_col = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=11B cardinality=11.11K
 |
 |--00:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.smallint_col = b.id
 |  runtime filters: RF002 <- b.id
+|  row-size=7B cardinality=11.00K
 |
 |--02:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 01:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.tinyint_col, RF002 -> a.smallint_col
+   row-size=3B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -758,36 +873,43 @@ PLAN-ROOT SINK
 10:AGGREGATE [FINALIZE]
 |  output: count:merge(x.id)
 |  group by: x.smallint_col
+|  row-size=10B cardinality=97
 |
 09:EXCHANGE [HASH(x.smallint_col)]
 |
 05:AGGREGATE [STREAMING]
 |  output: count(b.id)
 |  group by: a.smallint_col
+|  row-size=10B cardinality=97
 |
 04:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: a.tinyint_col = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=11B cardinality=11.11K
 |
 |--08:EXCHANGE [HASH(c.id)]
 |  |
 |  00:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 07:EXCHANGE [HASH(a.tinyint_col)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.smallint_col = b.id
 |  runtime filters: RF002 <- b.id
+|  row-size=7B cardinality=11.00K
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 01:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.tinyint_col, RF002 -> a.smallint_col
+   row-size=3B cardinality=11.00K
 ====
 # complex join, having joined subquery on the lhs, and predicate
 # at multiple subquery level
@@ -820,24 +942,30 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = a.tinyint_col
 |  other predicates: a.int_col + b.float_col + CAST(c.string_col AS FLOAT) < 1000
 |  runtime filters: RF000 <- a.tinyint_col
+|  row-size=32B cardinality=11
 |
 |--02:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.smallint_col = b.id
 |  |  runtime filters: RF002 <- b.id
+|  |  row-size=15B cardinality=11
 |  |
 |  |--01:SCAN HDFS [functional.alltypessmall b]
 |  |     partitions=4/4 files=4 size=6.32KB
 |  |     predicates: b.float_col > 4.5
+|  |     row-size=8B cardinality=10
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
+|     partition predicates: month = 1, a.day = 1
 |     partitions=1/11 files=1 size=73.39KB
 |     predicates: a.int_col > 899
 |     runtime filters: RF002 -> a.smallint_col
+|     row-size=7B cardinality=100
 |
 03:SCAN HDFS [functional.alltypessmall c]
    partitions=4/4 files=4 size=6.32KB
    predicates: c.string_col < '7'
    runtime filters: RF000 -> c.id
+   row-size=17B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -847,18 +975,22 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = a.tinyint_col
 |  other predicates: a.int_col + b.float_col + CAST(c.string_col AS FLOAT) < 1000
 |  runtime filters: RF000 <- a.tinyint_col
+|  row-size=32B cardinality=11
 |
 |--08:EXCHANGE [HASH(a.tinyint_col)]
 |  |
 |  02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  |  hash predicates: b.id = a.smallint_col
 |  |  runtime filters: RF002 <- a.smallint_col
+|  |  row-size=15B cardinality=11
 |  |
 |  |--06:EXCHANGE [HASH(a.smallint_col)]
 |  |  |
 |  |  00:SCAN HDFS [functional.alltypesagg a]
+|  |     partition predicates: month = 1, a.day = 1
 |  |     partitions=1/11 files=1 size=73.39KB
 |  |     predicates: a.int_col > 899
+|  |     row-size=7B cardinality=100
 |  |
 |  05:EXCHANGE [HASH(b.id)]
 |  |
@@ -866,6 +998,7 @@ PLAN-ROOT SINK
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: b.float_col > 4.5
 |     runtime filters: RF002 -> b.id
+|     row-size=8B cardinality=10
 |
 07:EXCHANGE [HASH(c.id)]
 |
@@ -873,6 +1006,7 @@ PLAN-ROOT SINK
    partitions=4/4 files=4 size=6.32KB
    predicates: c.string_col < '7'
    runtime filters: RF000 -> c.id
+   row-size=17B cardinality=10
 ====
 # complex join, having joined aggregate subquery on the rhs, and predicate
 # at multiple subquery level
@@ -891,29 +1025,36 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: sum(count(a.id))
 |  group by: b.smallint_col
+|  row-size=10B cardinality=10
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: c.id = b.smallint_col
 |  runtime filters: RF000 <- b.smallint_col
+|  row-size=14B cardinality=10
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: count(a.id)
 |  |  group by: b.smallint_col
+|  |  row-size=10B cardinality=10
 |  |
 |  03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.smallint_col = b.id
 |  |  runtime filters: RF002 <- b.id
+|  |  row-size=12B cardinality=11.00K
 |  |
 |  |--02:SCAN HDFS [functional.alltypessmall b]
 |  |     partitions=4/4 files=4 size=6.32KB
+|  |     row-size=6B cardinality=100
 |  |
 |  01:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> a.smallint_col
+|     row-size=6B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypessmall c]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> c.id
+   row-size=4B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -922,45 +1063,54 @@ PLAN-ROOT SINK
 12:AGGREGATE [FINALIZE]
 |  output: sum:merge(x.cnt)
 |  group by: x.smallint_col
+|  row-size=10B cardinality=10
 |
 11:EXCHANGE [HASH(x.smallint_col)]
 |
 06:AGGREGATE [STREAMING]
 |  output: sum(count(a.id))
 |  group by: b.smallint_col
+|  row-size=10B cardinality=10
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c.id = b.smallint_col
 |  runtime filters: RF000 <- b.smallint_col
+|  row-size=14B cardinality=10
 |
 |--10:EXCHANGE [BROADCAST]
 |  |
 |  09:AGGREGATE [FINALIZE]
 |  |  output: count:merge(a.id)
 |  |  group by: b.smallint_col
+|  |  row-size=10B cardinality=10
 |  |
 |  08:EXCHANGE [HASH(b.smallint_col)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  output: count(a.id)
 |  |  group by: b.smallint_col
+|  |  row-size=10B cardinality=10
 |  |
 |  03:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: a.smallint_col = b.id
 |  |  runtime filters: RF002 <- b.id
+|  |  row-size=12B cardinality=11.00K
 |  |
 |  |--07:EXCHANGE [BROADCAST]
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypessmall b]
 |  |     partitions=4/4 files=4 size=6.32KB
+|  |     row-size=6B cardinality=100
 |  |
 |  01:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> a.smallint_col
+|     row-size=6B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypessmall c]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> c.id
+   row-size=4B cardinality=100
 ====
 # Values statement in subqueries with predicate
 select * from (select y from (values((1 as y),(11))) a where y < 10) b
@@ -969,11 +1119,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=1B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=1B cardinality=1
 ====
 # Mixed constant and non-constant select; the predicate is evaluated directly
 # by the non-const select
@@ -989,10 +1141,12 @@ PLAN-ROOT SINK
 00:UNION
 |  constant-operands=1
 |  pass-through-operands: all
+|  row-size=1B cardinality=731
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.tinyint_col < 10
+   row-size=1B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1001,10 +1155,12 @@ PLAN-ROOT SINK
 00:UNION
 |  constant-operands=1
 |  pass-through-operands: all
+|  row-size=1B cardinality=731
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.tinyint_col < 10
+   row-size=1B cardinality=730
 ====
 # Union of constant selects in subquery
 select * from (select 1 as y union all select 2 union all select * from (select 11) a) b
@@ -1014,11 +1170,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=2
+   row-size=1B cardinality=2
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=2
+   row-size=1B cardinality=2
 ====
 # Union of values statements in subquery
 # TODO: We could combine the merge nodes below.
@@ -1029,15 +1187,19 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  constant-operands=2
+|  row-size=1B cardinality=2
 |
 01:UNION
+   row-size=1B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
 |  constant-operands=2
+|  row-size=1B cardinality=2
 |
 01:UNION
+   row-size=1B cardinality=0
 ====
 # Inner join on inline views made up of unions of constant selects
 select * from
@@ -1051,39 +1213,49 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: b = b
+|  row-size=6B cardinality=2
 |
 |--02:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a = a
+|  row-size=4B cardinality=2
 |
 |--01:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 00:UNION
    constant-operands=2
+   row-size=2B cardinality=2
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: b = b
+|  row-size=6B cardinality=2
 |
 |--06:EXCHANGE [UNPARTITIONED]
 |  |
 |  02:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a = a
+|  row-size=4B cardinality=2
 |
 |--05:EXCHANGE [UNPARTITIONED]
 |  |
 |  01:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 00:UNION
    constant-operands=2
+   row-size=2B cardinality=2
 ====
 # Semi and inner join on a table and on inline views made up of constant selects
 select * from functional.alltypessmall x
@@ -1097,20 +1269,25 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: x.id + 2 = b
 |  runtime filters: RF000 <- b
+|  row-size=91B cardinality=2
 |
 |--02:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: x.id = a
 |  runtime filters: RF002 <- a
+|  row-size=89B cardinality=2
 |
 |--01:UNION
 |     constant-operands=2
+|     row-size=1B cardinality=2
 |
 00:SCAN HDFS [functional.alltypessmall x]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> x.id + 2, RF002 -> x.id
+   row-size=89B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1119,24 +1296,29 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: x.id + 2 = b
 |  runtime filters: RF000 <- b
+|  row-size=91B cardinality=2
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 03:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: x.id = a
 |  runtime filters: RF002 <- a
+|  row-size=89B cardinality=2
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:UNION
 |     constant-operands=2
+|     row-size=1B cardinality=2
 |
 00:SCAN HDFS [functional.alltypessmall x]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> x.id + 2, RF002 -> x.id
+   row-size=89B cardinality=100
 ====
 # Tests that views correctly reanalyze cloned exprs. (IMPALA-984)
 select b.* from functional.decimal_tbl a left outer join
@@ -1147,12 +1329,15 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.d1 = d1
+|  row-size=8B cardinality=unavailable
 |
 |--01:SCAN HDFS [functional.decimal_tbl]
 |     partitions=1/1 files=1 size=195B
+|     row-size=4B cardinality=unavailable
 |
 00:SCAN HDFS [functional.decimal_tbl a]
    partitions=1/1 files=1 size=195B
+   row-size=4B cardinality=unavailable
 ====
 # Test predicate assignment through inline view when the query contains
 # group by and distinct (IMPALA-1165)
@@ -1168,13 +1353,16 @@ PLAN-ROOT SINK
 |  output: sum(foo)
 |  group by: foo
 |  limit: 10
+|  row-size=16B cardinality=10
 |
 01:AGGREGATE
 |  group by: int_col + int_col, int_col + int_col
+|  row-size=16B cardinality=11
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
    predicates: int_col + int_col = 10
+   row-size=4B cardinality=11
 ====
 # Test enforcement of inline-view slot equivalences when the inline-view
 # contains an outer join (IMPALA-1441)
@@ -1188,13 +1376,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: t2.int_col = t1.int_col, t2.tinyint_col = t1.tinyint_col
 |  runtime filters: RF000 <- t1.int_col, RF001 <- t1.tinyint_col
+|  row-size=10B cardinality=5.84K
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
 |     partitions=4/4 files=4 size=460B
+|     row-size=5B cardinality=8
 |
 01:SCAN HDFS [functional.alltypes t2]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t2.int_col, RF001 -> t2.tinyint_col
+   row-size=5B cardinality=7.30K
 ====
 # IMPALA-1459: Test correct assignment of On-clause predicate from an enclosing block
 # inside an inline view with an outer join.
@@ -1211,28 +1402,35 @@ PLAN-ROOT SINK
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=16B cardinality=9
 |
 |--05:SCAN HDFS [functional.alltypestiny c]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = c.id
 |  other predicates: a.id < b.id, a.id = b.id
+|  row-size=12B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF002 <- b.id
+|  row-size=8B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id, RF002 -> a.id
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-2665: Test correct assignment of On-clause predicate from an enclosing block
 # inside an inline view with an outer join.
@@ -1248,24 +1446,29 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = t1.id
 |  runtime filters: RF000 <- t1.id
+|  row-size=12B cardinality=73
 |
 |--00:SCAN HDFS [functional.alltypes t1]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t1.id < 10
+|     row-size=4B cardinality=730
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = b.int_col
 |  other predicates: b.int_col IS NULL, b.int_col < 10
+|  row-size=8B cardinality=730
 |
 |--02:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: b.int_col < 10
 |     runtime filters: RF000 -> b.int_col
+|     row-size=4B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.id < 10
    runtime filters: RF000 -> a.id
+   row-size=4B cardinality=730
 ====
 # IMPALA-2643: Test inline views with duplicate exprs in their select list.
 # Inferred predicate referencing the same expr gets filtered out.
@@ -1279,6 +1482,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=8B cardinality=8
 ====
 # IMPALA-2643: Explicit predicates remain unaffected.
 select * from
@@ -1292,6 +1496,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    predicates: bigint_col = bigint_col
+   row-size=8B cardinality=1
 ====
 # IMPALA-2643: Test aggregation.
 # Inferred predicate referencing the same expr gets filtered out.
@@ -1305,9 +1510,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(bigint_col)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=8B cardinality=8
 ====
 # IMPALA-2643: Explicit predicates remain unaffected.
 select * from
@@ -1321,7 +1528,9 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: sum(bigint_col)
 |  having: sum(bigint_col) = sum(bigint_col)
+|  row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=8B cardinality=8
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test b/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test
index f2ecb2c..e6432da 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test
@@ -8,20 +8,24 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=17B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=17B cardinality=7.30K
 |
 01:EXCHANGE [HASH(year,month)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # IMPALA-4166: insert with noshuffle hint into tables with sort.columns property adds
 # sort node.
@@ -33,18 +37,22 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=17B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=17B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # IMPALA-4166: insert into tables with sort.columns property adds sort node. Clustering
 # columns are added to the sort columns. noclustered hint is ignored when sort.columns
@@ -57,20 +65,24 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=17B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=17B cardinality=7.30K
 |
 01:EXCHANGE [HASH(year,month)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # IMPALA-4166: insert into tables with sort.columns property adds sort node.
 insert into table test_sort_by.t_nopart /*+ shuffle */
@@ -81,20 +93,24 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 01:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=9B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |  partitions=1
 |
 02:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=9B cardinality=7.30K
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ====
 # IMPALA-4166: insert with noshuffle hint into tables with sort.columns property adds
 # sort node.
@@ -106,18 +122,22 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 01:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=9B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |  partitions=1
 |
 01:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=9B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ====
 # IMPALA-4166: sort columns are correct when using an identity column permutation.
 insert into table test_sort_by.t_nopart (id, int_col, bool_col) /*+ shuffle */
@@ -128,20 +148,24 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 01:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=9B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |  partitions=1
 |
 02:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=9B cardinality=7.30K
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ====
 # IMPALA-4166: sort columns are correct when using a non-trivial column permutation.
 insert into table test_sort_by.t_nopart (bool_col, id, int_col) /*+ shuffle */
@@ -152,20 +176,24 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 01:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=9B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |  partitions=1
 |
 02:SORT
 |  order by: int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=9B cardinality=7.30K
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ====
 # IMPALA-4166: sort columns are correct when using a partial column permutation.
 insert into table test_sort_by.t_nopart (bool_col, id) /*+ shuffle */
@@ -176,20 +204,24 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 01:SORT
 |  order by: bool_col ASC NULLS LAST
+|  row-size=5B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=5B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |  partitions=1
 |
 02:SORT
 |  order by: bool_col ASC NULLS LAST
+|  row-size=5B cardinality=7.30K
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=5B cardinality=7.30K
 ====
 # IMPALA-4166: no sort node is added when using a partial column permutation and none of
 # the sort columns appear in the permutation.
@@ -201,6 +233,7 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |  partitions=1
@@ -209,6 +242,7 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-4166: sort columns with a join
 insert into table test_sort_by.t partition(year, month) /*+ noclustered */
@@ -221,26 +255,32 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |
 04:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=17B cardinality=10
 |
 03:TOP-N [LIMIT=10]
 |  order by: string_col ASC
+|  row-size=30B cardinality=10
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=34B cardinality=7.30K
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=9B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.id
+   row-size=25B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 07:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
+|  row-size=17B cardinality=10
 |
 06:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: string_col ASC
@@ -248,21 +288,25 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |
 03:TOP-N [LIMIT=10]
 |  order by: string_col ASC
+|  row-size=30B cardinality=10
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: b.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=34B cardinality=7.30K
 |
 |--05:EXCHANGE [HASH(a.id)]
 |  |
 |  00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=9B cardinality=7.30K
 |
 04:EXCHANGE [HASH(b.id)]
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.id
+   row-size=25B cardinality=7.30K
 ====
 # IMPALA-4166: sort columns with a join and agg
 insert into table test_sort_by.t partition(year, month) /*+ noclustered */
@@ -275,54 +319,65 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(b.year,a.month)]
 |
 04:SORT
 |  order by: b.year ASC NULLS LAST, a.month ASC NULLS LAST, max(b.int_col) ASC NULLS LAST, min(a.bool_col) ASC NULLS LAST
+|  row-size=17B cardinality=7.30K
 |
 03:AGGREGATE [FINALIZE]
 |  output: max(b.int_col), min(a.bool_col)
 |  group by: a.id, b.year, a.month
+|  row-size=17B cardinality=7.30K
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=21B cardinality=7.30K
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=9B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.id
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(b.year,a.month)]
 |  partitions=24
 |
 09:SORT
 |  order by: b.year ASC NULLS LAST, a.month ASC NULLS LAST, max(b.int_col) ASC NULLS LAST, min(a.bool_col) ASC NULLS LAST
+|  row-size=17B cardinality=7.30K
 |
 08:EXCHANGE [HASH(b.year,a.month)]
 |
 07:AGGREGATE [FINALIZE]
 |  output: max:merge(b.int_col), min:merge(a.bool_col)
 |  group by: a.id, b.year, a.month
+|  row-size=17B cardinality=7.30K
 |
 06:EXCHANGE [HASH(a.id,b.year,a.month)]
 |
 03:AGGREGATE [STREAMING]
 |  output: max(b.int_col), min(a.bool_col)
 |  group by: a.id, b.year, a.month
+|  row-size=17B cardinality=7.30K
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: b.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=21B cardinality=7.30K
 |
 |--05:EXCHANGE [HASH(a.id)]
 |  |
 |  00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=9B cardinality=7.30K
 |
 04:EXCHANGE [HASH(b.id)]
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.id
+   row-size=12B cardinality=7.30K
 ====
 # IMPALA-5339: Sort columns with a union to trigger expr rewrite
 insert into table test_sort_by.t_nopart
@@ -333,24 +388,30 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 02:SORT
 |  order by: CAST(id AS INT) ASC NULLS LAST
+|  row-size=1B cardinality=2
 |
 01:AGGREGATE [FINALIZE]
 |  group by: id
+|  row-size=1B cardinality=2
 |
 00:UNION
    constant-operands=2
+   row-size=1B cardinality=2
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |  partitions=1
 |
 02:SORT
 |  order by: CAST(id AS INT) ASC NULLS LAST
+|  row-size=1B cardinality=2
 |
 01:AGGREGATE [FINALIZE]
 |  group by: id
+|  row-size=1B cardinality=2
 |
 00:UNION
    constant-operands=2
+   row-size=1B cardinality=2
 ====
 # IMPALA-5339: Sort columns with a subquery to trigger expr rewrite
 insert into table test_sort_by.t_nopart
@@ -361,45 +422,56 @@ WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |
 04:SORT
 |  order by: id ASC NULLS LAST
+|  row-size=4B cardinality=0
 |
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = min(id)
 |  runtime filters: RF000 <- min(id)
+|  row-size=4B cardinality=0
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
+|  |  row-size=4B cardinality=0
 |  |
 |  01:SCAN HDFS [test_sort_by.t]
 |     partitions=0/0 files=0 size=0B
+|     row-size=4B cardinality=0
 |
 00:SCAN HDFS [test_sort_by.t_nopart]
    partitions=1/0 files=0 size=0B
    runtime filters: RF000 -> id
+   row-size=4B cardinality=0
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [test_sort_by.t_nopart, OVERWRITE=false]
 |  partitions=1
 |
 07:SORT
 |  order by: id ASC NULLS LAST
+|  row-size=4B cardinality=0
 |
 03:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: id = min(id)
 |  runtime filters: RF000 <- min(id)
+|  row-size=4B cardinality=0
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  05:AGGREGATE [FINALIZE]
 |  |  output: min:merge(id)
+|  |  row-size=4B cardinality=0
 |  |
 |  04:EXCHANGE [UNPARTITIONED]
 |  |
 |  02:AGGREGATE
 |  |  output: min(id)
+|  |  row-size=4B cardinality=0
 |  |
 |  01:SCAN HDFS [test_sort_by.t]
 |     partitions=0/0 files=0 size=0B
+|     row-size=4B cardinality=0
 |
 00:SCAN HDFS [test_sort_by.t_nopart]
    partitions=1/0 files=0 size=0B
    runtime filters: RF000 -> id
+   row-size=4B cardinality=0
 ====


[20/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/insert.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/insert.test b/testdata/workloads/functional-planner/queries/PlannerTest/insert.test
index e46bf94..2e85c6a 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/insert.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/insert.test
@@ -9,7 +9,9 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 5
    partitions=1/24 files=1 size=20.36KB
+   row-size=81B cardinality=310
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=5/090501.txt 0:20853
@@ -18,7 +20,9 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 5
    partitions=1/24 files=1 size=20.36KB
+   row-size=81B cardinality=310
 ====
 # insert into a static partition
 insert into table functional.alltypessmall
@@ -32,7 +36,9 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,4
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 5
    partitions=1/24 files=1 size=20.36KB
+   row-size=81B cardinality=310
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=5/090501.txt 0:20853
@@ -41,7 +47,9 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,4
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 5
    partitions=1/24 files=1 size=20.36KB
+   row-size=81B cardinality=310
 ====
 # overwrite a static partition
 insert overwrite table functional.alltypessmall
@@ -55,7 +63,9 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=true, PARTITION-KEYS=(2009,4)
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 5
    partitions=1/24 files=1 size=20.36KB
+   row-size=81B cardinality=310
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=5/090501.txt 0:20853
@@ -64,7 +74,9 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=true, PARTITION-KEYS=(2009,4)
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 5
    partitions=1/24 files=1 size=20.36KB
+   row-size=81B cardinality=310
 ====
 # insert into fully dynamic partitions
 insert into table functional.alltypessmall
@@ -79,9 +91,12 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,m
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=89B cardinality=610
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
@@ -92,11 +107,14 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,m
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=610
 |
 01:EXCHANGE [HASH(year,month)]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=89B cardinality=610
 ====
 # IMPALA-5293: noclustered hint prevents adding sort node
 insert into table functional.alltypessmall
@@ -110,7 +128,9 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,m
 |  partitions=24
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=89B cardinality=610
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
@@ -122,7 +142,9 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,m
 01:EXCHANGE [HASH(year,month)]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=89B cardinality=610
 ====
 # insert into fully dynamic partitions. The source table has no stats and the insert
 # statement has a partition clause, so hash partition before the sink.
@@ -138,20 +160,26 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(int_co
 |
 01:SORT
 |  order by: int_col ASC NULLS LAST, int_col ASC NULLS LAST
+|  row-size=72B cardinality=unavailable
 |
 00:SCAN HDFS [functional_seq_snap.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=11.34KB
+   row-size=72B cardinality=unavailable
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(int_col,int_col)]
 |  partitions=unavailable
 |
 02:SORT
 |  order by: int_col ASC NULLS LAST, int_col ASC NULLS LAST
+|  row-size=72B cardinality=unavailable
 |
 01:EXCHANGE [HASH(int_col,int_col)]
 |
 00:SCAN HDFS [functional_seq_snap.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=11.34KB
+   row-size=72B cardinality=unavailable
 ====
 # insert into fully dynamic partitions;
 # partitioned output doesn't require repartitioning
@@ -169,13 +197,17 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,m
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=80B cardinality=24
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
 |  group by: year, month
+|  row-size=80B cardinality=24
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=89B cardinality=610
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
@@ -186,19 +218,24 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,m
 |
 04:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=80B cardinality=24
 |
 03:AGGREGATE [FINALIZE]
 |  output: min:merge(id), min:merge(bool_col), min:merge(tinyint_col), min:merge(smallint_col), min:merge(int_col), min:merge(bigint_col), min:merge(float_col), min:merge(double_col), min:merge(date_string_col), min:merge(string_col), min:merge(timestamp_col)
 |  group by: year, month
+|  row-size=80B cardinality=24
 |
 02:EXCHANGE [HASH(year,month)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
 |  group by: year, month
+|  row-size=80B cardinality=24
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=89B cardinality=610
 ====
 # insert into a partially dynamic partition
 insert into table functional.alltypessmall
@@ -213,9 +250,12 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,m
 |
 01:SORT
 |  order by: month ASC NULLS LAST
+|  row-size=85B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=85B cardinality=610
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
@@ -226,11 +266,14 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,m
 |
 02:SORT
 |  order by: month ASC NULLS LAST
+|  row-size=85B cardinality=610
 |
 01:EXCHANGE [HASH(month)]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=85B cardinality=610
 ====
 # insert into a partially dynamic partition
 # partitioned output doesn't require repartitioning
@@ -248,13 +291,17 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,m
 |
 02:SORT
 |  order by: month ASC NULLS LAST
+|  row-size=76B cardinality=12
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
 |  group by: month
+|  row-size=76B cardinality=12
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=85B cardinality=610
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
@@ -265,19 +312,24 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,m
 |
 04:SORT
 |  order by: month ASC NULLS LAST
+|  row-size=76B cardinality=12
 |
 03:AGGREGATE [FINALIZE]
 |  output: min:merge(id), min:merge(bool_col), min:merge(tinyint_col), min:merge(smallint_col), min:merge(int_col), min:merge(bigint_col), min:merge(float_col), min:merge(double_col), min:merge(date_string_col), min:merge(string_col), min:merge(timestamp_col)
 |  group by: month
+|  row-size=76B cardinality=12
 |
 02:EXCHANGE [HASH(month)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
 |  group by: month
+|  row-size=76B cardinality=12
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 10
    partitions=2/24 files=2 size=40.07KB
+   row-size=85B cardinality=610
 ====
 # insert into a partially dynamic partition
 insert into table functional.alltypessmall
@@ -292,9 +344,12 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,4
 |
 01:SORT
 |  order by: year ASC NULLS LAST
+|  row-size=85B cardinality=300
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year > 2009, month = 4
    partitions=1/24 files=1 size=19.71KB
+   row-size=85B cardinality=300
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2010/month=4/100401.txt 0:20179
@@ -304,9 +359,12 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,4
 |
 01:SORT
 |  order by: year ASC NULLS LAST
+|  row-size=85B cardinality=300
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year > 2009, month = 4
    partitions=1/24 files=1 size=19.71KB
+   row-size=85B cardinality=300
 ====
 # insert with limit from partitioned table.
 insert into table functional.alltypesnopart
@@ -318,8 +376,10 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 1
    partitions=1/24 files=1 size=19.95KB
    limit: 10
+   row-size=81B cardinality=10
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
@@ -331,8 +391,10 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  limit: 10
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 1
    partitions=1/24 files=1 size=19.95KB
    limit: 10
+   row-size=81B cardinality=10
 ====
 # static partition insert from a constant select
 insert into table functional.alltypessmall
@@ -345,12 +407,14 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4
 |
 00:UNION
    constant-operands=1
+   row-size=54B cardinality=1
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
 |  partitions=1
 |
 00:UNION
    constant-operands=1
+   row-size=54B cardinality=1
 ====
 # dynamic partition insert from a constant select
 insert into table functional.alltypessmall
@@ -363,12 +427,14 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4
 |
 00:UNION
    constant-operands=1
+   row-size=57B cardinality=1
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
 |  partitions=1
 |
 00:UNION
    constant-operands=1
+   row-size=57B cardinality=1
 ====
 # static partition insert from values statement
 insert into table functional.alltypessmall
@@ -382,12 +448,14 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4
 |
 00:UNION
    constant-operands=3
+   row-size=55B cardinality=3
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
 |  partitions=1
 |
 00:UNION
    constant-operands=3
+   row-size=55B cardinality=3
 ====
 # dynamic partition insert from values statement
 insert into table functional.alltypessmall
@@ -401,18 +469,22 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4
 |
 01:SORT
 |  order by: 2010 ASC NULLS LAST, 4 ASC NULLS LAST
+|  row-size=58B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=58B cardinality=3
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2010,4)]
 |  partitions=9
 |
 01:SORT
 |  order by: 2010 ASC NULLS LAST, 4 ASC NULLS LAST
+|  row-size=58B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=58B cardinality=3
 ====
 # test static partition insert from a query with grouped aggregation
 # we expect the insert fragment to be partitioned by the grouping exprs of the query stmt
@@ -427,9 +499,11 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
 01:AGGREGATE [FINALIZE]
 |  output: count(int_col)
 |  group by: string_col
+|  row-size=21B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
 |  partitions=1
@@ -437,15 +511,18 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
 |  group by: string_col
+|  row-size=21B cardinality=10
 |
 02:EXCHANGE [HASH(string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(int_col)
 |  group by: string_col
+|  row-size=21B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # test static partition insert from a query with distinct grouped aggregation
 # we expect the insert fragment to be partitioned by the grouping exprs of the query stmt
@@ -460,12 +537,15 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col)
 |  group by: string_col
+|  row-size=21B cardinality=10
 |
 01:AGGREGATE
 |  group by: string_col, int_col
+|  row-size=17B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
 |  partitions=1
@@ -473,23 +553,28 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2010,10)]
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
 |  group by: string_col
+|  row-size=21B cardinality=10
 |
 05:EXCHANGE [HASH(string_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(int_col)
 |  group by: string_col
+|  row-size=21B cardinality=10
 |
 04:AGGREGATE
 |  group by: string_col, int_col
+|  row-size=17B cardinality=100
 |
 03:EXCHANGE [HASH(string_col,int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: string_col, int_col
+|  row-size=17B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # test that the planner chooses to repartition before the table sink
 # alltypes has column stats and based on the product of the NDVs of year and month
@@ -502,11 +587,13 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 01:EXCHANGE [HASH(functional.alltypes.year,functional.alltypes.month)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # test noshuffle hint to prevent repartitioning (same query as above with hint)
 insert into table functional.alltypes partition(year, month) [noshuffle]
@@ -517,9 +604,11 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # same as above but with traditional commented hint at default hint location
 insert into table functional.alltypes partition(year, month) /* +noshuffle */
@@ -530,9 +619,11 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # same as above but with traditional commented hint at Oracle hint location
 insert /* +noshuffle */ into table functional.alltypes partition(year, month)
@@ -543,9 +634,11 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # same as above but with end-of-line commented hint
 insert into table functional.alltypes partition(year, month)
@@ -557,9 +650,11 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # test that the planner does not repartition before the table sink
 # alltypes has column stats and since year only has 2 distinct values the planner
@@ -574,9 +669,11 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,1)]
 |
 01:SORT
 |  order by: year ASC NULLS LAST
+|  row-size=85B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=85B cardinality=7.30K
 ====
 # test shuffle hint to force repartitioning (same query as above with hint)
 insert into table functional.alltypes partition(year, month=1) [shuffle]
@@ -589,11 +686,13 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,1)]
 |
 02:SORT
 |  order by: year ASC NULLS LAST
+|  row-size=85B cardinality=7.30K
 |
 01:EXCHANGE [HASH(year)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=85B cardinality=7.30K
 ====
 # test insert/select stmt that contains an analytic function (IMPALA-1400)
 insert into table functional.alltypestiny partition(year=2009, month=1)
@@ -610,12 +709,15 @@ WRITE TO HDFS [functional.alltypestiny, OVERWRITE=false, PARTITION-KEYS=(2009,1)
 |  partition by: id
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING
+|  row-size=97B cardinality=8
 |
 01:SORT
 |  order by: id ASC NULLS FIRST
+|  row-size=81B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=81B cardinality=8
 ====
 # IMPALA-3930: Test insert with shuffle hint on constant partition exprs. The table sink
 # is executed at the coordinator.
@@ -631,6 +733,7 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2009,1)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=81B cardinality=7.30K
 ====
 # IMPALA-3930: Same as above but with a dynamic partition insert.
 insert into table functional.alltypes partition(year, month) /* +shuffle */
@@ -645,6 +748,7 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2009,1)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=81B cardinality=7.30K
 ====
 # IMPALA-3930: Same as above but with a mix of static/dynamic partition exprs, and
 # with more complex constant exprs.
@@ -660,6 +764,7 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(2009,5)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=81B cardinality=7.30K
 ====
 # Test insert into an unpartitioned table with shuffle hint.
 insert into table functional.alltypesnopart /* +shuffle */
@@ -674,6 +779,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=81B cardinality=7.30K
 ====
 # IMPALA-5293: ensure insert into partitioned table adds sort node without clustered hint.
 insert into table functional.alltypes partition(year, month)
@@ -684,20 +790,24 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 01:EXCHANGE [HASH(functional.alltypes.year,functional.alltypes.month)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-5293: ensure insert into partitioned table adds sort node without clustered hint.
 insert into table functional.alltypes partition(year, month) /*+ noshuffle */
@@ -708,18 +818,22 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-5293: ensure insert into partitioned table adds sort node without clustered hint.
 # Subquery in WHERE-clause exercises the reset() + analyze() path during rewrite.
@@ -732,49 +846,60 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)
 |
 04:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=730
 |
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: int_col = max(int_col)
 |  runtime filters: RF000 <- max(int_col)
+|  row-size=89B cardinality=730
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: max(int_col)
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> int_col
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=24
 |
 08:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=730
 |
 07:EXCHANGE [HASH(functional.alltypes.year,functional.alltypes.month)]
 |
 03:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: int_col = max(int_col)
 |  runtime filters: RF000 <- max(int_col)
+|  row-size=89B cardinality=730
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  05:AGGREGATE [FINALIZE]
 |  |  output: max:merge(int_col)
+|  |  row-size=4B cardinality=1
 |  |
 |  04:EXCHANGE [UNPARTITIONED]
 |  |
 |  02:AGGREGATE
 |  |  output: max(int_col)
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> int_col
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-5293: ensure insert into non-partitioned table does not add sort node.
 insert into table functional.alltypesnopart
@@ -785,12 +910,14 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |
 00:SCAN HDFS [functional.alltypesnopart]
    partitions=1/1 files=0 size=0B
+   row-size=72B cardinality=0
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypesnopart]
    partitions=1/1 files=0 size=0B
+   row-size=72B cardinality=0
 ====
 # IMPALA-5293: ensure insert into non-partitioned table does not add sort node.
 insert into table functional.alltypesnopart /*+ shuffle */
@@ -801,6 +928,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |
 00:SCAN HDFS [functional.alltypesnopart]
    partitions=1/1 files=0 size=0B
+   row-size=72B cardinality=0
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  partitions=1
@@ -809,4 +937,5 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |
 00:SCAN HDFS [functional.alltypesnopart]
    partitions=1/1 files=0 size=0B
+   row-size=72B cardinality=0
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test b/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test
index fc6abeb..01ff807 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test
@@ -27,32 +27,39 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 5) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: o.o_custkey = c.c_custkey
 |  runtime filters: RF000 <- c.c_custkey
+|  row-size=117B cardinality=17.56K
 |
 |--00:SCAN HDFS [tpch.customer c]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: c.c_mktsegment = 'BUILDING'
+|     row-size=29B cardinality=30.00K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: l.l_orderkey = o.o_orderkey
 |  runtime filters: RF002 <- o.o_orderkey
+|  row-size=88B cardinality=57.58K
 |
 |--01:SCAN HDFS [tpch.orders o]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-03-15'
 |     runtime filters: RF000 -> o.o_custkey
+|     row-size=42B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem l]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate > '1995-03-15'
    runtime filters: RF002 -> l.l_orderkey
+   row-size=46B cardinality=600.12K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -62,30 +69,36 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 5) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 09:EXCHANGE [HASH(l_orderkey,o_orderdate,o_shippriority)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o.o_custkey = c.c_custkey
 |  runtime filters: RF000 <- c.c_custkey
+|  row-size=117B cardinality=17.56K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpch.customer c]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: c.c_mktsegment = 'BUILDING'
+|     row-size=29B cardinality=30.00K
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l.l_orderkey = o.o_orderkey
 |  runtime filters: RF002 <- o.o_orderkey
+|  row-size=88B cardinality=57.58K
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
@@ -93,11 +106,13 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-03-15'
 |     runtime filters: RF000 -> o.o_custkey
+|     row-size=42B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem l]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate > '1995-03-15'
    runtime filters: RF002 -> l.l_orderkey
+   row-size=46B cardinality=600.12K
 ====
 # Q3 - Shipping Priority Query
 # straight_join prevents join order optimization
@@ -128,32 +143,39 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 5) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=575.77K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: o.o_orderkey = l.l_orderkey
 |  runtime filters: RF000 <- l.l_orderkey
+|  row-size=117B cardinality=575.77K
 |
 |--02:SCAN HDFS [tpch.lineitem l]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: l_shipdate > '1995-03-15'
+|     row-size=46B cardinality=600.12K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: c.c_custkey = o.o_custkey
 |  runtime filters: RF002 <- o.o_custkey
+|  row-size=71B cardinality=150.00K
 |
 |--01:SCAN HDFS [tpch.orders o]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-03-15'
 |     runtime filters: RF000 -> o.o_orderkey
+|     row-size=42B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer c]
    partitions=1/1 files=1 size=23.08MB
    predicates: c.c_mktsegment = 'BUILDING'
    runtime filters: RF002 -> c.c_custkey
+   row-size=29B cardinality=30.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -163,30 +185,36 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 5) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=575.77K
 |
 09:EXCHANGE [HASH(l_orderkey,o_orderdate,o_shippriority)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=575.77K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o.o_orderkey = l.l_orderkey
 |  runtime filters: RF000 <- l.l_orderkey
+|  row-size=117B cardinality=575.77K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpch.lineitem l]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: l_shipdate > '1995-03-15'
+|     row-size=46B cardinality=600.12K
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c.c_custkey = o.o_custkey
 |  runtime filters: RF002 <- o.o_custkey
+|  row-size=71B cardinality=150.00K
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
@@ -194,11 +222,13 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-03-15'
 |     runtime filters: RF000 -> o.o_orderkey
+|     row-size=42B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer c]
    partitions=1/1 files=1 size=23.08MB
    predicates: c.c_mktsegment = 'BUILDING'
    runtime filters: RF002 -> c.c_custkey
+   row-size=29B cardinality=30.00K
 ====
 # Q5 - Local Supplier Volume Query
 # Modifications: Added round() call, converted selects from multiple tables
@@ -231,55 +261,68 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=100]
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 5) DESC
+|  row-size=35B cardinality=25
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF000 <- r_regionkey
+|  row-size=134B cardinality=115.16K
 |
 |--05:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'ASIA'
+|     row-size=21B cardinality=1
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF002 <- n_nationkey
+|  row-size=113B cardinality=575.77K
 |
 |--04:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF000 -> n_regionkey
+|     row-size=23B cardinality=25
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = s_nationkey, l_suppkey = s_suppkey
 |  runtime filters: RF004 <- s_nationkey, RF005 <- s_suppkey
+|  row-size=90B cardinality=575.77K
 |
 |--03:SCAN HDFS [tpch.supplier s]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF008 <- c_custkey
+|  row-size=80B cardinality=575.77K
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF002 -> tpch.customer.c_nationkey, RF004 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF010 <- o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--01:SCAN HDFS [tpch.orders o]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-01-01', o_orderdate >= '1994-01-01'
 |     runtime filters: RF008 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem l]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF005 -> l_suppkey, RF010 -> l_orderkey
+   row-size=32B cardinality=6.00M
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -289,60 +332,72 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=100]
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 5) DESC
+|  row-size=35B cardinality=25
 |
 19:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 18:EXCHANGE [HASH(n_name)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF000 <- r_regionkey
+|  row-size=134B cardinality=115.16K
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  05:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'ASIA'
+|     row-size=21B cardinality=1
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF002 <- n_nationkey
+|  row-size=113B cardinality=575.77K
 |
 |--16:EXCHANGE [BROADCAST]
 |  |
 |  04:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF000 -> n_regionkey
+|     row-size=23B cardinality=25
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = s_nationkey, l_suppkey = s_suppkey
 |  runtime filters: RF004 <- s_nationkey, RF005 <- s_suppkey
+|  row-size=90B cardinality=575.77K
 |
 |--15:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpch.supplier s]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF008 <- c_custkey
+|  row-size=80B cardinality=575.77K
 |
 |--14:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF002 -> tpch.customer.c_nationkey, RF004 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF010 <- o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
@@ -350,10 +405,12 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-01-01', o_orderdate >= '1994-01-01'
 |     runtime filters: RF008 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem l]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF005 -> l_suppkey, RF010 -> l_orderkey
+   row-size=32B cardinality=6.00M
 ====
 # Q2 - Minimum Cost Supplier Query
 select
@@ -386,38 +443,47 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: n.n_regionkey = r.r_regionkey
 |  runtime filters: RF000 <- r.r_regionkey
+|  row-size=325B cardinality=1.01K
 |
 |--04:SCAN HDFS [tpch.region r]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r.r_name = 'EUROPE'
+|     row-size=21B cardinality=1
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: s.s_nationkey = n.n_nationkey
 |  runtime filters: RF002 <- n.n_nationkey
+|  row-size=304B cardinality=5.05K
 |
 |--03:SCAN HDFS [tpch.nation n]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF000 -> n.n_regionkey
+|     row-size=23B cardinality=25
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: s.s_suppkey = ps.ps_suppkey
 |  runtime filters: RF004 <- ps.ps_suppkey
+|  row-size=281B cardinality=5.05K
 |
 |--05:HASH JOIN [INNER JOIN]
 |  |  hash predicates: ps.ps_partkey = p.p_partkey
 |  |  runtime filters: RF006 <- p.p_partkey
+|  |  row-size=95B cardinality=5.05K
 |  |
 |  |--00:SCAN HDFS [tpch.part p]
 |  |     partitions=1/1 files=1 size=22.83MB
 |  |     predicates: p.p_size = 15, p.p_type LIKE '%BRASS'
+|  |     row-size=71B cardinality=1.26K
 |  |
 |  02:SCAN HDFS [tpch.partsupp ps]
 |     partitions=1/1 files=1 size=112.71MB
 |     runtime filters: RF006 -> ps.ps_partkey
+|     row-size=24B cardinality=800.00K
 |
 01:SCAN HDFS [tpch.supplier s]
    partitions=1/1 files=1 size=1.33MB
    runtime filters: RF002 -> s.s_nationkey, RF004 -> s.s_suppkey
+   row-size=187B cardinality=10.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -426,46 +492,55 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n.n_regionkey = r.r_regionkey
 |  runtime filters: RF000 <- r.r_regionkey
+|  row-size=325B cardinality=1.01K
 |
 |--12:EXCHANGE [BROADCAST]
 |  |
 |  04:SCAN HDFS [tpch.region r]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r.r_name = 'EUROPE'
+|     row-size=21B cardinality=1
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s.s_nationkey = n.n_nationkey
 |  runtime filters: RF002 <- n.n_nationkey
+|  row-size=304B cardinality=5.05K
 |
 |--11:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpch.nation n]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF000 -> n.n_regionkey
+|     row-size=23B cardinality=25
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s.s_suppkey = ps.ps_suppkey
 |  runtime filters: RF004 <- ps.ps_suppkey
+|  row-size=281B cardinality=5.05K
 |
 |--10:EXCHANGE [BROADCAST]
 |  |
 |  05:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: ps.ps_partkey = p.p_partkey
 |  |  runtime filters: RF006 <- p.p_partkey
+|  |  row-size=95B cardinality=5.05K
 |  |
 |  |--09:EXCHANGE [BROADCAST]
 |  |  |
 |  |  00:SCAN HDFS [tpch.part p]
 |  |     partitions=1/1 files=1 size=22.83MB
 |  |     predicates: p.p_size = 15, p.p_type LIKE '%BRASS'
+|  |     row-size=71B cardinality=1.26K
 |  |
 |  02:SCAN HDFS [tpch.partsupp ps]
 |     partitions=1/1 files=1 size=112.71MB
 |     runtime filters: RF006 -> ps.ps_partkey
+|     row-size=24B cardinality=800.00K
 |
 01:SCAN HDFS [tpch.supplier s]
    partitions=1/1 files=1 size=1.33MB
    runtime filters: RF002 -> s.s_nationkey, RF004 -> s.s_suppkey
+   row-size=187B cardinality=10.00K
 ====
 # Q4 - Order Priority Checking Query
 # the largest input is prevented from becoming the leftmost input by the semi-join
@@ -489,23 +564,28 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=10]
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
+|  row-size=50B cardinality=150.00K
 |
 |--00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1993-10-01', o_orderdate >= '1993-07-01'
+|     row-size=50B cardinality=150.00K
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_commitdate < l_receiptdate
    runtime filters: RF000 -> l_orderkey
+   row-size=52B cardinality=600.12K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -515,26 +595,31 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=10]
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 07:EXCHANGE [HASH(o_orderpriority)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
+|  row-size=50B cardinality=150.00K
 |
 |--06:EXCHANGE [HASH(o_orderkey)]
 |  |
 |  00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1993-10-01', o_orderdate >= '1993-07-01'
+|     row-size=50B cardinality=150.00K
 |
 05:EXCHANGE [HASH(l_orderkey)]
 |
@@ -542,6 +627,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_commitdate < l_receiptdate
    runtime filters: RF000 -> l_orderkey
+   row-size=52B cardinality=600.12K
 ====
 select o_orderpriority, count(*) as order_count
 from tpch.orders
@@ -554,19 +640,24 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=10]
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
+|  row-size=36B cardinality=7.50M
 |
 |--00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=28B cardinality=1.50M
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
+   row-size=8B cardinality=6.00M
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -576,29 +667,35 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=10]
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 07:EXCHANGE [HASH(o_orderpriority)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
+|  row-size=36B cardinality=7.50M
 |
 |--06:EXCHANGE [HASH(o_orderkey)]
 |  |
 |  00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=28B cardinality=1.50M
 |
 05:EXCHANGE [HASH(l_orderkey)]
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
+   row-size=8B cardinality=6.00M
 ====
 select o_orderpriority, count(*) as order_count
 from tpch.orders
@@ -611,19 +708,24 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=10]
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
+|  row-size=36B cardinality=6.00M
 |
 |--00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=28B cardinality=1.50M
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
+   row-size=8B cardinality=6.00M
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -633,29 +735,35 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=10]
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 07:EXCHANGE [HASH(o_orderpriority)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
+|  row-size=36B cardinality=6.00M
 |
 |--06:EXCHANGE [HASH(o_orderkey)]
 |  |
 |  00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=28B cardinality=1.50M
 |
 05:EXCHANGE [HASH(l_orderkey)]
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
+   row-size=8B cardinality=6.00M
 ====
 # order does not become the leftmost input because of the outer join;
 # the join with nation is done first because it reduces the intermediate output
@@ -669,62 +777,75 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=39B cardinality=60.00K
 |
 |--02:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     predicates: n_name = 'x'
+|     row-size=21B cardinality=1
 |
 03:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF002 <- c_custkey
+|  row-size=18B cardinality=1.50M
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF000 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 01:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
    runtime filters: RF002 -> o_custkey
+   row-size=8B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 10:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 09:EXCHANGE [UNPARTITIONED]
 |
 05:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=39B cardinality=60.00K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     predicates: n_name = 'x'
+|     row-size=21B cardinality=1
 |
 03:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF002 <- c_custkey
+|  row-size=18B cardinality=1.50M
 |
 |--07:EXCHANGE [HASH(c_custkey)]
 |  |
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF000 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 06:EXCHANGE [HASH(o_custkey)]
 |
 01:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
    runtime filters: RF002 -> o_custkey
+   row-size=8B cardinality=1.50M
 ====
 # order does not become the leftmost input because of the cross join;
 # the join with nation is done first because it reduces the intermediate output
@@ -738,54 +859,67 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=23B cardinality=9.00G
 |
 |--01:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=0B cardinality=1.50M
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=23B cardinality=6.00K
 |
 |--02:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     predicates: n_name = 'x'
+|     row-size=21B cardinality=1
 |
 00:SCAN HDFS [tpch.customer]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF000 -> c_nationkey
+   row-size=2B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 09:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 08:EXCHANGE [UNPARTITIONED]
 |
 05:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=23B cardinality=9.00G
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=0B cardinality=1.50M
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=23B cardinality=6.00K
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     predicates: n_name = 'x'
+|     row-size=21B cardinality=1
 |
 00:SCAN HDFS [tpch.customer]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF000 -> c_nationkey
+   row-size=2B cardinality=150.00K
 ====
 # Do not consider 'c' a candidate for the leftmost table (IMPALA-1281),
 # because doing so requires careful consideration of the joinOps of
@@ -798,17 +932,22 @@ cross join functional.alltypes c
 PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=4B cardinality=467.20K
 |
 |--03:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=0B cardinality=64
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=0B cardinality=8
 |  |
 |  00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
+|     row-size=0B cardinality=8
 |
 02:SCAN HDFS [functional.alltypes c]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # Test that tables are not re-ordered across outer/semi joins (IMPALA-860),
 # but the tables to the left/right of outer/semi joins are still re-ordered.
@@ -824,48 +963,60 @@ PLAN-ROOT SINK
 |
 11:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: t5.id = t4.id
 |  runtime filters: RF000 <- t4.id
+|  row-size=24B cardinality=7
 |
 |--09:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t4.id = t6.id
 |  |  runtime filters: RF002 <- t6.id
+|  |  row-size=20B cardinality=9
 |  |
 |  |--05:SCAN HDFS [functional.alltypestiny t6]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  08:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: t4.id = t3.id
+|  |  row-size=16B cardinality=11.00K
 |  |
 |  |--07:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: t3.id = t2.id
 |  |  |  runtime filters: RF004 <- t2.id
+|  |  |  row-size=12B cardinality=1
 |  |  |
 |  |  |--06:HASH JOIN [INNER JOIN]
 |  |  |  |  hash predicates: t2.id = t1.id
 |  |  |  |  runtime filters: RF006 <- t1.id
+|  |  |  |  row-size=8B cardinality=8
 |  |  |  |
 |  |  |  |--00:SCAN HDFS [functional.alltypestiny t1]
 |  |  |  |     partitions=4/4 files=4 size=460B
 |  |  |  |     runtime filters: RF002 -> t1.id
+|  |  |  |     row-size=4B cardinality=8
 |  |  |  |
 |  |  |  01:SCAN HDFS [functional.alltypes t2]
 |  |  |     partitions=24/24 files=24 size=478.45KB
 |  |  |     runtime filters: RF002 -> t2.id, RF006 -> t2.id
+|  |  |     row-size=4B cardinality=7.30K
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypessmall t3]
 |  |     partitions=4/4 files=4 size=6.32KB
 |  |     runtime filters: RF002 -> t3.id, RF004 -> t3.id
+|  |     row-size=4B cardinality=100
 |  |
 |  03:SCAN HDFS [functional.alltypesagg t4]
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> t4.id
+|     row-size=4B cardinality=11.00K
 |
 04:SCAN HDFS [functional.alltypes t5]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t5.id
+   row-size=4B cardinality=7.30K
 ====
 # No tables can be re-ordered because of semi and outer joins that must
 # remain at a fixed position in the plan (IMPALA-860).
@@ -881,47 +1032,59 @@ PLAN-ROOT SINK
 |
 13:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 12:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t6.id = t5.id
+|  row-size=20B cardinality=8
 |
 |--11:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t5.id = t3.id
 |  |  runtime filters: RF000 <- t3.id
+|  |  row-size=16B cardinality=1
 |  |
 |  |--10:HASH JOIN [RIGHT SEMI JOIN]
 |  |  |  hash predicates: t4.id = t3.id
 |  |  |  runtime filters: RF002 <- t3.id
+|  |  |  row-size=12B cardinality=1
 |  |  |
 |  |  |--09:HASH JOIN [INNER JOIN]
 |  |  |  |  hash predicates: t3.id = t2.id
 |  |  |  |  runtime filters: RF004 <- t2.id
+|  |  |  |  row-size=12B cardinality=1
 |  |  |  |
 |  |  |  |--08:HASH JOIN [RIGHT OUTER JOIN]
 |  |  |  |  |  hash predicates: t2.id = t1.id
 |  |  |  |  |  runtime filters: RF006 <- t1.id
+|  |  |  |  |  row-size=8B cardinality=8
 |  |  |  |  |
 |  |  |  |  |--00:SCAN HDFS [functional.alltypestiny t1]
 |  |  |  |  |     partitions=4/4 files=4 size=460B
+|  |  |  |  |     row-size=4B cardinality=8
 |  |  |  |  |
 |  |  |  |  01:SCAN HDFS [functional.alltypes t2]
 |  |  |  |     partitions=24/24 files=24 size=478.45KB
 |  |  |  |     runtime filters: RF006 -> t2.id
+|  |  |  |     row-size=4B cardinality=7.30K
 |  |  |  |
 |  |  |  02:SCAN HDFS [functional.alltypessmall t3]
 |  |  |     partitions=4/4 files=4 size=6.32KB
 |  |  |     runtime filters: RF004 -> t3.id
+|  |  |     row-size=4B cardinality=100
 |  |  |
 |  |  03:SCAN HDFS [functional.alltypesagg t4]
 |  |     partitions=11/11 files=11 size=814.73KB
 |  |     runtime filters: RF002 -> t4.id
+|  |     row-size=4B cardinality=11.00K
 |  |
 |  04:SCAN HDFS [functional.alltypes t5]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> t5.id
+|     row-size=4B cardinality=7.30K
 |
 05:SCAN HDFS [functional.alltypestiny t6]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ====
 # Check that a join in between outer/semi joins is re-ordered correctly.
 # We expect t4 is joined before t3.
@@ -937,49 +1100,61 @@ PLAN-ROOT SINK
 |
 13:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: t6.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=20B cardinality=1
 |
 |--11:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: t5.id = t4.id
 |  |  runtime filters: RF002 <- t4.id
+|  |  row-size=16B cardinality=1
 |  |
 |  |--10:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: t3.id = t2.id
 |  |  |  runtime filters: RF004 <- t2.id
+|  |  |  row-size=16B cardinality=1
 |  |  |
 |  |  |--09:HASH JOIN [INNER JOIN]
 |  |  |  |  hash predicates: t4.id = t2.id
 |  |  |  |  runtime filters: RF006 <- t2.id
+|  |  |  |  row-size=12B cardinality=1
 |  |  |  |
 |  |  |  |--08:HASH JOIN [RIGHT OUTER JOIN]
 |  |  |  |  |  hash predicates: t2.id = t1.id
 |  |  |  |  |  runtime filters: RF008 <- t1.id
+|  |  |  |  |  row-size=8B cardinality=8
 |  |  |  |  |
 |  |  |  |  |--00:SCAN HDFS [functional.alltypestiny t1]
 |  |  |  |  |     partitions=4/4 files=4 size=460B
+|  |  |  |  |     row-size=4B cardinality=8
 |  |  |  |  |
 |  |  |  |  01:SCAN HDFS [functional.alltypes t2]
 |  |  |  |     partitions=24/24 files=24 size=478.45KB
 |  |  |  |     runtime filters: RF008 -> t2.id
+|  |  |  |     row-size=4B cardinality=7.30K
 |  |  |  |
 |  |  |  03:SCAN HDFS [functional.alltypessmall t4]
 |  |  |     partitions=4/4 files=4 size=6.32KB
 |  |  |     runtime filters: RF006 -> t4.id
+|  |  |     row-size=4B cardinality=100
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypesagg t3]
 |  |     partitions=11/11 files=11 size=814.73KB
 |  |     runtime filters: RF004 -> t3.id
+|  |     row-size=4B cardinality=11.00K
 |  |
 |  04:SCAN HDFS [functional.alltypes t5]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF002 -> t5.id
+|     row-size=4B cardinality=7.30K
 |
 05:SCAN HDFS [functional.alltypestiny t6]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> t6.id
+   row-size=4B cardinality=8
 ====
 # Same above but using an anti join instead of a semi join.
 select count(*) from
@@ -994,47 +1169,59 @@ PLAN-ROOT SINK
 |
 13:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: t6.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=20B cardinality=1
 |
 |--11:HASH JOIN [RIGHT ANTI JOIN]
 |  |  hash predicates: t5.id = t4.id
+|  |  row-size=16B cardinality=1
 |  |
 |  |--10:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: t3.id = t2.id
 |  |  |  runtime filters: RF002 <- t2.id
+|  |  |  row-size=16B cardinality=1
 |  |  |
 |  |  |--09:HASH JOIN [INNER JOIN]
 |  |  |  |  hash predicates: t4.id = t2.id
 |  |  |  |  runtime filters: RF004 <- t2.id
+|  |  |  |  row-size=12B cardinality=1
 |  |  |  |
 |  |  |  |--08:HASH JOIN [RIGHT OUTER JOIN]
 |  |  |  |  |  hash predicates: t2.id = t1.id
 |  |  |  |  |  runtime filters: RF006 <- t1.id
+|  |  |  |  |  row-size=8B cardinality=8
 |  |  |  |  |
 |  |  |  |  |--00:SCAN HDFS [functional.alltypestiny t1]
 |  |  |  |  |     partitions=4/4 files=4 size=460B
+|  |  |  |  |     row-size=4B cardinality=8
 |  |  |  |  |
 |  |  |  |  01:SCAN HDFS [functional.alltypes t2]
 |  |  |  |     partitions=24/24 files=24 size=478.45KB
 |  |  |  |     runtime filters: RF006 -> t2.id
+|  |  |  |     row-size=4B cardinality=7.30K
 |  |  |  |
 |  |  |  03:SCAN HDFS [functional.alltypessmall t4]
 |  |  |     partitions=4/4 files=4 size=6.32KB
 |  |  |     runtime filters: RF004 -> t4.id
+|  |  |     row-size=4B cardinality=100
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypesagg t3]
 |  |     partitions=11/11 files=11 size=814.73KB
 |  |     runtime filters: RF002 -> t3.id
+|  |     row-size=4B cardinality=11.00K
 |  |
 |  04:SCAN HDFS [functional.alltypes t5]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 05:SCAN HDFS [functional.alltypestiny t6]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> t6.id
+   row-size=4B cardinality=8
 ====
 # Test inverting outer joins in a complex query plan.
 select 1 from
@@ -1057,49 +1244,63 @@ PLAN-ROOT SINK
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: t3.id = t4.id
 |  runtime filters: RF000 <- t4.id
+|  row-size=28B cardinality=1
 |
 |--09:SCAN HDFS [functional.alltypestiny t4]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 11:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: t3.id = a.id
 |  runtime filters: RF002 <- a.id
+|  row-size=24B cardinality=8
 |
 |--10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = count(a.id)
 |  |  runtime filters: RF004 <- count(a.id)
+|  |  row-size=20B cardinality=8
 |  |
 |  |--04:AGGREGATE [FINALIZE]
 |  |  |  output: count(a.id)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  03:AGGREGATE
 |  |  |  group by: a.id
+|  |  |  row-size=4B cardinality=8
 |  |  |
 |  |  02:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: a.id = b.id
 |  |  |  runtime filters: RF006 <- b.id
+|  |  |  row-size=8B cardinality=8
 |  |  |
 |  |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |  |     partitions=4/4 files=4 size=460B
+|  |  |     row-size=4B cardinality=8
 |  |  |
 |  |  00:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
 |  |     runtime filters: RF006 -> a.id
+|  |     row-size=4B cardinality=8
 |  |
 |  07:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: b.id = a.id
 |  |  other predicates: a.year < 10
+|  |  row-size=12B cardinality=8
 |  |
 |  |--05:SCAN HDFS [functional.alltypes a]
+|  |     partition predicates: a.year < 10
 |  |     partitions=0/24 files=0 size=0B
 |  |     runtime filters: RF004 -> a.id
+|  |     row-size=8B cardinality=0
 |  |
 |  06:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 08:SCAN HDFS [functional.alltypes t3]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t3.id, RF002 -> t3.id
+   row-size=4B cardinality=7.30K
 ====
 # Same as above but with full outer joins.
 select 1 from
@@ -1122,48 +1323,62 @@ PLAN-ROOT SINK
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: t3.id = t4.id
 |  runtime filters: RF000 <- t4.id
+|  row-size=28B cardinality=9
 |
 |--09:SCAN HDFS [functional.alltypestiny t4]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 11:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: t3.id = a.id
+|  row-size=24B cardinality=7.31K
 |
 |--10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = count(a.id)
 |  |  runtime filters: RF002 <- count(a.id)
+|  |  row-size=20B cardinality=8
 |  |
 |  |--04:AGGREGATE [FINALIZE]
 |  |  |  output: count(a.id)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  03:AGGREGATE
 |  |  |  group by: a.id
+|  |  |  row-size=4B cardinality=8
 |  |  |
 |  |  02:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: a.id = b.id
 |  |  |  runtime filters: RF004 <- b.id
+|  |  |  row-size=8B cardinality=8
 |  |  |
 |  |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |  |     partitions=4/4 files=4 size=460B
+|  |  |     row-size=4B cardinality=8
 |  |  |
 |  |  00:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
 |  |     runtime filters: RF004 -> a.id
+|  |     row-size=4B cardinality=8
 |  |
 |  07:HASH JOIN [FULL OUTER JOIN]
 |  |  hash predicates: b.id = a.id
 |  |  other predicates: a.year < 10
+|  |  row-size=12B cardinality=8
 |  |
 |  |--05:SCAN HDFS [functional.alltypes a]
+|  |     partition predicates: a.year < 10
 |  |     partitions=0/24 files=0 size=0B
 |  |     runtime filters: RF002 -> a.id
+|  |     row-size=8B cardinality=0
 |  |
 |  06:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 08:SCAN HDFS [functional.alltypes t3]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t3.id
+   row-size=4B cardinality=7.30K
 ====
 # Test inverting semi joins in a complex query plan.
 select 1 from
@@ -1186,50 +1401,64 @@ PLAN-ROOT SINK
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = t4.id
 |  runtime filters: RF000 <- t4.id
+|  row-size=16B cardinality=1
 |
 |--09:SCAN HDFS [functional.alltypestiny t4]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 11:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: t3.id = b.id
 |  runtime filters: RF002 <- b.id
+|  row-size=12B cardinality=8
 |
 |--10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: b.id = count(a.id)
 |  |  runtime filters: RF004 <- count(a.id)
+|  |  row-size=12B cardinality=8
 |  |
 |  |--04:AGGREGATE [FINALIZE]
 |  |  |  output: count(a.id)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  03:AGGREGATE
 |  |  |  group by: a.id
+|  |  |  row-size=4B cardinality=8
 |  |  |
 |  |  02:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: a.id = b.id
 |  |  |  runtime filters: RF008 <- b.id
+|  |  |  row-size=8B cardinality=8
 |  |  |
 |  |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |  |     partitions=4/4 files=4 size=460B
+|  |  |     row-size=4B cardinality=8
 |  |  |
 |  |  00:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
 |  |     runtime filters: RF008 -> a.id
+|  |     row-size=4B cardinality=8
 |  |
 |  07:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: b.id = a.id
 |  |  runtime filters: RF006 <- a.id
+|  |  row-size=4B cardinality=8
 |  |
 |  |--05:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
 |  |     runtime filters: RF000 -> a.id, RF004 -> a.id
+|  |     row-size=4B cardinality=8
 |  |
 |  06:SCAN HDFS [functional.alltypes b]
+|     partition predicates: b.month = 1
 |     partitions=2/24 files=2 size=40.32KB
 |     runtime filters: RF000 -> b.id, RF004 -> b.id, RF006 -> b.id
+|     row-size=4B cardinality=620
 |
 08:SCAN HDFS [functional.alltypes t3]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t3.id, RF002 -> t3.id
+   row-size=4B cardinality=7.30K
 ====
 # Same as above but with anti joins.
 select 1 from
@@ -1252,48 +1481,62 @@ PLAN-ROOT SINK
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = t4.id
 |  runtime filters: RF000 <- t4.id
+|  row-size=16B cardinality=1
 |
 |--09:SCAN HDFS [functional.alltypestiny t4]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 11:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: t3.id = b.id
+|  row-size=12B cardinality=620
 |
 |--10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: b.id = count(a.id)
 |  |  runtime filters: RF002 <- count(a.id)
+|  |  row-size=12B cardinality=620
 |  |
 |  |--04:AGGREGATE [FINALIZE]
 |  |  |  output: count(a.id)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  03:AGGREGATE
 |  |  |  group by: a.id
+|  |  |  row-size=4B cardinality=8
 |  |  |
 |  |  02:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: a.id = b.id
 |  |  |  runtime filters: RF004 <- b.id
+|  |  |  row-size=8B cardinality=8
 |  |  |
 |  |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |  |     partitions=4/4 files=4 size=460B
+|  |  |     row-size=4B cardinality=8
 |  |  |
 |  |  00:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
 |  |     runtime filters: RF004 -> a.id
+|  |     row-size=4B cardinality=8
 |  |
 |  07:HASH JOIN [LEFT ANTI JOIN]
 |  |  hash predicates: b.id = a.id
+|  |  row-size=4B cardinality=620
 |  |
 |  |--05:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
 |  |     runtime filters: RF000 -> a.id, RF002 -> a.id
+|  |     row-size=4B cardinality=8
 |  |
 |  06:SCAN HDFS [functional.alltypes b]
+|     partition predicates: b.month = 1
 |     partitions=2/24 files=2 size=40.32KB
 |     runtime filters: RF000 -> b.id, RF002 -> b.id
+|     row-size=4B cardinality=620
 |
 08:SCAN HDFS [functional.alltypes t3]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t3.id
+   row-size=4B cardinality=7.30K
 ====
 # Regression test for IMPALA-1343.
 SELECT sum(t4.tinyint_col)
@@ -1316,41 +1559,52 @@ PLAN-ROOT SINK
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum(t4.tinyint_col)
+|  row-size=8B cardinality=1
 |
 09:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t4.bigint_col = tt1.int_col
 |  runtime filters: RF000 <- tt1.int_col
+|  row-size=31B cardinality=8
 |
 |--06:SCAN HDFS [functional.alltypestiny tt1]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 08:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: sum(t1.int_col) > t4.id
+|  row-size=31B cardinality=8
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: sum(t1.int_col)
 |  |  limit: 1
+|  |  row-size=8B cardinality=1
 |  |
 |  04:SCAN HDFS [functional.alltypesagg t1]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=4B cardinality=11.00K
 |
 07:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=23B cardinality=8
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t1.bigint_col = t2.smallint_col
 |  |  runtime filters: RF002 <- t2.smallint_col
 |  |  limit: 1
+|  |  row-size=10B cardinality=1
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny t2]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=2B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypes t1]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF002 -> t1.bigint_col
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypestiny t4]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> t4.bigint_col
+   row-size=13B cardinality=8
 ====
 # Tests assignment of conjuncts to inverted outer joins (IMPALA-1342).
 select 1
@@ -1366,13 +1620,16 @@ PLAN-ROOT SINK
 |  hash predicates: b.id = a.id
 |  other predicates: a.int_col = b.int_col, b.bigint_col < a.tinyint_col
 |  runtime filters: RF000 <- a.id, RF001 <- a.int_col
+|  row-size=25B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
+|     row-size=9B cardinality=8
 |
 01:SCAN HDFS [functional.alltypessmall b]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> b.id, RF001 -> b.int_col
+   row-size=16B cardinality=100
 ====
 # Tests assignment of conjuncts to inverted outer joins (IMPALA-1342).
 select 1
@@ -1391,21 +1648,26 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = b.id
 |  other predicates: a.int_col = b.int_col, b.bool_col != c.bool_col, b.tinyint_col = c.tinyint_col, b.bigint_col < a.tinyint_col
 |  runtime filters: RF000 <- b.tinyint_col
+|  row-size=33B cardinality=7.30K
 |
 |--03:HASH JOIN [RIGHT OUTER JOIN]
 |  |  hash predicates: b.id = a.id
 |  |  runtime filters: RF002 <- a.id
+|  |  row-size=27B cardinality=8
 |  |
 |  |--00:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=9B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF002 -> b.id
+|     row-size=18B cardinality=100
 |
 02:SCAN HDFS [functional.alltypes c]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> c.tinyint_col
+   row-size=6B cardinality=7.30K
 ====
 # Tests assignment of conjuncts to inverted outer joins (IMPALA-1342).
 select 1
@@ -1423,21 +1685,26 @@ PLAN-ROOT SINK
 |  hash predicates: c.id = b.id
 |  other predicates: b.bool_col != c.bool_col, b.tinyint_col = c.tinyint_col
 |  runtime filters: RF000 <- b.tinyint_col
+|  row-size=16B cardinality=7.30K
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: b.id = a.id
 |  |  runtime filters: RF002 <- a.id
+|  |  row-size=10B cardinality=9
 |  |
 |  |--00:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF002 -> b.id
+|     row-size=6B cardinality=100
 |
 02:SCAN HDFS [functional.alltypes c]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> c.tinyint_col
+   row-size=6B cardinality=7.30K
 ====
 # Regression test for IMPALA-1342.
 select count(1) from
@@ -1453,24 +1720,30 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t2.string_col = t3.string_col, t1.string_col = t3.date_string_col
+|  row-size=81B cardinality=83.39K
 |
 |--03:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
+|     row-size=33B cardinality=8
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: t2.date_string_col = t1.string_col
 |  other predicates: t2.date_string_col = t1.string_col
 |  runtime filters: RF000 <- t1.string_col, RF001 <- t1.string_col
+|  row-size=48B cardinality=83.39K
 |
 |--00:SCAN HDFS [functional.alltypesagg t1]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=15B cardinality=11.00K
 |
 01:SCAN HDFS [functional.alltypes t2]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t2.date_string_col, RF001 -> t2.date_string_col
+   row-size=33B cardinality=7.30K
 ====
 # Test that filtering with "<=>" sets selectivity, just as "=" does. First, the
 # base case: functional.alltypes.timestamp_col has more distinct vals than
@@ -1490,15 +1763,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=89B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: timestamp_col = TIMESTAMP '2016-11-20 00:00:00'
+|     row-size=20B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.date_string_col = ''
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=10
 ====
 select * from functional.alltypes a
 left semi join
@@ -1512,15 +1788,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: functional.alltypes.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=89B cardinality=1
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: a.timestamp_col = TIMESTAMP '2016-11-20 00:00:00'
+|     row-size=89B cardinality=1
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: date_string_col = ''
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=24B cardinality=10
 ====
 # The same should hold true when the filtering is done with "<=>" rather than "=".
 select * from functional.alltypes a
@@ -1535,15 +1814,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=89B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: timestamp_col IS NOT DISTINCT FROM TIMESTAMP '2016-11-20 00:00:00'
+|     row-size=20B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.date_string_col IS NOT DISTINCT FROM ''
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=10
 ====
 select * from functional.alltypes a
 left semi join
@@ -1557,13 +1839,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: functional.alltypes.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=89B cardinality=1
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: a.timestamp_col IS NOT DISTINCT FROM TIMESTAMP '2016-11-20 00:00:00'
+|     row-size=89B cardinality=1
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: date_string_col IS NOT DISTINCT FROM ''
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=24B cardinality=10
 ====


[26/26] impala git commit: IMPALA-8007: Fix test_slow_subscriber

Posted by ta...@apache.org.
IMPALA-8007: Fix test_slow_subscriber

Previously, test_slow_subscriber verified a slow subscriber by
checking for a precise time window since the last heartbeat.
This was non-deterministic since sleep duration in Python depends
on the machine's workload. This change makes it deterministic by
checking for an increase in the time since last heartbeat.

Testing: Executed the test 1000 times in a loop.

Change-Id: Ibeed543a145076cd11d5d0e441a257111a66497d
Reviewed-on: http://gerrit.cloudera.org:8080/12216
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/85b9c6c4
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/85b9c6c4
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/85b9c6c4

Branch: refs/heads/master
Commit: 85b9c6c4257cabb866a3a4580c009f2729d8df8e
Parents: a7ea86b
Author: poojanilangekar <po...@cloudera.com>
Authored: Thu Jan 10 21:39:24 2019 -0800
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sat Jan 12 04:10:15 2019 +0000

----------------------------------------------------------------------
 tests/statestore/test_statestore.py | 38 ++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/85b9c6c4/tests/statestore/test_statestore.py
----------------------------------------------------------------------
diff --git a/tests/statestore/test_statestore.py b/tests/statestore/test_statestore.py
index f9e61cf..85f42dc 100644
--- a/tests/statestore/test_statestore.py
+++ b/tests/statestore/test_statestore.py
@@ -555,23 +555,33 @@ class TestStatestore():
        )
 
   def test_slow_subscriber(self):
-    """Test for IMPALA-6644: This test kills a healthy subscriber and sleeps for a random
-    interval between 1 and 9 seconds, this lets the heartbeats fail without removing the
-    subscriber from the set of active subscribers. It then checks the subscribers page
-    of the statestore to ensure that the 'time_since_heartbeat' field is updated with an
-    acceptable value. Since the statestore heartbeats at 1 second intervals, an acceptable
-    value would be between ((sleep_time-1.0), (sleep_time+1.0))."""
+    """Test for IMPALA-6644: This test kills a healthy subscriber and sleeps for multiple
+    intervals of about 1 second each, this lets the heartbeats to the subscriber fail.
+    It polls the subscribers page of the statestore to ensure that the
+    'secs_since_heartbeat' field is updated with an acceptable value. This test only
+    checks for a strictly increasing value since the actual value of time might depend
+    on the system load. It stops polling the page once the subscriber is removed from
+    the set of active subscribers. It also checks that a valid heartbeat record of the
+    subscriber is found at least once."""
     sub = StatestoreSubscriber()
     sub.start().register().wait_for_heartbeat(1)
     sub.kill()
-    sleep_time = randint(1, 9)
-    time.sleep(sleep_time)
-    subscribers = get_statestore_subscribers()["subscribers"]
-    for s in subscribers:
-      if str(s["id"]) == sub.subscriber_id:
-        secs_since_heartbeat = float(s["secs_since_heartbeat"])
-        assert (secs_since_heartbeat > float(sleep_time - 1.0))
-        assert (secs_since_heartbeat < float(sleep_time + 1.0))
+    # secs_since_heartbeat is initially unknown.
+    secs_since_heartbeat = -1
+    valid_heartbeat_record = False
+    while secs_since_heartbeat != 0:
+      sleep_start_time = time.time()
+      while time.time() - sleep_start_time < 1:
+        time.sleep(0.1)
+      prev_secs_since_heartbeat = secs_since_heartbeat
+      secs_since_heartbeat = 0
+      subscribers = get_statestore_subscribers()["subscribers"]
+      for s in subscribers:
+        if str(s["id"]) == sub.subscriber_id:
+          secs_since_heartbeat = float(s["secs_since_heartbeat"])
+          assert (secs_since_heartbeat > prev_secs_since_heartbeat)
+          valid_heartbeat_record = True
+    assert valid_heartbeat_record
 
   def test_topic_persistence(self):
     """Test that persistent topic entries survive subscriber failure, but transent topic


[05/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/tpch-nested.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/tpch-nested.test b/testdata/workloads/functional-planner/queries/PlannerTest/tpch-nested.test
index 7bc9da5..bd2c6d1 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/tpch-nested.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/tpch-nested.test
@@ -28,14 +28,17 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: l_returnflag ASC, l_linestatus ASC
+|  row-size=120B cardinality=1.50M
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(l_quantity), sum(l_extendedprice), sum(l_extendedprice * (1 - l_discount)), sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg(l_quantity), avg(l_extendedprice), avg(l_discount), count(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=120B cardinality=1.50M
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipdate <= '1998-09-02'
+   row-size=68B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=112.00MB Threads=4
 Per-Host Resource Estimates: Memory=916MB
@@ -46,20 +49,24 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: l_returnflag ASC, l_linestatus ASC
+|  row-size=120B cardinality=1.50M
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_quantity), sum:merge(l_extendedprice), sum:merge(l_extendedprice * (1 - l_discount)), sum:merge(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg:merge(l_quantity), avg:merge(l_extendedprice), avg:merge(l_discount), count:merge(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=120B cardinality=1.50M
 |
 03:EXCHANGE [HASH(l_returnflag,l_linestatus)]
 |
 01:AGGREGATE [STREAMING]
 |  output: sum(l_quantity), sum(l_extendedprice), sum(l_extendedprice * (1 - l_discount)), sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg(l_quantity), avg(l_extendedprice), avg(l_discount), count(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=120B cardinality=1.50M
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipdate <= '1998-09-02'
+   row-size=68B cardinality=1.50M
 ====
 # TPCH-Q2
 # Q2 - Minimum Cost Supplier Query
@@ -110,80 +117,107 @@ PLAN-ROOT SINK
 |
 26:TOP-N [LIMIT=100]
 |  order by: s_acctbal DESC, n_name ASC, s_name ASC, p_partkey ASC
+|  row-size=223B cardinality=100
 |
 25:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: p_partkey = ps_partkey, ps_supplycost = min(ps_supplycost)
 |  runtime filters: RF000 <- ps_partkey
+|  row-size=322B cardinality=100.00K
 |
 |--22:AGGREGATE [FINALIZE]
 |  |  output: min(ps_supplycost)
 |  |  group by: ps_partkey
+|  |  row-size=16B cardinality=100.00K
 |  |
 |  21:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF008 <- n_nationkey
+|  |  row-size=63B cardinality=100.00K
 |  |
 |  |--17:SUBPLAN
+|  |  |  row-size=33B cardinality=10
 |  |  |
 |  |  |--20:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  |  row-size=33B cardinality=10
 |  |  |  |
 |  |  |  |--18:SINGULAR ROW SRC
+|  |  |  |     row-size=31B cardinality=1
 |  |  |  |
 |  |  |  19:UNNEST [r.r_nations n]
+|  |  |     row-size=0B cardinality=10
 |  |  |
 |  |  16:SCAN HDFS [tpch_nested_parquet.region r]
-|  |     partitions=1/1 files=1 size=3.41KB
+|  |     partitions=1/1 files=1 size=3.44KB
 |  |     predicates: r_name = 'EUROPE', !empty(r.r_nations)
+|  |     row-size=31B cardinality=1
 |  |
 |  12:SUBPLAN
+|  |  row-size=30B cardinality=100.00K
 |  |
 |  |--15:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=30B cardinality=10
 |  |  |
 |  |  |--13:SINGULAR ROW SRC
+|  |  |     row-size=14B cardinality=1
 |  |  |
 |  |  14:UNNEST [s.s_partsupps ps]
+|  |     row-size=0B cardinality=10
 |  |
 |  11:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     predicates: !empty(s.s_partsupps)
 |     runtime filters: RF008 -> s_nationkey
+|     row-size=14B cardinality=10.00K
 |
 24:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF004 <- n_nationkey
+|  row-size=322B cardinality=100.00K
 |
 |--07:SUBPLAN
+|  |  row-size=45B cardinality=10
 |  |
 |  |--10:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=45B cardinality=10
 |  |  |
 |  |  |--08:SINGULAR ROW SRC
+|  |  |     row-size=31B cardinality=1
 |  |  |
 |  |  09:UNNEST [r.r_nations n]
+|  |     row-size=0B cardinality=10
 |  |
 |  06:SCAN HDFS [tpch_nested_parquet.region r]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: r_name = 'EUROPE', !empty(r.r_nations)
+|     row-size=31B cardinality=1
 |
 23:HASH JOIN [INNER JOIN]
 |  hash predicates: ps_partkey = p_partkey
+|  row-size=277B cardinality=100.00K
 |
 |--05:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_size = 15, p_type LIKE '%BRASS'
 |     runtime filters: RF000 -> p_partkey
+|     row-size=71B cardinality=1.26K
 |
 01:SUBPLAN
+|  row-size=207B cardinality=100.00K
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=207B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=191B cardinality=1
 |  |
 |  03:UNNEST [s.s_partsupps ps]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.supplier s]
    partitions=1/1 files=1 size=41.79MB
    predicates: !empty(s.s_partsupps)
    runtime filters: RF004 -> s_nationkey
+   row-size=191B cardinality=10.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=114.73MB Threads=12
 Per-Host Resource Estimates: Memory=1.01GB
@@ -195,74 +229,96 @@ PLAN-ROOT SINK
 |
 26:TOP-N [LIMIT=100]
 |  order by: s_acctbal DESC, n_name ASC, s_name ASC, p_partkey ASC
+|  row-size=223B cardinality=100
 |
 25:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: p_partkey = ps_partkey, ps_supplycost = min(ps_supplycost)
 |  runtime filters: RF000 <- ps_partkey
+|  row-size=322B cardinality=100.00K
 |
 |--32:EXCHANGE [BROADCAST]
 |  |
 |  31:AGGREGATE [FINALIZE]
 |  |  output: min:merge(ps_supplycost)
 |  |  group by: ps_partkey
+|  |  row-size=16B cardinality=100.00K
 |  |
 |  30:EXCHANGE [HASH(ps_partkey)]
 |  |
 |  22:AGGREGATE [STREAMING]
 |  |  output: min(ps_supplycost)
 |  |  group by: ps_partkey
+|  |  row-size=16B cardinality=100.00K
 |  |
 |  21:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF008 <- n_nationkey
+|  |  row-size=63B cardinality=100.00K
 |  |
 |  |--29:EXCHANGE [BROADCAST]
 |  |  |
 |  |  17:SUBPLAN
+|  |  |  row-size=33B cardinality=10
 |  |  |
 |  |  |--20:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  |  row-size=33B cardinality=10
 |  |  |  |
 |  |  |  |--18:SINGULAR ROW SRC
+|  |  |  |     row-size=31B cardinality=1
 |  |  |  |
 |  |  |  19:UNNEST [r.r_nations n]
+|  |  |     row-size=0B cardinality=10
 |  |  |
 |  |  16:SCAN HDFS [tpch_nested_parquet.region r]
-|  |     partitions=1/1 files=1 size=3.41KB
+|  |     partitions=1/1 files=1 size=3.44KB
 |  |     predicates: r_name = 'EUROPE', !empty(r.r_nations)
+|  |     row-size=31B cardinality=1
 |  |
 |  12:SUBPLAN
+|  |  row-size=30B cardinality=100.00K
 |  |
 |  |--15:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=30B cardinality=10
 |  |  |
 |  |  |--13:SINGULAR ROW SRC
+|  |  |     row-size=14B cardinality=1
 |  |  |
 |  |  14:UNNEST [s.s_partsupps ps]
+|  |     row-size=0B cardinality=10
 |  |
 |  11:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     predicates: !empty(s.s_partsupps)
 |     runtime filters: RF008 -> s_nationkey
+|     row-size=14B cardinality=10.00K
 |
 24:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF004 <- n_nationkey
+|  row-size=322B cardinality=100.00K
 |
 |--28:EXCHANGE [BROADCAST]
 |  |
 |  07:SUBPLAN
+|  |  row-size=45B cardinality=10
 |  |
 |  |--10:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=45B cardinality=10
 |  |  |
 |  |  |--08:SINGULAR ROW SRC
+|  |  |     row-size=31B cardinality=1
 |  |  |
 |  |  09:UNNEST [r.r_nations n]
+|  |     row-size=0B cardinality=10
 |  |
 |  06:SCAN HDFS [tpch_nested_parquet.region r]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: r_name = 'EUROPE', !empty(r.r_nations)
+|     row-size=31B cardinality=1
 |
 23:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ps_partkey = p_partkey
+|  row-size=277B cardinality=100.00K
 |
 |--27:EXCHANGE [BROADCAST]
 |  |
@@ -270,19 +326,25 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_size = 15, p_type LIKE '%BRASS'
 |     runtime filters: RF000 -> p_partkey
+|     row-size=71B cardinality=1.26K
 |
 01:SUBPLAN
+|  row-size=207B cardinality=100.00K
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=207B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=191B cardinality=1
 |  |
 |  03:UNNEST [s.s_partsupps ps]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.supplier s]
    partitions=1/1 files=1 size=41.79MB
    predicates: !empty(s.s_partsupps)
    runtime filters: RF004 -> s_nationkey
+   row-size=191B cardinality=10.00K
 ====
 # TPCH-Q3
 # Q3 - Shipping Priority Query
@@ -314,32 +376,43 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=10]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC, o_orderdate ASC
+|  row-size=40B cardinality=10
 |
 09:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: o_orderkey, o_orderdate, o_shippriority
+|  row-size=40B cardinality=3.00M
 |
 01:SUBPLAN
+|  row-size=97B cardinality=3.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=97B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=33B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=64B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=64B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=36B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_mktsegment = 'BUILDING', !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderdate < '1995-03-15'
    predicates on l: l_shipdate > '1995-03-15'
+   row-size=33B cardinality=30.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=100.00MB Threads=4
 Per-Host Resource Estimates: Memory=882MB
@@ -351,38 +424,50 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=10]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC, o_orderdate ASC
+|  row-size=40B cardinality=10
 |
 12:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: o_orderkey, o_orderdate, o_shippriority
+|  row-size=40B cardinality=3.00M
 |
 11:EXCHANGE [HASH(o_orderkey,o_orderdate,o_shippriority)]
 |
 09:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: o_orderkey, o_orderdate, o_shippriority
+|  row-size=40B cardinality=3.00M
 |
 01:SUBPLAN
+|  row-size=97B cardinality=3.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=97B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=33B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=64B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=64B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=36B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_mktsegment = 'BUILDING', !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderdate < '1995-03-15'
    predicates on l: l_shipdate > '1995-03-15'
+   row-size=33B cardinality=30.00K
 ====
 # TPCH-Q4
 # Q4 - Order Priority Checking Query
@@ -414,32 +499,43 @@ PLAN-ROOT SINK
 |
 10:SORT
 |  order by: o_orderpriority ASC
+|  row-size=20B cardinality=1.50M
 |
 09:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=20B cardinality=1.50M
 |
 01:SUBPLAN
+|  row-size=48B cardinality=1.50M
 |
 |--08:SUBPLAN
+|  |  row-size=48B cardinality=10
 |  |
 |  |--06:NESTED LOOP JOIN [RIGHT SEMI JOIN]
+|  |  |  row-size=48B cardinality=1
 |  |  |
 |  |  |--04:SINGULAR ROW SRC
+|  |  |     row-size=48B cardinality=1
 |  |  |
 |  |  05:UNNEST [o.o_lineitems]
+|  |     row-size=24B cardinality=10
 |  |
 |  07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=48B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: o_orderdate >= '1993-07-01', o_orderdate < '1993-10-01'
    predicates on o_lineitems: l_commitdate < l_receiptdate
+   row-size=12B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=96.00MB Threads=4
 Per-Host Resource Estimates: Memory=630MB
@@ -450,38 +546,50 @@ PLAN-ROOT SINK
 |
 10:SORT
 |  order by: o_orderpriority ASC
+|  row-size=20B cardinality=1.50M
 |
 12:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: o_orderpriority
+|  row-size=20B cardinality=1.50M
 |
 11:EXCHANGE [HASH(o_orderpriority)]
 |
 09:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=20B cardinality=1.50M
 |
 01:SUBPLAN
+|  row-size=48B cardinality=1.50M
 |
 |--08:SUBPLAN
+|  |  row-size=48B cardinality=10
 |  |
 |  |--06:NESTED LOOP JOIN [RIGHT SEMI JOIN]
+|  |  |  row-size=48B cardinality=1
 |  |  |
 |  |  |--04:SINGULAR ROW SRC
+|  |  |     row-size=48B cardinality=1
 |  |  |
 |  |  05:UNNEST [o.o_lineitems]
+|  |     row-size=24B cardinality=10
 |  |
 |  07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=48B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: o_orderdate >= '1993-07-01', o_orderdate < '1993-10-01'
    predicates on o_lineitems: l_commitdate < l_receiptdate
+   row-size=12B cardinality=150.00K
 ====
 # TPCH-Q5
 # Q5 - Local Supplier Volume Query
@@ -513,55 +621,74 @@ PLAN-ROOT SINK
 |
 18:SORT
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=28B cardinality=6.00G
 |
 17:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=28B cardinality=6.00G
 |
 16:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = s_nationkey, l_suppkey = s_suppkey
 |  runtime filters: RF000 <- s_nationkey
+|  row-size=117B cardinality=6.00G
 |
 |--09:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
+|     row-size=10B cardinality=10.00K
 |
 15:HASH JOIN [INNER JOIN]
 |  hash predicates: c.c_nationkey = n.n_nationkey
 |  runtime filters: RF004 <- n.n_nationkey
+|  row-size=107B cardinality=15.00M
 |
 |--11:SUBPLAN
+|  |  row-size=45B cardinality=10
 |  |
 |  |--14:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=45B cardinality=10
 |  |  |
 |  |  |--12:SINGULAR ROW SRC
+|  |  |     row-size=31B cardinality=1
 |  |  |
 |  |  13:UNNEST [r.r_nations n]
+|  |     row-size=0B cardinality=10
 |  |
 |  10:SCAN HDFS [tpch_nested_parquet.region r]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: r_name = 'ASIA', !empty(r.r_nations)
+|     row-size=31B cardinality=1
 |
 01:SUBPLAN
+|  row-size=62B cardinality=15.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=62B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=48B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=48B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderdate >= '1994-01-01', o_orderdate < '1995-01-01'
    runtime filters: RF000 -> c_nationkey, RF004 -> c.c_nationkey
+   row-size=14B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=110.02MB Threads=8
 Per-Host Resource Estimates: Memory=1.26GB
@@ -572,65 +699,85 @@ PLAN-ROOT SINK
 |
 18:SORT
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=28B cardinality=6.00G
 |
 22:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=28B cardinality=6.00G
 |
 21:EXCHANGE [HASH(n_name)]
 |
 17:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=28B cardinality=6.00G
 |
 16:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = s_nationkey, l_suppkey = s_suppkey
 |  runtime filters: RF000 <- s_nationkey
+|  row-size=117B cardinality=6.00G
 |
 |--20:EXCHANGE [BROADCAST]
 |  |
 |  09:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
+|     row-size=10B cardinality=10.00K
 |
 15:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c.c_nationkey = n.n_nationkey
 |  runtime filters: RF004 <- n.n_nationkey
+|  row-size=107B cardinality=15.00M
 |
 |--19:EXCHANGE [BROADCAST]
 |  |
 |  11:SUBPLAN
+|  |  row-size=45B cardinality=10
 |  |
 |  |--14:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=45B cardinality=10
 |  |  |
 |  |  |--12:SINGULAR ROW SRC
+|  |  |     row-size=31B cardinality=1
 |  |  |
 |  |  13:UNNEST [r.r_nations n]
+|  |     row-size=0B cardinality=10
 |  |
 |  10:SCAN HDFS [tpch_nested_parquet.region r]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: r_name = 'ASIA', !empty(r.r_nations)
+|     row-size=31B cardinality=1
 |
 01:SUBPLAN
+|  row-size=62B cardinality=15.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=62B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=48B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=48B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderdate >= '1994-01-01', o_orderdate < '1995-01-01'
    runtime filters: RF000 -> c_nationkey, RF004 -> c.c_nationkey
+   row-size=14B cardinality=150.00K
 ====
 # TPCH-Q6
 # Q6 - Forecasting Revenue Change Query
@@ -650,10 +797,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_discount <= 0.07, l_discount >= 0.05, l_quantity < 24, l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
+   row-size=36B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=16.00MB Threads=3
 Per-Host Resource Estimates: Memory=372MB
@@ -661,15 +810,18 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: sum(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_discount <= 0.07, l_discount >= 0.05, l_quantity < 24, l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
+   row-size=36B cardinality=1.50M
 ====
 # TPCH-Q7
 # Q7 - Volume Shipping Query
@@ -716,55 +868,72 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: supp_nation ASC, cust_nation ASC, l_year ASC
+|  row-size=44B cardinality=15.00M
 |
 15:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n1.n_name, n2.n_name, year(l_shipdate)
+|  row-size=44B cardinality=15.00M
 |
 14:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n2.n_nationkey
 |  other predicates: ((n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE'))
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=100B cardinality=15.00M
 |
 |--11:SCAN HDFS [tpch_nested_parquet.region.r_nations n2]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 13:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n1.n_nationkey
 |  runtime filters: RF002 <- n1.n_nationkey
+|  row-size=86B cardinality=15.00M
 |
 |--10:SCAN HDFS [tpch_nested_parquet.region.r_nations n1]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: l_suppkey = s_suppkey
+|  row-size=72B cardinality=15.00M
 |
 |--09:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 01:SUBPLAN
+|  row-size=62B cardinality=15.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=62B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=48B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=48B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=12B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems)
    predicates on l: l_shipdate >= '1995-01-01', l_shipdate <= '1996-12-31'
    runtime filters: RF000 -> c_nationkey
+   row-size=14B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=112.97MB Threads=10
 Per-Host Resource Estimates: Memory=1.00GB
@@ -775,67 +944,85 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: supp_nation ASC, cust_nation ASC, l_year ASC
+|  row-size=44B cardinality=15.00M
 |
 21:AGGREGATE [FINALIZE]
 |  output: sum:merge(volume)
 |  group by: supp_nation, cust_nation, l_year
+|  row-size=44B cardinality=15.00M
 |
 20:EXCHANGE [HASH(supp_nation,cust_nation,l_year)]
 |
 15:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n1.n_name, n2.n_name, year(l_shipdate)
+|  row-size=44B cardinality=15.00M
 |
 14:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n2.n_nationkey
 |  other predicates: ((n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE'))
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=100B cardinality=15.00M
 |
 |--19:EXCHANGE [BROADCAST]
 |  |
 |  11:SCAN HDFS [tpch_nested_parquet.region.r_nations n2]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 13:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n1.n_nationkey
 |  runtime filters: RF002 <- n1.n_nationkey
+|  row-size=86B cardinality=15.00M
 |
 |--18:EXCHANGE [BROADCAST]
 |  |
 |  10:SCAN HDFS [tpch_nested_parquet.region.r_nations n1]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 12:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_suppkey = s_suppkey
+|  row-size=72B cardinality=15.00M
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  09:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 01:SUBPLAN
+|  row-size=62B cardinality=15.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=62B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=48B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=48B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=12B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems)
    predicates on l: l_shipdate >= '1995-01-01', l_shipdate <= '1996-12-31'
    runtime filters: RF000 -> c_nationkey
+   row-size=14B cardinality=150.00K
 ====
 # TPCH-Q8
 # Q8 - National Market Share Query
@@ -880,69 +1067,92 @@ PLAN-ROOT SINK
 |
 22:SORT
 |  order by: o_year ASC
+|  row-size=36B cardinality=15.00M
 |
 21:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN n2.n_name = 'BRAZIL' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
 |  group by: year(o_orderdate)
+|  row-size=36B cardinality=15.00M
 |
 20:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n2.n_nationkey
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=167B cardinality=15.00M
 |
 |--16:SCAN HDFS [tpch_nested_parquet.region.r_nations n2]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 19:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n1.n_nationkey
 |  runtime filters: RF002 <- n1.n_nationkey
+|  row-size=153B cardinality=15.00M
 |
 |--12:SUBPLAN
+|  |  row-size=33B cardinality=10
 |  |
 |  |--15:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=33B cardinality=10
 |  |  |
 |  |  |--13:SINGULAR ROW SRC
+|  |  |     row-size=31B cardinality=1
 |  |  |
 |  |  14:UNNEST [r.r_nations n1]
+|  |     row-size=0B cardinality=10
 |  |
 |  11:SCAN HDFS [tpch_nested_parquet.region r]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: r_name = 'AMERICA', !empty(r.r_nations)
+|     row-size=31B cardinality=1
 |
 18:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
+|  row-size=121B cardinality=15.00M
 |
 |--10:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_type = 'ECONOMY ANODIZED STEEL'
+|     row-size=41B cardinality=1.32K
 |
 17:HASH JOIN [INNER JOIN]
 |  hash predicates: l_suppkey = s_suppkey
+|  row-size=80B cardinality=15.00M
 |
 |--09:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 01:SUBPLAN
+|  row-size=70B cardinality=15.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=70B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=56B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=56B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderdate >= '1995-01-01', o_orderdate <= '1996-12-31'
    runtime filters: RF002 -> c_nationkey
+   row-size=14B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=110.91MB Threads=12
 Per-Host Resource Estimates: Memory=1.04GB
@@ -953,83 +1163,107 @@ PLAN-ROOT SINK
 |
 22:SORT
 |  order by: o_year ASC
+|  row-size=36B cardinality=15.00M
 |
 28:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN nation = 'BRAZIL' THEN volume ELSE 0 END), sum:merge(volume)
 |  group by: o_year
+|  row-size=36B cardinality=15.00M
 |
 27:EXCHANGE [HASH(o_year)]
 |
 21:AGGREGATE [STREAMING]
 |  output: sum(CASE WHEN n2.n_name = 'BRAZIL' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
 |  group by: year(o_orderdate)
+|  row-size=36B cardinality=15.00M
 |
 20:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n2.n_nationkey
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=167B cardinality=15.00M
 |
 |--26:EXCHANGE [BROADCAST]
 |  |
 |  16:SCAN HDFS [tpch_nested_parquet.region.r_nations n2]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 19:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n1.n_nationkey
 |  runtime filters: RF002 <- n1.n_nationkey
+|  row-size=153B cardinality=15.00M
 |
 |--25:EXCHANGE [BROADCAST]
 |  |
 |  12:SUBPLAN
+|  |  row-size=33B cardinality=10
 |  |
 |  |--15:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=33B cardinality=10
 |  |  |
 |  |  |--13:SINGULAR ROW SRC
+|  |  |     row-size=31B cardinality=1
 |  |  |
 |  |  14:UNNEST [r.r_nations n1]
+|  |     row-size=0B cardinality=10
 |  |
 |  11:SCAN HDFS [tpch_nested_parquet.region r]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: r_name = 'AMERICA', !empty(r.r_nations)
+|     row-size=31B cardinality=1
 |
 18:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
+|  row-size=121B cardinality=15.00M
 |
 |--24:EXCHANGE [BROADCAST]
 |  |
 |  10:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_type = 'ECONOMY ANODIZED STEEL'
+|     row-size=41B cardinality=1.32K
 |
 17:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_suppkey = s_suppkey
+|  row-size=80B cardinality=15.00M
 |
 |--23:EXCHANGE [BROADCAST]
 |  |
 |  09:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 01:SUBPLAN
+|  row-size=70B cardinality=15.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=70B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=56B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=56B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderdate >= '1995-01-01', o_orderdate <= '1996-12-31'
    runtime filters: RF002 -> c_nationkey
+   row-size=14B cardinality=150.00K
 ====
 # TPCH-Q9
 # Q9 - Product Type Measure Query
@@ -1069,52 +1303,69 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: nation ASC, o_year DESC
+|  row-size=32B cardinality=15.00M
 |
 15:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity)
 |  group by: n_name, year(o_orderdate)
+|  row-size=32B cardinality=15.00M
 |
 14:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=169B cardinality=15.00M
 |
 |--11:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 13:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
+|  row-size=155B cardinality=15.00M
 |
 |--10:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_name LIKE '%green%'
+|     row-size=53B cardinality=20.00K
 |
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = s_suppkey
+|  row-size=102B cardinality=15.00M
 |
 |--06:SUBPLAN
+|  |  row-size=38B cardinality=100.00K
 |  |
 |  |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=38B cardinality=10
 |  |  |
 |  |  |--07:SINGULAR ROW SRC
+|  |  |     row-size=22B cardinality=1
 |  |  |
 |  |  08:UNNEST [s.s_partsupps ps]
+|  |     row-size=0B cardinality=10
 |  |
 |  05:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     predicates: !empty(s.s_partsupps)
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=22B cardinality=10.00K
 |
 01:SUBPLAN
+|  row-size=64B cardinality=15.00M
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=64B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=24B cardinality=1
 |  |
 |  03:UNNEST [o.o_lineitems l]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders o]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(o.o_lineitems)
+   row-size=24B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=139.58MB Threads=10
 Per-Host Resource Estimates: Memory=1.11GB
@@ -1125,64 +1376,82 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: nation ASC, o_year DESC
+|  row-size=32B cardinality=15.00M
 |
 21:AGGREGATE [FINALIZE]
 |  output: sum:merge(amount)
 |  group by: nation, o_year
+|  row-size=32B cardinality=15.00M
 |
 20:EXCHANGE [HASH(nation,o_year)]
 |
 15:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity)
 |  group by: n_name, year(o_orderdate)
+|  row-size=32B cardinality=15.00M
 |
 14:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=169B cardinality=15.00M
 |
 |--19:EXCHANGE [BROADCAST]
 |  |
 |  11:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 13:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
+|  row-size=155B cardinality=15.00M
 |
 |--18:EXCHANGE [BROADCAST]
 |  |
 |  10:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_name LIKE '%green%'
+|     row-size=53B cardinality=20.00K
 |
 12:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = s_suppkey
+|  row-size=102B cardinality=15.00M
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  06:SUBPLAN
+|  |  row-size=38B cardinality=100.00K
 |  |
 |  |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=38B cardinality=10
 |  |  |
 |  |  |--07:SINGULAR ROW SRC
+|  |  |     row-size=22B cardinality=1
 |  |  |
 |  |  08:UNNEST [s.s_partsupps ps]
+|  |     row-size=0B cardinality=10
 |  |
 |  05:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     predicates: !empty(s.s_partsupps)
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=22B cardinality=10.00K
 |
 01:SUBPLAN
+|  row-size=64B cardinality=15.00M
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=64B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=24B cardinality=1
 |  |
 |  03:UNNEST [o.o_lineitems l]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders o]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(o.o_lineitems)
+   row-size=24B cardinality=1.50M
 ====
 # TPCH-Q10
 # Q10 - Returned Item Reporting Query
@@ -1224,40 +1493,53 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=20]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=223B cardinality=20
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment
+|  row-size=223B cardinality=15.00M
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=275B cardinality=15.00M
 |
 |--09:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 01:SUBPLAN
+|  row-size=261B cardinality=15.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=261B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=209B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=52B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=52B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderdate >= '1993-10-01', o_orderdate < '1994-01-01'
    predicates on l: l_returnflag = 'R'
    runtime filters: RF000 -> c_nationkey
+   row-size=209B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=94.95MB Threads=6
 Per-Host Resource Estimates: Memory=1006MB
@@ -1269,48 +1551,62 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=20]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=223B cardinality=20
 |
 15:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment
+|  row-size=223B cardinality=15.00M
 |
 14:EXCHANGE [HASH(c_custkey,c_name,c_acctbal,c_phone,n_name,c_address,c_comment)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment
+|  row-size=223B cardinality=15.00M
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=275B cardinality=15.00M
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
 |  09:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
+|     row-size=14B cardinality=50
 |
 01:SUBPLAN
+|  row-size=261B cardinality=15.00M
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=261B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=209B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=52B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=52B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderdate >= '1993-10-01', o_orderdate < '1994-01-01'
    predicates on l: l_returnflag = 'R'
    runtime filters: RF000 -> c_nationkey
+   row-size=209B cardinality=150.00K
 ====
 # TPCH-Q11
 # Q11 - Important Stock Identification
@@ -1352,58 +1648,76 @@ PLAN-ROOT SINK
 |
 17:SORT
 |  order by: value DESC
+|  row-size=24B cardinality=100.00K
 |
 16:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: sum(ps_supplycost * ps_availqty) > sum(ps_supplycost * ps_availqty) * 0.0001
+|  row-size=40B cardinality=100.00K
 |
 |--15:AGGREGATE [FINALIZE]
 |  |  output: sum(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  14:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF002 <- n_nationkey
+|  |  row-size=40B cardinality=100.00K
 |  |
 |  |--13:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|  |     partitions=1/1 files=1 size=3.41KB
+|  |     partitions=1/1 files=1 size=3.44KB
 |  |     predicates: n_name = 'GERMANY'
+|  |     row-size=14B cardinality=5
 |  |
 |  09:SUBPLAN
+|  |  row-size=26B cardinality=100.00K
 |  |
 |  |--12:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=26B cardinality=10
 |  |  |
 |  |  |--10:SINGULAR ROW SRC
+|  |  |     row-size=14B cardinality=1
 |  |  |
 |  |  11:UNNEST [s.s_partsupps ps]
+|  |     row-size=0B cardinality=10
 |  |
 |  08:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     predicates: !empty(s.s_partsupps)
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=14B cardinality=10.00K
 |
 07:AGGREGATE [FINALIZE]
 |  output: sum(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=100.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=48B cardinality=100.00K
 |
 |--05:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: n_name = 'GERMANY'
+|     row-size=14B cardinality=5
 |
 01:SUBPLAN
+|  row-size=34B cardinality=100.00K
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=34B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  03:UNNEST [s.s_partsupps ps]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.supplier s]
    partitions=1/1 files=1 size=41.79MB
    predicates: !empty(s.s_partsupps)
    runtime filters: RF000 -> s_nationkey
+   row-size=14B cardinality=10.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=111.91MB Threads=11
 Per-Host Resource Estimates: Memory=746MB
@@ -1414,75 +1728,95 @@ PLAN-ROOT SINK
 |
 17:SORT
 |  order by: value DESC
+|  row-size=24B cardinality=100.00K
 |
 16:NESTED LOOP JOIN [INNER JOIN, BROADCAST]
 |  predicates: sum(ps_supplycost * ps_availqty) > sum(ps_supplycost * ps_availqty) * 0.0001
+|  row-size=40B cardinality=100.00K
 |
 |--24:EXCHANGE [BROADCAST]
 |  |
 |  23:AGGREGATE [FINALIZE]
 |  |  output: sum:merge(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  22:EXCHANGE [UNPARTITIONED]
 |  |
 |  15:AGGREGATE
 |  |  output: sum(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  14:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF002 <- n_nationkey
+|  |  row-size=40B cardinality=100.00K
 |  |
 |  |--21:EXCHANGE [BROADCAST]
 |  |  |
 |  |  13:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|  |     partitions=1/1 files=1 size=3.41KB
+|  |     partitions=1/1 files=1 size=3.44KB
 |  |     predicates: n_name = 'GERMANY'
+|  |     row-size=14B cardinality=5
 |  |
 |  09:SUBPLAN
+|  |  row-size=26B cardinality=100.00K
 |  |
 |  |--12:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=26B cardinality=10
 |  |  |
 |  |  |--10:SINGULAR ROW SRC
+|  |  |     row-size=14B cardinality=1
 |  |  |
 |  |  11:UNNEST [s.s_partsupps ps]
+|  |     row-size=0B cardinality=10
 |  |
 |  08:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     predicates: !empty(s.s_partsupps)
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=14B cardinality=10.00K
 |
 20:AGGREGATE [FINALIZE]
 |  output: sum:merge(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=100.00K
 |
 19:EXCHANGE [HASH(ps_partkey)]
 |
 07:AGGREGATE [STREAMING]
 |  output: sum(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=100.00K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=48B cardinality=100.00K
 |
 |--18:EXCHANGE [BROADCAST]
 |  |
 |  05:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: n_name = 'GERMANY'
+|     row-size=14B cardinality=5
 |
 01:SUBPLAN
+|  row-size=34B cardinality=100.00K
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=34B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=14B cardinality=1
 |  |
 |  03:UNNEST [s.s_partsupps ps]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.supplier s]
    partitions=1/1 files=1 size=41.79MB
    predicates: !empty(s.s_partsupps)
    runtime filters: RF000 -> s_nationkey
+   row-size=14B cardinality=10.00K
 ====
 # TPCH-Q12
 # Q12 - Shipping Mode and Order Priority Query
@@ -1520,23 +1854,30 @@ PLAN-ROOT SINK
 |
 06:SORT
 |  order by: l_shipmode ASC
+|  row-size=28B cardinality=15.00M
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=28B cardinality=15.00M
 |
 01:SUBPLAN
+|  row-size=72B cardinality=15.00M
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=72B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=24B cardinality=1
 |  |
 |  03:UNNEST [o.o_lineitems l]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders o]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(o.o_lineitems)
    predicates on l: l_shipmode IN ('MAIL', 'SHIP'), l_commitdate < l_receiptdate, l_shipdate < l_commitdate, l_receiptdate >= '1994-01-01', l_receiptdate < '1995-01-01'
+   row-size=24B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=104.00MB Threads=4
 Per-Host Resource Estimates: Memory=748MB
@@ -1547,29 +1888,37 @@ PLAN-ROOT SINK
 |
 06:SORT
 |  order by: l_shipmode ASC
+|  row-size=28B cardinality=15.00M
 |
 08:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum:merge(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=28B cardinality=15.00M
 |
 07:EXCHANGE [HASH(l_shipmode)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=28B cardinality=15.00M
 |
 01:SUBPLAN
+|  row-size=72B cardinality=15.00M
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=72B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=24B cardinality=1
 |  |
 |  03:UNNEST [o.o_lineitems l]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders o]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(o.o_lineitems)
    predicates on l: l_shipmode IN ('MAIL', 'SHIP'), l_commitdate < l_receiptdate, l_shipdate < l_commitdate, l_receiptdate >= '1994-01-01', l_receiptdate < '1995-01-01'
+   row-size=24B cardinality=1.50M
 ====
 # TPCH-Q13
 # Q13 - Customer Distribution Query
@@ -1599,26 +1948,34 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: count(*) DESC, c_count DESC
+|  row-size=16B cardinality=150.00K
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: count(o_orderkey)
+|  row-size=16B cardinality=150.00K
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(o_orderkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 01:SUBPLAN
+|  row-size=40B cardinality=150.00K
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  row-size=40B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [c.c_orders]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates on c_orders: (NOT o_comment LIKE '%special%requests%')
+   row-size=20B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=94.94MB Threads=5
 Per-Host Resource Estimates: Memory=548MB
@@ -1629,38 +1986,48 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: count(*) DESC, c_count DESC
+|  row-size=16B cardinality=150.00K
 |
 11:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: c_count
+|  row-size=16B cardinality=150.00K
 |
 10:EXCHANGE [HASH(c_count)]
 |
 06:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: count(o_orderkey)
+|  row-size=16B cardinality=150.00K
 |
 09:AGGREGATE [FINALIZE]
 |  output: count:merge(o_orderkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 08:EXCHANGE [HASH(c_custkey)]
 |
 05:AGGREGATE [STREAMING]
 |  output: count(o_orderkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 01:SUBPLAN
+|  row-size=40B cardinality=150.00K
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  row-size=40B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [c.c_orders]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates on c_orders: (NOT o_comment LIKE '%special%requests%')
+   row-size=20B cardinality=150.00K
 ====
 # TPCH-Q14
 # Q14 - Promotion Effect
@@ -1684,18 +2051,22 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF000 <- p_partkey
+|  row-size=77B cardinality=1.50M
 |
 |--01:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
+|     row-size=41B cardinality=200.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipdate < '1995-10-01', l_shipdate >= '1995-09-01'
    runtime filters: RF000 -> l_partkey
+   row-size=36B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=27.50MB Threads=5
 Per-Host Resource Estimates: Memory=414MB
@@ -1703,25 +2074,30 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum:merge(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: sum(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF000 <- p_partkey
+|  row-size=77B cardinality=1.50M
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
+|     row-size=41B cardinality=200.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipdate < '1995-10-01', l_shipdate >= '1995-09-01'
    runtime filters: RF000 -> l_partkey
+   row-size=36B cardinality=1.50M
 ====
 # TPCH-Q15
 # Q15 - Top Supplier Query
@@ -1762,36 +2138,45 @@ PLAN-ROOT SINK
 |
 08:SORT
 |  order by: s_suppkey ASC
+|  row-size=118B cardinality=1.50M
 |
 07:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: sum(l_extendedprice * (1 - l_discount)) = max(total_revenue)
+|  row-size=126B cardinality=1.50M
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: max(sum(l_extendedprice * (1 - l_discount)))
+|  |  row-size=16B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: sum(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=1.50M
 |  |
 |  03:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-|     partitions=1/1 files=4 size=288.96MB
+|     partitions=1/1 files=4 size=288.99MB
 |     predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
+|     row-size=36B cardinality=1.50M
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF000 <- s_suppkey
+|  row-size=126B cardinality=1.50M
 |
 |--00:SCAN HDFS [tpch_nested_parquet.supplier]
 |     partitions=1/1 files=1 size=41.79MB
+|     row-size=102B cardinality=10.00K
 |
 02:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_suppkey
+|  row-size=24B cardinality=1.50M
 |
 01:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
    runtime filters: RF000 -> l.l_suppkey
+   row-size=36B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=186.88MB Threads=10
 Per-Host Resource Estimates: Memory=1.47GB
@@ -1802,57 +2187,69 @@ PLAN-ROOT SINK
 |
 08:SORT
 |  order by: s_suppkey ASC
+|  row-size=118B cardinality=1.50M
 |
 07:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: sum(l_extendedprice * (1 - l_discount)) = max(total_revenue)
+|  row-size=126B cardinality=1.50M
 |
 |--16:EXCHANGE [BROADCAST]
 |  |
 |  15:AGGREGATE [FINALIZE]
 |  |  output: max:merge(total_revenue)
+|  |  row-size=16B cardinality=1
 |  |
 |  14:EXCHANGE [UNPARTITIONED]
 |  |
 |  05:AGGREGATE
 |  |  output: max(sum(l_extendedprice * (1 - l_discount)))
+|  |  row-size=16B cardinality=1
 |  |
 |  13:AGGREGATE [FINALIZE]
 |  |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=1.50M
 |  |
 |  12:EXCHANGE [HASH(l_suppkey)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  output: sum(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=1.50M
 |  |
 |  03:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-|     partitions=1/1 files=4 size=288.96MB
+|     partitions=1/1 files=4 size=288.99MB
 |     predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
+|     row-size=36B cardinality=1.50M
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF000 <- s_suppkey
+|  row-size=126B cardinality=1.50M
 |
 |--11:EXCHANGE [HASH(s_suppkey)]
 |  |
 |  00:SCAN HDFS [tpch_nested_parquet.supplier]
 |     partitions=1/1 files=1 size=41.79MB
+|     row-size=102B cardinality=10.00K
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: l_suppkey
+|  row-size=24B cardinality=1.50M
 |
 09:EXCHANGE [HASH(l_suppkey)]
 |
 02:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_suppkey
+|  row-size=24B cardinality=1.50M
 |
 01:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
    runtime filters: RF000 -> l.l_suppkey
+   row-size=36B cardinality=1.50M
 ====
 # TPCH-Q16
 # Q16 - Parts/Supplier Relation Query
@@ -1887,32 +2284,42 @@ PLAN-ROOT SINK
 |
 09:SORT
 |  order by: count(s_suppkey) DESC, p_brand ASC, p_type ASC, p_size ASC
+|  row-size=65B cardinality=10.00K
 |
 08:AGGREGATE [FINALIZE]
 |  output: count(s_suppkey)
 |  group by: p_brand, p_type, p_size
+|  row-size=65B cardinality=10.00K
 |
 07:AGGREGATE
 |  group by: p_brand, p_type, p_size, s_suppkey
+|  row-size=65B cardinality=10.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: ps_partkey = p_partkey
+|  row-size=167B cardinality=10.00K
 |
 |--05:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_size IN (49, 14, 23, 45, 19, 3, 36, 9), p_brand != 'Brand#45', NOT p_type LIKE 'MEDIUM POLISHED%'
+|     row-size=65B cardinality=8.00K
 |
 01:SUBPLAN
+|  row-size=103B cardinality=10.00K
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=103B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=95B cardinality=1
 |  |
 |  03:UNNEST [s.s_partsupps ps]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.supplier s]
    partitions=1/1 files=1 size=41.79MB
    predicates: NOT s_comment LIKE '%Customer%Complaints%', !empty(s.s_partsupps)
+   row-size=95B cardinality=1.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=31.81MB Threads=7
 Per-Host Resource Estimates: Memory=288MB
@@ -1923,45 +2330,57 @@ PLAN-ROOT SINK
 |
 09:SORT
 |  order by: count(s_suppkey) DESC, p_brand ASC, p_type ASC, p_size ASC
+|  row-size=65B cardinality=10.00K
 |
 14:AGGREGATE [FINALIZE]
 |  output: count:merge(s_suppkey)
 |  group by: p_brand, p_type, p_size
+|  row-size=65B cardinality=10.00K
 |
 13:EXCHANGE [HASH(p_brand,p_type,p_size)]
 |
 08:AGGREGATE [STREAMING]
 |  output: count(s_suppkey)
 |  group by: p_brand, p_type, p_size
+|  row-size=65B cardinality=10.00K
 |
 12:AGGREGATE
 |  group by: p_brand, p_type, p_size, s_suppkey
+|  row-size=65B cardinality=10.00K
 |
 11:EXCHANGE [HASH(p_brand,p_type,p_size,s_suppkey)]
 |
 07:AGGREGATE [STREAMING]
 |  group by: p_brand, p_type, p_size, s_suppkey
+|  row-size=65B cardinality=10.00K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ps_partkey = p_partkey
+|  row-size=167B cardinality=10.00K
 |
 |--10:EXCHANGE [BROADCAST]
 |  |
 |  05:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_size IN (49, 14, 23, 45, 19, 3, 36, 9), p_brand != 'Brand#45', NOT p_type LIKE 'MEDIUM POLISHED%'
+|     row-size=65B cardinality=8.00K
 |
 01:SUBPLAN
+|  row-size=103B cardinality=10.00K
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=103B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=95B cardinality=1
 |  |
 |  03:UNNEST [s.s_partsupps ps]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.supplier s]
    partitions=1/1 files=1 size=41.79MB
    predicates: NOT s_comment LIKE '%Customer%Complaints%', !empty(s.s_partsupps)
+   row-size=95B cardinality=1.00K
 ====
 # TPCH-Q17
 # Q17 - Small-Quantity-Order Revenue Query
@@ -1989,31 +2408,38 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice)
+|  row-size=16B cardinality=1
 |
 05:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: p_partkey = l_partkey
 |  other join predicates: l_quantity < 0.2 * avg(l_quantity)
 |  runtime filters: RF000 <- l_partkey
+|  row-size=72B cardinality=15.00M
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  output: avg(l_quantity)
 |  |  group by: l_partkey
+|  |  row-size=16B cardinality=15.00M
 |  |
 |  02:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-|     partitions=1/1 files=4 size=288.96MB
+|     partitions=1/1 files=4 size=288.99MB
+|     row-size=16B cardinality=15.00M
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF002 <- p_partkey
+|  row-size=72B cardinality=15.00M
 |
 |--01:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_container = 'MED BOX', p_brand = 'Brand#23'
 |     runtime filters: RF000 -> p_partkey
+|     row-size=48B cardinality=1.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    runtime filters: RF000 -> l.l_partkey, RF002 -> l_partkey
+   row-size=24B cardinality=15.00M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=154.94MB Threads=8
 Per-Host Resource Estimates: Memory=875MB
@@ -2021,35 +2447,42 @@ PLAN-ROOT SINK
 |
 12:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice)
+|  row-size=16B cardinality=1
 |
 11:EXCHANGE [UNPARTITIONED]
 |
 06:AGGREGATE
 |  output: sum(l_extendedprice)
+|  row-size=16B cardinality=1
 |
 05:HASH JOIN [LEFT SEMI JOIN, PARTITIONED]
 |  hash predicates: p_partkey = l_partkey
 |  other join predicates: l_quantity < 0.2 * avg(l_quantity)
 |  runtime filters: RF000 <- l_partkey
+|  row-size=72B cardinality=15.00M
 |
 |--09:AGGREGATE [FINALIZE]
 |  |  output: avg:merge(l_quantity)
 |  |  group by: l_partkey
+|  |  row-size=16B cardinality=15.00M
 |  |
 |  08:EXCHANGE [HASH(l_partkey)]
 |  |
 |  03:AGGREGATE [STREAMING]
 |  |  output: avg(l_quantity)
 |  |  group by: l_partkey
+|  |  row-size=16B cardinality=15.00M
 |  |
 |  02:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-|     partitions=1/1 files=4 size=288.96MB
+|     partitions=1/1 files=4 size=288.99MB
+|     row-size=16B cardinality=15.00M
 |
 10:EXCHANGE [HASH(p_partkey)]
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF002 <- p_partkey
+|  row-size=72B cardinality=15.00M
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
@@ -2057,10 +2490,12 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_container = 'MED BOX', p_brand = 'Brand#23'
 |     runtime filters: RF000 -> p_partkey
+|     row-size=48B cardinality=1.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    runtime filters: RF000 -> l.l_partkey, RF002 -> l_partkey
+   row-size=24B cardinality=15.00M
 ====
 # TPCH-Q18
 # Q18 - Large Value Customer Query
@@ -2088,30 +2523,41 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100]
 |  order by: o_totalprice DESC, o_orderdate ASC
+|  row-size=82B cardinality=0
 |
 01:SUBPLAN
+|  row-size=106B cardinality=0
 |
 |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=106B cardinality=0
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=50B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=56B cardinality=0
 |  |
 |  |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=56B cardinality=0
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=40B cardinality=1
 |  |  |
 |  |  07:AGGREGATE [FINALIZE]
 |  |  |  output: sum(l_quantity)
 |  |  |  having: sum(l_quantity) > 300
+|  |  |  row-size=16B cardinality=0
 |  |  |
 |  |  06:UNNEST [o.o_lineitems]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
+   row-size=50B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=24.00MB Threads=3
 Per-Host Resource Estimates: Memory=538MB
@@ -2123,30 +2569,41 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100]
 |  order by: o_totalprice DESC, o_orderdate ASC
+|  row-size=82B cardinality=0
 |
 01:SUBPLAN
+|  row-size=106B cardinality=0
 |
 |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=106B cardinality=0
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=50B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=56B cardinality=0
 |  |
 |  |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=56B cardinality=0
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=40B cardinality=1
 |  |  |
 |  |  07:AGGREGATE [FINALIZE]
 |  |  |  output: sum(l_quantity)
 |  |  |  having: sum(l_quantity) > 300
+|  |  |  row-size=16B cardinality=0
 |  |  |
 |  |  06:UNNEST [o.o_lineitems]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
+   row-size=50B cardinality=150.00K
 ====
 # TPCH-Q19
 # Q19 - Discounted Revenue Query
@@ -2192,20 +2649,24 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
+|  row-size=16B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  other predicates: ((p_brand = 'Brand#12' AND p_container IN ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') AND l_quantity >= 1 AND l_quantity <= 11 AND p_size <= 5) OR (p_brand = 'Brand#23' AND p_container IN ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') AND l_quantity >= 10 AND l_quantity <= 20 AND p_size <= 10) OR (p_brand = 'Brand#34' AND p_container IN ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') AND l_quantity >= 20 AND l_quantity <= 30 AND p_size <= 15))
 |  runtime filters: RF000 <- p_partkey
+|  row-size=108B cardinality=1.50M
 |
 |--01:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_size >= 1
+|     row-size=52B cardinality=20.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipmode IN ('AIR', 'AIR REG'), l_shipinstruct = 'DELIVER IN PERSON'
    runtime filters: RF000 -> l_partkey
+   row-size=56B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=28.94MB Threads=5
 Per-Host Resource Estimates: Memory=615MB
@@ -2213,27 +2674,32 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
+|  row-size=16B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: sum(l_extendedprice * (1 - l_discount))
+|  row-size=16B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
 |  other predicates: ((p_brand = 'Brand#12' AND p_container IN ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') AND l_quantity >= 1 AND l_quantity <= 11 AND p_size <= 5) OR (p_brand = 'Brand#23' AND p_container IN ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') AND l_quantity >= 10 AND l_quantity <= 20 AND p_size <= 10) OR (p_brand = 'Brand#34' AND p_container IN ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') AND l_quantity >= 20 AND l_quantity <= 30 AND p_size <= 15))
 |  runtime filters: RF000 <- p_partkey
+|  row-size=108B cardinality=1.50M
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpch_nested_parquet.part p]
 |     partitions=1/1 files=1 size=6.23MB
 |     predicates: p_size >= 1
+|     row-size=52B cardinality=20.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipmode IN ('AIR', 'AIR REG'), l_shipinstruct = 'DELIVER IN PERSON'
    runtime filters: RF000 -> l_partkey
+   row-size=56B cardinality=1.50M
 ====
 # TPCH-Q20
 # Note: Tricky rewrite from the original to avoid mixing
@@ -2276,51 +2742,65 @@ PLAN-ROOT SINK
 |
 13:SORT
 |  order by: s_name ASC
+|  row-size=67B cardinality=100.00K
 |
 12:AGGREGATE [FINALIZE]
 |  group by: s_name, s_address
+|  row-size=67B cardinality=100.00K
 |
 11:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = s_suppkey
 |  other join predicates: ps_availqty > 0.5 * sum(l_quantity)
 |  runtime filters: RF000 <- ps_partkey, RF001 <- s_suppkey
+|  row-size=115B cardinality=100.00K
 |
 |--10:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: ps_partkey = p_partkey
+|  |  row-size=115B cardinality=100.00K
 |  |
 |  |--06:SCAN HDFS [tpch_nested_parquet.part p]
 |  |     partitions=1/1 files=1 size=6.23MB
 |  |     predicates: p_name LIKE 'forest%'
+|  |     row-size=53B cardinality=20.00K
 |  |
 |  09:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF006 <- n_nationkey
+|  |  row-size=115B cardinality=100.00K
 |  |
 |  |--05:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|  |     partitions=1/1 files=1 size=3.41KB
+|  |     partitions=1/1 files=1 size=3.44KB
 |  |     predicates: n_name = 'CANADA'
+|  |     row-size=14B cardinality=5
 |  |
 |  01:SUBPLAN
+|  |  row-size=101B cardinality=100.00K
 |  |
 |  |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=101B cardinality=10
 |  |  |
 |  |  |--02:SINGULAR ROW SRC
+|  |  |     row-size=89B cardinality=1
 |  |  |
 |  |  03:UNNEST [s.s_partsupps ps]
+|  |     row-size=0B cardinality=10
 |  |
 |  00:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     predicates: !empty(s.s_partsupps)
 |     runtime filters: RF006 -> s_nationkey
+|     row-size=89B cardinality=10.00K
 |
 08:AGGREGATE [FINALIZE]
 |  output: sum(l_quantity)
 |  group by: l_partkey, l_suppkey
+|  row-size=32B cardinality=1.50M
 |
 07:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
    runtime filters: RF000 -> l.l_partkey, RF001 -> l.l_suppkey
+   row-size=36B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=164.33MB Threads=11
 Per-Host Resource Estimates: Memory=1.07GB
@@ -2331,68 +2811,84 @@ PLAN-ROOT SINK
 |
 13:SORT
 |  order by: s_name ASC
+|  row-size=67B cardinality=100.00K
 |
 20:AGGREGATE [FINALIZE]
 |  group by: s_name, s_address
+|  row-size=67B cardinality=100.00K
 |
 19:EXCHANGE [HASH(s_name,s_address)]
 |
 12:AGGREGATE [STREAMING]
 |  group by: s_name, s_address
+|  row-size=67B cardinality=100.00K
 |
 11:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = s_suppkey
 |  other join predicates: ps_availqty > 0.5 * sum(l_quantity)
 |  runtime filters: RF000 <- ps_partkey, RF001 <- s_suppkey
+|  row-size=115B cardinality=100.00K
 |
 |--18:EXCHANGE [HASH(ps_partkey,s_suppkey)]
 |  |
 |  10:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  |  hash predicates: ps_partkey = p_partkey
+|  |  row-size=115B cardinality=100.00K
 |  |
 |  |--17:EXCHANGE [BROADCAST]
 |  |  |
 |  |  06:SCAN HDFS [tpch_nested_parquet.part p]
 |  |     partitions=1/1 files=1 size=6.23MB
 |  |     predicates: p_name LIKE 'forest%'
+|  |     row-size=53B cardinality=20.00K
 |  |
 |  09:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF006 <- n_nationkey
+|  |  row-size=115B cardinality=100.00K
 |  |
 |  |--16:EXCHANGE [BROADCAST]
 |  |  |
 |  |  05:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|  |     partitions=1/1 files=1 size=3.41KB
+|  |     partitions=1/1 files=1 size=3.44KB
 |  |     predicates: n_name = 'CANADA'
+|  |     row-size=14B cardinality=5
 |  |
 |  01:SUBPLAN
+|  |  row-size=101B cardinality=100.00K
 |  |
 |  |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=101B cardinality=10
 |  |  |
 |  |  |--02:SINGULAR ROW SRC
+|  |  |     row-size=89B cardinality=1
 |  |  |
 |  |  03:UNNEST [s.s_partsupps ps]
+|  |     row-size=0B cardinality=10
 |  |
 |  00:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     predicates: !empty(s.s_partsupps)
 |     runtime filters: RF006 -> s_nationkey
+|     row-size=89B cardinality=10.00K
 |
 15:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_quantity)
 |  group by: l_partkey, l_suppkey
+|  row-size=32B cardinality=1.50M
 |
 14:EXCHANGE [HASH(l_partkey,l_suppkey)]
 |
 08:AGGREGATE [STREAMING]
 |  output: sum(l_quantity)
 |  group by: l_partkey, l_suppkey
+|  row-size=32B cardinality=1.50M
 |
 07:SCAN HDFS [tpch_nested_parquet.customer.c_orders.o_lineitems l]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
    runtime filters: RF000 -> l.l_partkey, RF001 -> l.l_suppkey
+   row-size=36B cardinality=1.50M
 ====
 # TPCH-Q21
 # Q21 - Suppliers Who Kept Orders Waiting Query
@@ -2441,62 +2937,83 @@ PLAN-ROOT SINK
 |
 20:TOP-N [LIMIT=100]
 |  order by: count(*) DESC, s_name ASC
+|  row-size=38B cardinality=100
 |
 19:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: s_name
+|  row-size=38B cardinality=9.96K
 |
 18:SUBPLAN
+|  row-size=146B cardinality=15.00M
 |
 |--16:NESTED LOOP JOIN [RIGHT ANTI JOIN]
 |  |  join predicates: l3.l_suppkey != l1.l_suppkey
+|  |  row-size=146B cardinality=1
 |  |
 |  |--15:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  |  join predicates: l2.l_suppkey != l1.l_suppkey
+|  |  |  row-size=146B cardinality=1
 |  |  |
 |  |  |--12:SINGULAR ROW SRC
+|  |  |     row-size=146B cardinality=1
 |  |  |
 |  |  13:UNNEST [o.o_lineitems l2]
+|  |     row-size=8B cardinality=10
 |  |
 |  14:UNNEST [o.o_lineitems l3]
+|     row-size=32B cardinality=10
 |
 17:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=146B cardinality=15.00M
 |
 |--10:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: n_name = 'SAUDI ARABIA'
+|     row-size=14B cardinality=5
 |
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: l1.l_suppkey = s_suppkey
+|  row-size=132B cardinality=15.00M
 |
 |--00:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=40B cardinality=10.00K
 |
 02:SUBPLAN
+|  row-size=92B cardinality=15.00M
 |
 |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=92B cardinality=100
 |  |
 |  |--03:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  05:SUBPLAN
+|  |  row-size=80B cardinality=100
 |  |
 |  |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=80B cardinality=10
 |  |  |
 |  |  |--06:SINGULAR ROW SRC
+|  |  |     row-size=48B cardinality=1
 |  |  |
 |  |  07:UNNEST [o.o_lineitems l1]
+|  |     row-size=0B cardinality=10
 |  |
 |  04:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 01:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderstatus = 'F'
    predicates on l1: l1.l_receiptdate > l1.l_commitdate
    predicates on l3: l3.l_receiptdate > l3.l_commitdate
+   row-size=12B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=42.84MB Threads=10
 Per-Host Resource Estimates: Memory=1.10GB
@@ -2508,72 +3025,94 @@ PLAN-ROOT SINK
 |
 20:TOP-N [LIMIT=100]
 |  order by: count(*) DESC, s_name ASC
+|  row-size=38B cardinality=100
 |
 24:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: s_name
+|  row-size=38B cardinality=9.96K
 |
 23:EXCHANGE [HASH(s_name)]
 |
 19:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: s_name
+|  row-size=38B cardinality=9.96K
 |
 18:SUBPLAN
+|  row-size=146B cardinality=15.00M
 |
 |--16:NESTED LOOP JOIN [RIGHT ANTI JOIN]
 |  |  join predicates: l3.l_suppkey != l1.l_suppkey
+|  |  row-size=146B cardinality=1
 |  |
 |  |--15:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  |  join predicates: l2.l_suppkey != l1.l_suppkey
+|  |  |  row-size=146B cardinality=1
 |  |  |
 |  |  |--12:SINGULAR ROW SRC
+|  |  |     row-size=146B cardinality=1
 |  |  |
 |  |  13:UNNEST [o.o_lineitems l2]
+|  |     row-size=8B cardinality=10
 |  |
 |  14:UNNEST [o.o_lineitems l3]
+|     row-size=32B cardinality=10
 |
 17:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=146B cardinality=15.00M
 |
 |--22:EXCHANGE [BROADCAST]
 |  |
 |  10:SCAN HDFS [tpch_nested_parquet.region.r_nations n]
-|     partitions=1/1 files=1 size=3.41KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: n_name = 'SAUDI ARABIA'
+|     row-size=14B cardinality=5
 |
 11:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l1.l_suppkey = s_suppkey
+|  row-size=132B cardinality=15.00M
 |
 |--21:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpch_nested_parquet.supplier s]
 |     partitions=1/1 files=1 size=41.79MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=40B cardinality=10.00K
 |
 02:SUBPLAN
+|  row-size=92B cardinality=15.00M
 |
 |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=92B cardinality=100
 |  |
 |  |--03:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  05:SUBPLAN
+|  |  row-size=80B cardinality=100
 |  |
 |  |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=80B cardinality=10
 |  |  |
 |  |  |--06:SINGULAR ROW SRC
+|  |  |     row-size=48B cardinality=1
 |  |  |
 |  |  07:UNNEST [o.o_lineitems l1]
+|  |     row-size=0B cardinality=10
 |  |
 |  04:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 01:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderstatus = 'F'
    predicates on l1: l1.l_receiptdate > l1.l_commitdate
    predicates on l3: l3.l_receiptdate > l3.l_commitdate
+   row-size=12B cardinality=150.00K
 ====
 # TPCH-Q22
 # Q22 - Global Sales Opportunity Query
@@ -2616,32 +3155,42 @@ PLAN-ROOT SINK
 |
 09:SORT
 |  order by: cntrycode ASC
+|  row-size=36B cardinality=15.00K
 |
 08:AGGREGATE [FINALIZE]
 |  output: count(*), sum(c_acctbal)
 |  group by: substr(c_phone, 1, 2)
+|  row-size=36B cardinality=15.00K
 |
 07:SUBPLAN
+|  row-size=55B cardinality=15.00K
 |
 |--05:NESTED LOOP JOIN [RIGHT ANTI JOIN]
+|  |  row-size=55B cardinality=1
 |  |
 |  |--03:SINGULAR ROW SRC
+|  |     row-size=55B cardinality=1
 |  |
 |  04:UNNEST [c.c_orders]
+|     row-size=0B cardinality=10
 |
 06:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: c_acctbal > avg(c_acctbal)
+|  row-size=55B cardinality=15.00K
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: avg(c_acctbal)
+|  |  row-size=8B cardinality=1
 |  |
 |  01:SCAN HDFS [tpch_nested_parquet.customer c]
-|     partitions=1/1 files=4 size=289.00MB
+|     partitions=1/1 files=4 size=288.99MB
 |     predicates: c_acctbal > 0, substr(c_phone, 1, 2) IN ('13', '31', '23', '29', '30', '18', '17')
+|     row-size=35B cardinality=15.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: substr(c_phone, 1, 2) IN ('13', '31', '23', '29', '30', '18', '17')
+   row-size=47B cardinality=15.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=25.94MB Threads=8
 Per-Host Resource Estimates: Memory=688MB
@@ -2652,43 +3201,55 @@ PLAN-ROOT SINK
 |
 09:SORT
 |  order by: cntrycode ASC
+|  row-size=36B cardinality=15.00K
 |
 14:AGGREGATE [FINALIZE]
 |  output: count:merge(*), sum:merge(c_acctbal)
 |  group by: cntrycode
+|  row-size=36B cardinality=15.00K
 |
 13:EXCHANGE [HASH(cntrycode)]
 |
 08:AGGREGATE [STREAMING]
 |  output: count(*), sum(c_acctbal)
 |  group by: substr(c_phone, 1, 2)
+|  row-size=36B cardinality=15.00K
 |
 07:SUBPLAN
+|  row-size=55B cardinality=15.00K
 |
 |--05:NESTED LOOP JOIN [RIGHT ANTI JOIN]
+|  |  row-size=55B cardinality=1
 |  |
 |  |--03:SINGULAR ROW SRC
+|  |     row-size=55B cardinality=1
 |  |
 |  04:UNNEST [c.c_orders]
+|     row-size=0B cardinality=10
 |
 06:NESTED LOOP JOIN [INNER JOIN, BROADCAST]
 |  predicates: c_acctbal > avg(c_acctbal)
+|  row-size=55B cardinality=15.00K
 |
 |--12:EXCHANGE [BROADCAST]
 |  |
 |  11:AGGREGATE [FINALIZE]
 |  |  output: avg:merge(c_acctbal)
+|  |  row-size=8B cardinality=1
 |  |
 |  10:EXCHANGE [UNPARTITIONED]
 |  |
 |  02:AGGREGATE
 |  |  output: avg(c_acctbal)
+|  |  row-size=8B cardinality=1
 |  |
 |  01:SCAN HDFS [tpch_nested_parquet.customer c]
-|     partitions=1/1 files=4 size=289.00MB
+|     partitions=1/1 files=4 size=288.99MB
 |     predicates: c_acctbal > 0, substr(c_phone, 1, 2) IN ('13', '31', '23', '29', '30', '18', '17')
+|     row-size=35B cardinality=15.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.96MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: substr(c_phone, 1, 2) IN ('13', '31', '23', '29', '30', '18', '17')
+   row-size=47B cardinality=15.00K
 ====


[22/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test b/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test
index 2119e2c..ef7764c 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test
@@ -4,6 +4,7 @@ select * from functional_hbase.stringids
 PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.stringids]
+   row-size=107B cardinality=17.33K
 ====
 # predicate on row key doesn't get transformed into scan parameter, because
 # it's mapped as an int (but stored in ascii and ordered lexicographically)
@@ -14,11 +15,12 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: id < 5
+   row-size=89B cardinality=5
 ---- SCANRANGELOCATIONS
 NODE 0:
-  HBASE KEYRANGE <unbounded>:3
   HBASE KEYRANGE 3:7
   HBASE KEYRANGE 7:<unbounded>
+  HBASE KEYRANGE <unbounded>:3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -26,6 +28,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: id < 5
+   row-size=89B cardinality=5
 ====
 # if the row key is mapped as a string col, range predicates are applied to the scan
 select * from functional_hbase.stringids
@@ -38,6 +41,7 @@ PLAN-ROOT SINK
    start key: 5
    stop key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=1
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 5:5\0
@@ -50,6 +54,7 @@ PLAN-ROOT SINK
    start key: 5
    stop key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=1
 ====
 select * from functional_hbase.stringids
 where id > '5'
@@ -60,6 +65,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.stringids]
    start key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=953
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 5\0:7
@@ -72,6 +78,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.stringids]
    start key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=953
 ====
 select * from functional_hbase.stringids
 where id >= '5'
@@ -82,6 +89,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.stringids]
    start key: 5
    predicates: tinyint_col = 5
+   row-size=107B cardinality=953
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 5:7
@@ -94,6 +102,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.stringids]
    start key: 5
    predicates: tinyint_col = 5
+   row-size=107B cardinality=953
 ====
 select * from functional_hbase.stringids
 where id < '5'
@@ -104,10 +113,11 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.stringids]
    stop key: 5
    predicates: tinyint_col = 5
+   row-size=107B cardinality=969
 ---- SCANRANGELOCATIONS
 NODE 0:
-  HBASE KEYRANGE <unbounded>:3
   HBASE KEYRANGE 3:5
+  HBASE KEYRANGE <unbounded>:3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -116,6 +126,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.stringids]
    stop key: 5
    predicates: tinyint_col = 5
+   row-size=107B cardinality=969
 ====
 select * from functional_hbase.stringids
 where id <= '5'
@@ -126,6 +137,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.stringids]
    stop key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=1.45K
 ====
 select * from functional_hbase.stringids
 where id > '4' and id < '5'
@@ -137,6 +149,7 @@ PLAN-ROOT SINK
    start key: 4\0
    stop key: 5
    predicates: tinyint_col = 5
+   row-size=107B cardinality=475
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 4\0:5
@@ -149,6 +162,7 @@ PLAN-ROOT SINK
    start key: 4\0
    stop key: 5
    predicates: tinyint_col = 5
+   row-size=107B cardinality=475
 ====
 select * from functional_hbase.stringids
 where id >= '4' and id < '5'
@@ -160,6 +174,7 @@ PLAN-ROOT SINK
    start key: 4
    stop key: 5
    predicates: tinyint_col = 5
+   row-size=107B cardinality=475
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 4:5
@@ -172,6 +187,7 @@ PLAN-ROOT SINK
    start key: 4
    stop key: 5
    predicates: tinyint_col = 5
+   row-size=107B cardinality=475
 ====
 select * from functional_hbase.stringids
 where id > '4' and id <= '5'
@@ -183,6 +199,7 @@ PLAN-ROOT SINK
    start key: 4\0
    stop key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=953
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 4\0:5\0
@@ -195,6 +212,7 @@ PLAN-ROOT SINK
    start key: 4\0
    stop key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=953
 ====
 select * from functional_hbase.stringids
 where id >= '4' and id <= '5'
@@ -206,6 +224,7 @@ PLAN-ROOT SINK
    start key: 4
    stop key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=953
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 4:5\0
@@ -218,6 +237,7 @@ PLAN-ROOT SINK
    start key: 4
    stop key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=953
 ====
 # with aggregation
 select int_col, count(*)
@@ -230,9 +250,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: id < 5
+   row-size=8B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -241,15 +263,18 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 02:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: id < 5
+   row-size=8B cardinality=5
 ====
 # predicates on string columns against a constant string are converted to HBase filters
 select * from functional_hbase.alltypessmall where string_col = '4'
@@ -259,6 +284,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col EQUAL '4'
    predicates: string_col = '4'
+   row-size=89B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -267,6 +293,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col EQUAL '4'
    predicates: string_col = '4'
+   row-size=89B cardinality=5
 ====
 # test all comparison ops
 select * from functional_hbase.alltypessmall where string_col != '4'
@@ -276,6 +303,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col NOT_EQUAL '4'
    predicates: string_col != '4'
+   row-size=89B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -284,6 +312,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col NOT_EQUAL '4'
    predicates: string_col != '4'
+   row-size=89B cardinality=5
 ====
 select * from functional_hbase.alltypessmall where string_col < '4'
 ---- PLAN
@@ -292,6 +321,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col LESS '4'
    predicates: string_col < '4'
+   row-size=89B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -300,6 +330,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col LESS '4'
    predicates: string_col < '4'
+   row-size=89B cardinality=5
 ====
 select * from functional_hbase.alltypessmall where string_col > '4'
 ---- PLAN
@@ -308,6 +339,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col GREATER '4'
    predicates: string_col > '4'
+   row-size=89B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -316,6 +348,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col GREATER '4'
    predicates: string_col > '4'
+   row-size=89B cardinality=5
 ====
 select * from functional_hbase.alltypessmall where string_col <= '4'
 ---- PLAN
@@ -324,6 +357,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col LESS_OR_EQUAL '4'
    predicates: string_col <= '4'
+   row-size=89B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -332,6 +366,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col LESS_OR_EQUAL '4'
    predicates: string_col <= '4'
+   row-size=89B cardinality=5
 ====
 select * from functional_hbase.alltypessmall where string_col >= '4'
 ---- PLAN
@@ -340,6 +375,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col GREATER_OR_EQUAL '4'
    predicates: string_col >= '4'
+   row-size=89B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -348,6 +384,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col GREATER_OR_EQUAL '4'
    predicates: string_col >= '4'
+   row-size=89B cardinality=5
 ====
 # test multiple filters
 select * from functional_hbase.alltypessmall
@@ -361,6 +398,7 @@ PLAN-ROOT SINK
   d:string_col GREATER_OR_EQUAL '4'
   d:date_string_col EQUAL '04/03/09'
    predicates: string_col != '2', string_col >= '4', date_string_col = '04/03/09'
+   row-size=89B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -372,6 +410,7 @@ PLAN-ROOT SINK
   d:string_col GREATER_OR_EQUAL '4'
   d:date_string_col EQUAL '04/03/09'
    predicates: string_col != '2', string_col >= '4', date_string_col = '04/03/09'
+   row-size=89B cardinality=1
 ====
 # mix of predicates and functional_hbase. filters
 select * from functional_hbase.alltypessmall where string_col = '4' and tinyint_col = 5
@@ -381,6 +420,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypessmall]
    hbase filters: d:string_col EQUAL '4'
    predicates: tinyint_col = 5, string_col = '4'
+   row-size=89B cardinality=1
 ====
 # mix of predicates, functional_hbase. filters and start/stop keys
 select * from functional_hbase.stringids
@@ -393,6 +433,7 @@ PLAN-ROOT SINK
    stop key: 5\0
    hbase filters: d:string_col EQUAL '4'
    predicates: tinyint_col = 5, string_col = '4'
+   row-size=107B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 4:5\0
@@ -406,6 +447,7 @@ PLAN-ROOT SINK
    stop key: 5\0
    hbase filters: d:string_col EQUAL '4'
    predicates: tinyint_col = 5, string_col = '4'
+   row-size=107B cardinality=2
 ====
 # predicates involving casts (ie, non-string comparisons) cannot be turned into filters
 select * from functional_hbase.alltypessmall where cast(string_col as int) >= 4
@@ -414,6 +456,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: CAST(string_col AS INT) >= 4
+   row-size=89B cardinality=5
 ====
 # non-const comparisons cannot be turned into filters
 select * from functional_hbase.alltypessmall where string_col >= date_string_col
@@ -422,6 +465,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: string_col >= date_string_col
+   row-size=89B cardinality=5
 ====
 # IMP-1188 - row key predicate is a constant expr.
 select * from functional_hbase.stringids
@@ -434,6 +478,7 @@ PLAN-ROOT SINK
    start key: 5
    stop key: 5\0
    predicates: tinyint_col = 5
+   row-size=107B cardinality=1
 ====
 # IMP-1188 - row key predicate is a constant expr.
 select * from functional_hbase.stringids
@@ -447,6 +492,7 @@ PLAN-ROOT SINK
    stop key: 5\0
    hbase filters: d:string_col EQUAL '4'
    predicates: tinyint_col = 5, string_col = '4'
+   row-size=107B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 4:5\0
@@ -460,6 +506,7 @@ PLAN-ROOT SINK
    stop key: 5\0
    hbase filters: d:string_col EQUAL '4'
    predicates: tinyint_col = 5, string_col = '4'
+   row-size=107B cardinality=2
 ====
 # IMP-1188 - row key predicate is null.
 select * from functional_hbase.stringids where id = null
@@ -467,12 +514,14 @@ select * from functional_hbase.stringids where id = null
 PLAN-ROOT SINK
 |
 empty scan node
+   row-size=107B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 empty scan node
+   row-size=107B cardinality=1
 ====
 # IMP-1188 - row key lower bound is bigger than upper bound.
 select * from functional_hbase.stringids where id > 'b' and id < 'a'
@@ -480,12 +529,14 @@ select * from functional_hbase.stringids where id > 'b' and id < 'a'
 PLAN-ROOT SINK
 |
 empty scan node
+   row-size=107B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 empty scan node
+   row-size=107B cardinality=1
 ====
 # IMP-1188 - casting row key to non-string type disables predicate from being pushed
 # into HBase
@@ -496,6 +547,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.stringids]
    predicates: CAST(id AS INT) < 5
+   row-size=107B cardinality=1.73K
 ====
 # The following test cases test plan generation for queries executed against HBase
 # and have 'IS NULL/IS NOT NULL' predicates as well as conjunctive predicates.
@@ -509,6 +561,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NULL
+   row-size=84B cardinality=1.73K
 ====
 # HBase scan query with projection and an 'IS NULL' predicate on one of the
 # projected columns
@@ -519,6 +572,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NULL
+   row-size=12B cardinality=1.73K
 ====
 # HBase scan query with 'IS NOT NULL' predicate
 select * from functional_hbase.alltypesagg
@@ -528,6 +582,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NOT NULL
+   row-size=84B cardinality=1.73K
 ====
 # HBase scan query with conjunctive predicates one of which is an 'IS NULL'
 select * from functional_hbase.alltypesagg
@@ -537,6 +592,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NULL, day = 1
+   row-size=84B cardinality=1.73K
 ====
 # HBase scan query with conjunctive predicates one of which is an 'IS NOT NULL'
 select * from functional_hbase.alltypesagg
@@ -546,11 +602,12 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NOT NULL, bool_col = TRUE
+   row-size=84B cardinality=1.73K
 ---- SCANRANGELOCATIONS
 NODE 0:
-  HBASE KEYRANGE <unbounded>:3
   HBASE KEYRANGE 3:7
   HBASE KEYRANGE 7:<unbounded>
+  HBASE KEYRANGE <unbounded>:3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -558,6 +615,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NOT NULL, bool_col = TRUE
+   row-size=84B cardinality=1.73K
 ====
 # HBase scan query with an aggregation and a single predicate
 select count(*) from functional_hbase.alltypesagg
@@ -567,22 +625,27 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col = 10
+   row-size=8B cardinality=1.73K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col = 10
+   row-size=8B cardinality=1.73K
 ====
 # HBase scan query with an aggregation and conjunctive predicates
 select count(*) from functional_hbase.alltypesagg
@@ -592,9 +655,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col = 10, day = 1
+   row-size=12B cardinality=1.73K
 ====
 # IMPALA-1141: Simple joins to make sure cardinality estimates are right.
 select a.id, b.id, c.id
@@ -612,15 +677,20 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = b.int_col
+|  row-size=29B cardinality=300
 |
 |--00:SCAN HBASE [functional_hbase.alltypessmall b]
 |     predicates: b.bool_col = FALSE
+|     row-size=9B cardinality=25
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col
+|  row-size=20B cardinality=120
 |
 |--02:SCAN HBASE [functional_hbase.alltypessmall c]
 |     predicates: c.month = 4
+|     row-size=12B cardinality=12
 |
 01:SCAN HBASE [functional_hbase.alltypessmall a]
+   row-size=8B cardinality=50
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test b/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test
index 9b9a678..244ad82 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test
@@ -1,18 +1,22 @@
 # all partitions are pruned during planning
 select * FROM functional.alltypes
-where cast(year as string) = to_date( from_unixtime(unix_timestamp()) )
+where cast(year as string) = '2019-01-01'
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: CAST(year AS STRING) = '2019-01-01'
    partitions=0/24 files=0 size=0B
+   row-size=89B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: CAST(year AS STRING) = '2019-01-01'
    partitions=0/24 files=0 size=0B
+   row-size=89B cardinality=0
 ====
 # predicate is evaluated by the scan node
 select zip, count(*)
@@ -25,10 +29,12 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
    predicates: name LIKE 'm%'
+   row-size=16B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -37,16 +43,19 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 02:EXCHANGE [HASH(zip)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
    predicates: name LIKE 'm%'
+   row-size=16B cardinality=0
 ====
 # all partitions are selected
 select * from functional.alltypes
@@ -55,6 +64,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
@@ -88,6 +98,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # predicate on first partition key
 select id, month from functional.alltypes where year = 2009
@@ -95,7 +106,9 @@ select id, month from functional.alltypes where year = 2009
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009
    partitions=12/24 files=12 size=238.68KB
+   row-size=8B cardinality=3.65K
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
@@ -116,7 +129,9 @@ PLAN-ROOT SINK
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009
    partitions=12/24 files=12 size=238.68KB
+   row-size=8B cardinality=3.65K
 ====
 # same predicate, phrased differently
 select * from functional.alltypes where year = 2009.0
@@ -124,21 +139,27 @@ select * from functional.alltypes where year = 2009.0
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009.0
    partitions=12/24 files=12 size=238.68KB
+   row-size=89B cardinality=3.65K
 ====
 select * from functional.alltypes where 2009 = year
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009
    partitions=12/24 files=12 size=238.68KB
+   row-size=89B cardinality=3.65K
 ====
 select * from functional.alltypes where 2009 <=> year
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year IS NOT DISTINCT FROM 2009
    partitions=12/24 files=12 size=238.68KB
+   row-size=89B cardinality=3.65K
 ====
 # compound predicate on the second partition key
 select * from functional.alltypes where !(month > 2)
@@ -146,7 +167,9 @@ select * from functional.alltypes where !(month > 2)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (month > 2)
    partitions=4/24 files=4 size=76.83KB
+   row-size=89B cardinality=1.18K
 ====
 # nested compound predicates on the second partition key
 select * from functional.alltypes where !(!(month=1))
@@ -154,14 +177,18 @@ select * from functional.alltypes where !(!(month=1))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (NOT (month = 1))
    partitions=2/24 files=2 size=40.32KB
+   row-size=89B cardinality=620
 ====
 select * from functional.alltypes where !(!(month<=>1))
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (NOT (month IS NOT DISTINCT FROM 1))
    partitions=2/24 files=2 size=40.32KB
+   row-size=89B cardinality=620
 ====
 # predicates on both partition keys one of which is a compound predicate with NOT
 select * from functional.alltypes where year=2009 and !(month < 6)
@@ -169,7 +196,9 @@ select * from functional.alltypes where year=2009 and !(month < 6)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, NOT (month < 6)
    partitions=7/24 files=7 size=140.58KB
+   row-size=89B cardinality=2.14K
 ====
 # compound predicates on both partition keys
 select * from functional.alltypes where !(year < 2009) and !(month < 6)
@@ -177,7 +206,9 @@ select * from functional.alltypes where !(year < 2009) and !(month < 6)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (year < 2009), NOT (month < 6)
    partitions=14/24 files=14 size=281.15KB
+   row-size=89B cardinality=4.28K
 ====
 # compound predicate on a conjunct
 select * from functional.alltypes where !(year = 2009 and month > 6)
@@ -185,21 +216,27 @@ select * from functional.alltypes where !(year = 2009 and month > 6)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (year = 2009 AND month > 6)
    partitions=18/24 files=18 size=357.58KB
+   row-size=89B cardinality=5.46K
 ====
 select * from functional.alltypes where !(year <=> 2009 and month > 6)
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (year IS NOT DISTINCT FROM 2009 AND month > 6)
    partitions=18/24 files=18 size=357.58KB
+   row-size=89B cardinality=5.46K
 ====
 select * from functional.alltypes where !(year <=> 2009) or !(month > 6)
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (year IS NOT DISTINCT FROM 2009) OR NOT (month > 6)
    partitions=18/24 files=18 size=357.58KB
+   row-size=89B cardinality=5.46K
 ====
 # compound predicate on a disjunct
 select * from functional.alltypes where !(month = 6 or month = 8)
@@ -207,14 +244,18 @@ select * from functional.alltypes where !(month = 6 or month = 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT month IN (6, 8)
    partitions=20/24 files=20 size=398.31KB
+   row-size=89B cardinality=6.08K
 ====
 select * from functional.alltypes where !(month <=> 6 or month <=> 8)
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (month IS NOT DISTINCT FROM 6 OR month IS NOT DISTINCT FROM 8)
    partitions=20/24 files=20 size=398.31KB
+   row-size=89B cardinality=6.08K
 ====
 # not predicate with is null
 select * from functional.alltypes where not (year = 2009 or month is null)
@@ -222,7 +263,9 @@ select * from functional.alltypes where not (year = 2009 or month is null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (year = 2009 OR month IS NULL)
    partitions=12/24 files=12 size=239.77KB
+   row-size=89B cardinality=3.65K
 ====
 # not predicate with "<=> null" as a synonym of "is null"
 select * from functional.alltypes where not (year = 2009 or month <=> null)
@@ -230,7 +273,9 @@ select * from functional.alltypes where not (year = 2009 or month <=> null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (year = 2009 OR month IS NOT DISTINCT FROM NULL)
    partitions=12/24 files=12 size=239.77KB
+   row-size=89B cardinality=3.65K
 ====
 # nested not predicates with is null
 select * from functional.alltypes where not (not (month is null))
@@ -238,7 +283,9 @@ select * from functional.alltypes where not (not (month is null))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (NOT (month IS NULL))
    partitions=0/24 files=0 size=0B
+   row-size=89B cardinality=0
 ====
 # nested not predicates with "<=> null" as a synonym of "is null"
 select * from functional.alltypes where not (not (month <=> null))
@@ -246,7 +293,9 @@ select * from functional.alltypes where not (not (month <=> null))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (NOT (month IS NOT DISTINCT FROM NULL))
    partitions=0/24 files=0 size=0B
+   row-size=89B cardinality=0
 ====
 # nested not predicates with disjunct
 select * from functional.alltypes where not (not (month is null or year = 2009))
@@ -254,7 +303,9 @@ select * from functional.alltypes where not (not (month is null or year = 2009))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (NOT (month IS NULL OR year = 2009))
    partitions=12/24 files=12 size=238.68KB
+   row-size=89B cardinality=3.65K
 ====
 # nested not predicates with disjunct and "<=> null" as a synonym of "is null"
 select * from functional.alltypes where not (not (month <=> null or year = 2009))
@@ -262,7 +313,9 @@ select * from functional.alltypes where not (not (month <=> null or year = 2009)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: NOT (NOT (month IS NOT DISTINCT FROM NULL OR year = 2009))
    partitions=12/24 files=12 size=238.68KB
+   row-size=89B cardinality=3.65K
 ====
 # predicate on second partition key
 select * from functional.alltypes where month=1
@@ -270,7 +323,9 @@ select * from functional.alltypes where month=1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: month = 1
    partitions=2/24 files=2 size=40.32KB
+   row-size=89B cardinality=620
 ====
 # predicate on both partition keys
 select * from functional.alltypes where year=2009 and month=1
@@ -278,7 +333,9 @@ select * from functional.alltypes where year=2009 and month=1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 1
    partitions=1/24 files=1 size=19.95KB
+   row-size=89B cardinality=310
 ====
 # single-sided range on 2nd key
 select * from functional.alltypes where year=2009 and month > 6
@@ -286,28 +343,36 @@ select * from functional.alltypes where year=2009 and month > 6
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month > 6
    partitions=6/24 files=6 size=120.87KB
+   row-size=89B cardinality=1.84K
 ====
 select * from functional.alltypes where year=2009 and month < 6
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month < 6
    partitions=5/24 files=5 size=98.11KB
+   row-size=89B cardinality=1.51K
 ====
 select * from functional.alltypes where year=2009 and month in (1, 3, 5, 7)
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month IN (1, 3, 5, 7)
    partitions=4/24 files=4 size=80.74KB
+   row-size=89B cardinality=1.24K
 ====
 select * from functional.alltypes where year<=>2009 and month in (1, 3, 5, 7)
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year IS NOT DISTINCT FROM 2009, month IN (1, 3, 5, 7)
    partitions=4/24 files=4 size=80.74KB
+   row-size=89B cardinality=1.24K
 ====
 # adding a predicate that always evaluates to true should not change anything
 select * from functional.alltypes
@@ -316,7 +381,9 @@ where year=2009 and month in (1, 3, 5, 7) and month is not null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month IN (1, 3, 5, 7), month IS NOT NULL
    partitions=4/24 files=4 size=80.74KB
+   row-size=89B cardinality=1.24K
 ====
 # adding a predicate that always evaluates to false should remove all partitions
 select * from functional.alltypes
@@ -325,14 +392,18 @@ where year=2009 and month in (1, 3, 5, 7) and month is null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month IN (1, 3, 5, 7), month IS NULL
    partitions=0/24 files=0 size=0B
+   row-size=89B cardinality=0
 ====
 select * from functional.alltypes where year=2009 and (month in (1, 3, 5) or month = 7)
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month IN (1, 3, 5, 7)
    partitions=4/24 files=4 size=80.74KB
+   row-size=89B cardinality=1.24K
 ====
 # single-sided ranges on both keys
 select * from functional.alltypes where year<=2009 and month < 6
@@ -340,7 +411,9 @@ select * from functional.alltypes where year<=2009 and month < 6
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year <= 2009, month < 6
    partitions=5/24 files=5 size=98.11KB
+   row-size=89B cardinality=1.51K
 ====
 # range on 2nd key
 select * from functional.alltypes where month < 9 and month > 6
@@ -348,7 +421,9 @@ select * from functional.alltypes where month < 9 and month > 6
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: month < 9, month > 6
    partitions=4/24 files=4 size=81.46KB
+   row-size=89B cardinality=1.24K
 ====
 # multiple predicates on first key; 2nd one applied as predicate
 select * from functional.alltypes where year < 2010 and year < 2009 and month > 6
@@ -356,7 +431,9 @@ select * from functional.alltypes where year < 2010 and year < 2009 and month >
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year < 2010, year < 2009, month > 6
    partitions=0/24 files=0 size=0B
+   row-size=89B cardinality=0
 ====
 # multiple predicates on second key
 select * from functional.alltypes
@@ -365,7 +442,9 @@ where year < 2010 and (month > 6 or month = 1 or month in (3, 4))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year < 2010, (month > 6 OR month = 1 OR month IN (3, 4))
    partitions=9/24 files=9 size=180.49KB
+   row-size=89B cardinality=2.76K
 ====
 # multiple predicates on second key
 select * from functional.alltypes
@@ -374,7 +453,9 @@ where year < 2010 and (month > 6 or month <=> 1 or month in (3, 4))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year < 2010, (month > 6 OR month IS NOT DISTINCT FROM 1 OR month IN (3, 4))
    partitions=9/24 files=9 size=180.49KB
+   row-size=89B cardinality=2.76K
 ====
 # between predicate on second key
 select * from functional.alltypes where year = 2009 and month between 6 and 8
@@ -382,7 +463,9 @@ select * from functional.alltypes where year = 2009 and month between 6 and 8
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month >= 6, month <= 8
    partitions=3/24 files=3 size=60.43KB
+   row-size=89B cardinality=920
 ====
 # between predicate on second key
 select * from functional.alltypes where year <=> 2009 and month between 6 and 8
@@ -390,7 +473,9 @@ select * from functional.alltypes where year <=> 2009 and month between 6 and 8
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year IS NOT DISTINCT FROM 2009, month >= 6, month <= 8
    partitions=3/24 files=3 size=60.43KB
+   row-size=89B cardinality=920
 ====
 # between predicates on first and second keys
 select * from functional.alltypes
@@ -399,7 +484,9 @@ where year between 2009 and 2009 and month between 6 and 8
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year >= 2009, year <= 2009, month >= 6, month <= 8
    partitions=3/24 files=3 size=60.43KB
+   row-size=89B cardinality=920
 ====
 # disjunctive between predicates on second key
 select * from functional.alltypes
@@ -408,7 +495,9 @@ where year = 2009 and (month between 6 and 7 or month between 7 and 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, (month >= 6 AND month <= 7 OR month >= 7 AND month <= 8)
    partitions=3/24 files=3 size=60.43KB
+   row-size=89B cardinality=920
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=6/090601.txt 0:20179
@@ -422,7 +511,9 @@ where year = 2009 and (month between 5+1 and 8-1 or month between 9-2 and 1+7)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, (month >= 6 AND month <= 7 OR month >= 7 AND month <= 8)
    partitions=3/24 files=3 size=60.43KB
+   row-size=89B cardinality=920
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=6/090601.txt 0:20179
@@ -435,14 +526,18 @@ select * from functional.alltypes where year - 1 = 2009
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year - 1 = 2009
    partitions=12/24 files=12 size=239.77KB
+   row-size=89B cardinality=3.65K
 ====
 select * from functional.alltypes where year - 1 <=> 2009
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year - 1 IS NOT DISTINCT FROM 2009
    partitions=12/24 files=12 size=239.77KB
+   row-size=89B cardinality=3.65K
 ====
 # Predicates on a partition key with nulls (see IMPALA-887)
 # IS NULL predicate on a partition key with nulls
@@ -451,7 +546,9 @@ select * from functional.alltypesagg where day is null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NULL
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # <=> null predicate on a partition key with nulls
 select * from functional.alltypesagg where day <=> null
@@ -459,7 +556,9 @@ select * from functional.alltypesagg where day <=> null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NOT DISTINCT FROM NULL
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # IS NOT NULL predicate on a partition key with nulls
 select * from functional.alltypesagg where day is not null
@@ -467,7 +566,9 @@ select * from functional.alltypesagg where day is not null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NOT NULL
    partitions=10/11 files=10 size=743.67KB
+   row-size=95B cardinality=10.00K
 ====
 # IS DISTINCT FROM NULL predicate on a partition key with nulls
 select * from functional.alltypesagg where day is distinct from null
@@ -475,14 +576,18 @@ select * from functional.alltypesagg where day is distinct from null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS DISTINCT FROM NULL
    partitions=10/11 files=10 size=743.67KB
+   row-size=95B cardinality=10.00K
 ====
 select * from functional.alltypesagg where day = day
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day = day
    partitions=10/11 files=10 size=743.67KB
+   row-size=95B cardinality=10.00K
 ====
 select * from functional.alltypesagg where day <=> day
 ---- PLAN
@@ -490,6 +595,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=95B cardinality=11.00K
 ====
 # partition key predicates which are in conjunctive normal form (case 1)
 select * from functional.alltypesagg where day is null and day = 10
@@ -511,7 +617,9 @@ select * from functional.alltypesagg where day is null and month = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NULL, month = 1
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates which are in conjunctive normal form (case 2)
 select * from functional.alltypesagg where day <=> null and month = 1
@@ -519,7 +627,9 @@ select * from functional.alltypesagg where day <=> null and month = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NOT DISTINCT FROM NULL, month = 1
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates which are in conjunctive normal form (case 3)
 select * from functional.alltypesagg where month = 1 and (day is null or day = 10)
@@ -527,7 +637,9 @@ select * from functional.alltypesagg where month = 1 and (day is null or day = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: month = 1, (day IS NULL OR day = 10)
    partitions=2/11 files=2 size=145.53KB
+   row-size=95B cardinality=2.00K
 ====
 # partition key predicates which are in conjunctive normal form (case 3)
 select * from functional.alltypesagg where month = 1 and (day <=> null or day = 10)
@@ -535,7 +647,9 @@ select * from functional.alltypesagg where month = 1 and (day <=> null or day =
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: month = 1, (day IS NOT DISTINCT FROM NULL OR day = 10)
    partitions=2/11 files=2 size=145.53KB
+   row-size=95B cardinality=2.00K
 ====
 # partition key predicates which are in conjunctive normal form (case 4)
 select * from functional.alltypesagg where month = 1 and (day is null or year = 2010)
@@ -543,7 +657,9 @@ select * from functional.alltypesagg where month = 1 and (day is null or year =
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: month = 1, (day IS NULL OR year = 2010)
    partitions=11/11 files=11 size=814.73KB
+   row-size=95B cardinality=11.00K
 ====
 # partition key predicates which are in conjunctive normal form (case 4)
 select * from functional.alltypesagg where month = 1 and (day <=> null or year = 2010)
@@ -551,7 +667,9 @@ select * from functional.alltypesagg where month = 1 and (day <=> null or year =
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: month = 1, (day IS NOT DISTINCT FROM NULL OR year = 2010)
    partitions=11/11 files=11 size=814.73KB
+   row-size=95B cardinality=11.00K
 ====
 # partition key predicates which are in conjunctive normal form (case 5)
 select * from functional.alltypesagg
@@ -560,7 +678,9 @@ where (year = 2010 or month = 1) and (day is not null or day = 10)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: (year = 2010 OR month = 1), (day IS NOT NULL OR day = 10)
    partitions=10/11 files=10 size=743.67KB
+   row-size=95B cardinality=10.00K
 ====
 # partition key predicates which are in conjunctive normal form (case 5)
 select * from functional.alltypesagg
@@ -569,7 +689,9 @@ where (year = 2010 or month = 1) and (day is distinct from null or day = 10)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: (year = 2010 OR month = 1), (day IS DISTINCT FROM NULL OR day = 10)
    partitions=10/11 files=10 size=743.67KB
+   row-size=95B cardinality=10.00K
 ====
 # partition key predicates which are in disjunctive normal form (case 1)
 select * from functional.alltypesagg where day is null or month = 1
@@ -577,7 +699,9 @@ select * from functional.alltypesagg where day is null or month = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NULL OR month = 1
    partitions=11/11 files=11 size=814.73KB
+   row-size=95B cardinality=11.00K
 ====
 # partition key predicates which are in disjunctive normal form (case 1)
 select * from functional.alltypesagg where day <=> null or month = 1
@@ -585,7 +709,9 @@ select * from functional.alltypesagg where day <=> null or month = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NOT DISTINCT FROM NULL OR month = 1
    partitions=11/11 files=11 size=814.73KB
+   row-size=95B cardinality=11.00K
 ====
 # partition key predicates which are in disjunctive normal form (case 2)
 select * from functional.alltypesagg where day is null or day = 10
@@ -593,7 +719,9 @@ select * from functional.alltypesagg where day is null or day = 10
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NULL OR day = 10
    partitions=2/11 files=2 size=145.53KB
+   row-size=95B cardinality=2.00K
 ====
 # partition key predicates which are in disjunctive normal form (case 2)
 select * from functional.alltypesagg where day <=> null or day = 10
@@ -601,7 +729,9 @@ select * from functional.alltypesagg where day <=> null or day = 10
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day IS NOT DISTINCT FROM NULL OR day = 10
    partitions=2/11 files=2 size=145.53KB
+   row-size=95B cardinality=2.00K
 ====
 # partition key predicates which are in disjunctive normal form (case 3)
 select * from functional.alltypesagg where day = 10 or (day is null and year = 2010)
@@ -609,7 +739,9 @@ select * from functional.alltypesagg where day = 10 or (day is null and year = 2
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day = 10 OR (day IS NULL AND year = 2010)
    partitions=2/11 files=2 size=145.53KB
+   row-size=95B cardinality=2.00K
 ====
 # partition key predicates which are in disjunctive normal form (case 3)
 select * from functional.alltypesagg where day = 10 or (day <=> null and year = 2010)
@@ -617,7 +749,9 @@ select * from functional.alltypesagg where day = 10 or (day <=> null and year =
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day = 10 OR (day IS NOT DISTINCT FROM NULL AND year = 2010)
    partitions=2/11 files=2 size=145.53KB
+   row-size=95B cardinality=2.00K
 ====
 # partition key predicates which are in disjunctive normal form (case 4)
 select * from functional.alltypesagg
@@ -626,7 +760,9 @@ where (month = 1 and day = 1) or (day is null and year = 2010)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: (month = 1 AND day = 1) OR (day IS NULL AND year = 2010)
    partitions=2/11 files=2 size=144.45KB
+   row-size=95B cardinality=2.00K
 ====
 # partition key predicates which are in disjunctive normal form (case 4)
 select * from functional.alltypesagg
@@ -635,7 +771,9 @@ where (month = 1 and day = 1) or (day <=> null and year = 2010)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: (month = 1 AND day = 1) OR (day IS NOT DISTINCT FROM NULL AND year = 2010)
    partitions=2/11 files=2 size=144.45KB
+   row-size=95B cardinality=2.00K
 ====
 # partition key predicates with negation (case 1)
 select * from functional.alltypesagg where not (day is not null)
@@ -643,7 +781,9 @@ select * from functional.alltypesagg where not (day is not null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS NOT NULL)
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates with negation (case 1)
 select * from functional.alltypesagg where not (day is distinct from null)
@@ -651,7 +791,9 @@ select * from functional.alltypesagg where not (day is distinct from null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS DISTINCT FROM NULL)
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates with negation (case 2)
 select * from functional.alltypesagg where not (not (day is null))
@@ -659,7 +801,9 @@ select * from functional.alltypesagg where not (not (day is null))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (NOT (day IS NULL))
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates with negation (case 2)
 select * from functional.alltypesagg where not (not (day <=> null))
@@ -667,7 +811,9 @@ select * from functional.alltypesagg where not (not (day <=> null))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (NOT (day IS NOT DISTINCT FROM NULL))
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates with negation (case 3)
 select * from functional.alltypesagg where not (day is not null and month = 1)
@@ -675,7 +821,9 @@ select * from functional.alltypesagg where not (day is not null and month = 1)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS NOT NULL AND month = 1)
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates with negation (case 3)
 select * from functional.alltypesagg where not (day is distinct from null and month = 1)
@@ -683,7 +831,9 @@ select * from functional.alltypesagg where not (day is distinct from null and mo
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS DISTINCT FROM NULL AND month = 1)
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates with negation (case 3)
 select * from functional.alltypesagg where not (day is not null or day < 9)
@@ -691,7 +841,9 @@ select * from functional.alltypesagg where not (day is not null or day < 9)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS NOT NULL OR day < 9)
    partitions=0/11 files=0 size=0B
+   row-size=95B cardinality=0
 ====
 # partition key predicates with negation (case 3)
 select * from functional.alltypesagg where not (day is distinct from null or day < 9)
@@ -699,7 +851,9 @@ select * from functional.alltypesagg where not (day is distinct from null or day
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS DISTINCT FROM NULL OR day < 9)
    partitions=0/11 files=0 size=0B
+   row-size=95B cardinality=0
 ====
 # partition key predicates with negation (case 4)
 select * from functional.alltypesagg
@@ -708,7 +862,9 @@ where not (day is not null and (not (day < 9 and month = 1)))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS NOT NULL AND (NOT (day < 9 AND month = 1)))
    partitions=9/11 files=9 size=665.77KB
+   row-size=95B cardinality=9.00K
 ====
 # partition key predicates with negation (case 4)
 select * from functional.alltypesagg
@@ -717,7 +873,9 @@ where not (day is distinct from null and (not (day < 9 and month = 1)))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS DISTINCT FROM NULL AND (NOT (day < 9 AND month = 1)))
    partitions=9/11 files=9 size=665.77KB
+   row-size=95B cardinality=9.00K
 ====
 # partition key predicates with negation (case 5)
 select * from functional.alltypesagg
@@ -726,7 +884,9 @@ where not (day is not null or (day = 1 and (not (month = 1 or year = 2010))))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS NOT NULL OR (day = 1 AND (NOT (month = 1 OR year = 2010))))
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates with negation (case 5)
 select * from functional.alltypesagg
@@ -735,7 +895,9 @@ where not (day is distinct from null or (day = 1 and (not (month = 1 or year = 2
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: NOT (day IS DISTINCT FROM NULL OR (day = 1 AND (NOT (month = 1 OR year = 2010))))
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates where some are evaluated by the index and others are evaluated in the BE
 select * from functional.alltypesagg
@@ -744,7 +906,9 @@ where year + 1 = 2011 and month + 1 <= 3 and day is null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: year + 1 = 2011, month + 1 <= 3, day IS NULL
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # partition key predicates where some are evaluated by the index and others are evaluated in the BE
 select * from functional.alltypesagg
@@ -753,7 +917,9 @@ where year + 1 = 2011 and month + 1 <= 3 and day <=> null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: year + 1 = 2011, month + 1 <= 3, day IS NOT DISTINCT FROM NULL
    partitions=1/11 files=1 size=71.05KB
+   row-size=95B cardinality=1.00K
 ====
 # all supported predicates that can be evaluated using partition key index
 select * from functional.alltypesagg
@@ -764,7 +930,9 @@ or not (day not in (10)) or not (day != 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day = 5 OR (day >= 1 AND day <= 2) OR (day > 6 AND day < 8) OR day IS NULL OR day IN (4) OR NOT (day IS NOT NULL) OR NOT (day NOT IN (10)) OR NOT (day != 8)
    partitions=8/11 files=8 size=591.30KB
+   row-size=95B cardinality=8.00K
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypesagg/year=2010/month=1/day=1/100101.txt 0:75153
@@ -785,7 +953,9 @@ or not (day not in (10)) or not (day != 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
+   partition predicates: day = 5 OR (day >= 1 AND day <= 2) OR (day > 6 AND day < 8) OR day IS NOT DISTINCT FROM NULL OR day IN (4) OR NOT (day IS DISTINCT FROM NULL) OR NOT (day NOT IN (10)) OR NOT (day != 8)
    partitions=8/11 files=8 size=591.30KB
+   row-size=95B cardinality=8.00K
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypesagg/year=2010/month=1/day=1/100101.txt 0:75153
@@ -803,56 +973,72 @@ select * from functional.emptytable where f2 = 10
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.emptytable]
+   partition predicates: f2 = 10
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select * from functional.emptytable where f2 != 10
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.emptytable]
+   partition predicates: f2 != 10
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select * from functional.emptytable where f2 > 10
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.emptytable]
+   partition predicates: f2 > 10
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select * from functional.emptytable where f2 < 10
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.emptytable]
+   partition predicates: f2 < 10
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select * from functional.emptytable where f2 in (10)
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.emptytable]
+   partition predicates: f2 IN (10)
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select * from functional.emptytable where f2 not in (10)
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.emptytable]
+   partition predicates: f2 NOT IN (10)
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select * from functional.emptytable where f2 is null
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.emptytable]
+   partition predicates: f2 IS NULL
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select * from functional.emptytable where f2 is not null
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.emptytable]
+   partition predicates: f2 IS NOT NULL
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # multi-file non-partitioned table
 select * from functional.alltypesaggmultifilesNoPart
@@ -861,6 +1047,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesaggmultifilesnopart]
    partitions=1/1 files=4 size=805.23KB
+   row-size=83B cardinality=11.00K
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypesaggmultifilesnopart/000000_0 0:222916
@@ -874,7 +1061,9 @@ select * from functional.alltypesaggmultifiles where day <= 2
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesaggmultifiles]
+   partition predicates: day <= 2
    partitions=2/11 files=8 size=145.97KB
+   row-size=84B cardinality=unavailable
 ====
 # Test partition pruning on a table that has a large number of partitions
 # (see IMPALA-887)
@@ -885,14 +1074,18 @@ select * from scale_db.num_partitions_1234_blocks_per_partition_1 where j = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [scale_db.num_partitions_1234_blocks_per_partition_1]
+   partition predicates: j = 1
    partitions=1/1234 files=1 size=2B
+   row-size=8B cardinality=1
 ====
 select * from scale_db.num_partitions_1234_blocks_per_partition_1 where j <=> 1
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [scale_db.num_partitions_1234_blocks_per_partition_1]
+   partition predicates: j IS NOT DISTINCT FROM 1
    partitions=1/1234 files=1 size=2B
+   row-size=8B cardinality=1
 ====
 # Test disjunctive predicate on a partition column
 select * from scale_db.num_partitions_1234_blocks_per_partition_1 where j = 1 or j = 2
@@ -900,14 +1093,18 @@ select * from scale_db.num_partitions_1234_blocks_per_partition_1 where j = 1 or
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [scale_db.num_partitions_1234_blocks_per_partition_1]
+   partition predicates: j IN (1, 2)
    partitions=2/1234 files=2 size=4B
+   row-size=8B cardinality=2
 ====
 select * from scale_db.num_partitions_1234_blocks_per_partition_1 where j <=> 1 or j <=> 2
 ---- PLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [scale_db.num_partitions_1234_blocks_per_partition_1]
+   partition predicates: j IS NOT DISTINCT FROM 1 OR j IS NOT DISTINCT FROM 2
    partitions=2/1234 files=2 size=4B
+   row-size=8B cardinality=2
 ====
 # Test conjunctive predicate on a partition column
 select * from scale_db.num_partitions_1234_blocks_per_partition_1 where j = 1 and j = 2
@@ -921,7 +1118,9 @@ select * from scale_db.num_partitions_1234_blocks_per_partition_1 where j <=> 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [scale_db.num_partitions_1234_blocks_per_partition_1]
+   partition predicates: j IS NOT DISTINCT FROM 1, j IS NOT DISTINCT FROM 2
    partitions=0/1234 files=0 size=0B
+   row-size=8B cardinality=0
 ====
 # Partition pruning when a binary predicate contains a NullLiteral (IMPALA-1535)
 select * from functional.alltypestiny t1 where t1.year != null or t1.year = null
@@ -929,7 +1128,9 @@ select * from functional.alltypestiny t1 where t1.year != null or t1.year = null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypestiny t1]
+   partition predicates: t1.year != NULL OR t1.year = NULL
    partitions=0/4 files=0 size=0B
+   row-size=89B cardinality=0
 ====
 # Partition pruning when a binary predicate contains a NullLiteral and IS DISTINCT FROM
 select * from functional.alltypestiny t1
@@ -938,7 +1139,9 @@ where t1.year IS DISTINCT FROM null or t1.year = null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypestiny t1]
+   partition predicates: t1.year IS DISTINCT FROM NULL OR t1.year = NULL
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Partition pruning when a binary predicate contains a NullLiteral in an arithmetic
 # expression
@@ -947,7 +1150,9 @@ select * from functional.alltypesagg t1 where t1.year + null != t1.day
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: t1.day != t1.year + NULL
    partitions=0/11 files=0 size=0B
+   row-size=95B cardinality=0
 ====
 # Partition pruning when a binary predicate contains a NullLiteral in an arithmetic
 # expression and IS DISTINCT FROM
@@ -956,7 +1161,9 @@ select * from functional.alltypesagg t1 where t1.year + null IS DISTINCT FROM t1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: t1.day IS DISTINCT FROM t1.year + NULL
    partitions=10/11 files=10 size=743.67KB
+   row-size=95B cardinality=10.00K
 ====
 # Partition pruning when an IN predicate contains a NullLiteral
 # (a single partition is scanned)
@@ -965,7 +1172,9 @@ select * from functional.alltypesagg t1 where day in (10, null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: day IN (10, NULL)
    partitions=1/11 files=1 size=74.48KB
+   row-size=95B cardinality=1.00K
 ====
 # Partition pruning when a NOT IN predicate contains a NullLiteral
 # (all partitions are pruned)
@@ -974,7 +1183,9 @@ select * from functional.alltypesagg t1 where day not in (10, null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: day NOT IN (10, NULL)
    partitions=0/11 files=0 size=0B
+   row-size=95B cardinality=0
 ====
 # Partition pruning when a binary predicate contains a constant expression (IMPALA-1636)
 select * from functional.alltypesagg t1
@@ -983,7 +1194,9 @@ where t1.day = instr("this is a test", "this") or t1.year = year(now()) + 100
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: t1.day = 1 OR t1.year = 2119
    partitions=1/11 files=1 size=73.39KB
+   row-size=95B cardinality=1.00K
 ====
 # Partition pruning when there is a constant expression in the IN predicate values
 # (IMPALA-1636)
@@ -993,7 +1206,9 @@ where t1.day in (1, cast(2.0 as INT), year(now()) + 100)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: t1.day IN (1, 2, 2119)
    partitions=2/11 files=2 size=147.87KB
+   row-size=95B cardinality=2.00K
 ====
 # Partition pruning where a compound predicate contains a constant and a non-constant
 # expression (IMPALA-1636)
@@ -1003,7 +1218,9 @@ where -t1.day in(-1 - 1) or cast(t1.day as string) like '%1%'
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: -1 * t1.day IN (-2) OR CAST(t1.day AS STRING) LIKE '%1%'
    partitions=3/11 files=3 size=222.34KB
+   row-size=95B cardinality=3.00K
 ====
 # IMPALA-4470: Partition pruning with a constant expr that evaluates to NaN/infinity.
 # 0 / 0 --> NaN and 1 / 0 --> Infinity
@@ -1014,7 +1231,9 @@ where year = (cast(0 as double) / cast(0 as double))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = (0 / 0), month = (1 / 0)
    partitions=0/24 files=0 size=0B
+   row-size=89B cardinality=0
 ====
 # IMPALA-4592: Test random predicates that reference a partition column.
 select * from functional.alltypes where rand() > year
@@ -1024,6 +1243,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: year < rand()
+   row-size=89B cardinality=730
 ====
 # IMPALA-5180: Test that predicates not touching a partition column are ignored in
 # partition pruning
@@ -1035,10 +1255,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: random() = 100
+   row-size=0B cardinality=730
 ====
 # Same as above, with a column that gets constant folded away
 select count(*) from
@@ -1050,8 +1272,10 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.int_col = 1, FALSE OR 1 + random() * 1 = 100
+   row-size=4B cardinality=231
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test b/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test
index 184fd1c..df7b607 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test
@@ -9,20 +9,25 @@ PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: t3.id = coalesce(functional.alltypestiny.id, t3.id)
+|  row-size=97B cardinality=8
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypestiny.id = functional.alltypestiny.id
 |  runtime filters: RF000 <- functional.alltypestiny.id
+|  row-size=93B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.id
+   row-size=89B cardinality=8
 ====
 select *
 from
@@ -36,12 +41,15 @@ PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: t1.id < t2.id
+|  row-size=8B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 select *
 from
@@ -54,20 +62,25 @@ where
 PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=267B cardinality=53.29M
 |
 |--02:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.id
+   row-size=89B cardinality=7.30K
 ====
 select *
 from
@@ -80,20 +93,25 @@ where
 PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=267B cardinality=53.29M
 |
 |--01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=178B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.id
+   row-size=89B cardinality=7.30K
 ====
 # The hash join will not be executed first, because this is the first executable
 # plan we find and t1 is the largest table (which is moved to the front of
@@ -111,18 +129,23 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t2.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=267B cardinality=58.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall t3]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 03:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=178B cardinality=58.40K
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t2.id
+|     row-size=89B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 select *
 from
@@ -134,23 +157,30 @@ from
 PLAN-ROOT SINK
 |
 06:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=356B cardinality=389.02G
 |
 |--03:SCAN HDFS [functional.alltypes t4]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 05:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t2.id = t3.id
+|  row-size=267B cardinality=53.29M
 |
 |--02:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=178B cardinality=53.29M
 |
 |--01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Test that the non-equi predicate on the outer join does not
 # discard detecting the implicit cross join
@@ -164,17 +194,22 @@ PLAN-ROOT SINK
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = c.id
 |  other join predicates: a.id < b.id
+|  row-size=97B cardinality=737.30K
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 03:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=93B cardinality=730.00K
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 select a.* from
 functional.alltypestiny a,
@@ -188,26 +223,33 @@ PLAN-ROOT SINK
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: d.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=101B cardinality=64
 |
 |--05:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=97B cardinality=64
 |  |
 |  |--04:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: b.id = c.id
 |  |  |  runtime filters: RF002 <- c.id
+|  |  |  row-size=8B cardinality=8
 |  |  |
 |  |  |--02:SCAN HDFS [functional.alltypestiny c]
 |  |  |     partitions=4/4 files=4 size=460B
+|  |  |     row-size=4B cardinality=8
 |  |  |
 |  |  01:SCAN HDFS [functional.alltypes b]
 |  |     partitions=24/24 files=24 size=478.45KB
 |  |     runtime filters: RF002 -> b.id
+|  |     row-size=4B cardinality=7.30K
 |  |
 |  00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 03:SCAN HDFS [functional.alltypes d]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> d.id
+   row-size=4B cardinality=7.30K
 ====
 # Do not allow a non-equi outer join
 select count(*)
@@ -221,25 +263,33 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=8B cardinality=389.02G
 |
 |--03:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 05:NESTED LOOP JOIN [LEFT OUTER JOIN]
 |  join predicates: t1.id < t2.id
+|  row-size=8B cardinality=53.29M
 |
 |--02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=4B cardinality=53.29M
 |
 |--00:SCAN HDFS [functional.alltypes t0]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 select count(*)
 from
@@ -254,29 +304,37 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=8B cardinality=389.02G
 |
 |--03:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 05:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.id = t2.id
 |  other join predicates: t1.id < t2.id
 |  other predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=8B cardinality=53.29M
 |
 |--02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=4B cardinality=53.29M
 |
 |--00:SCAN HDFS [functional.alltypes t0]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.id
+   row-size=4B cardinality=7.30K
 ====
 select count(*)
 from
@@ -291,29 +349,37 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=8B cardinality=389.07G
 |
 |--03:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 05:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: t1.id = t2.id
 |  other join predicates: t1.id < t2.id
 |  other predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=8B cardinality=53.30M
 |
 |--02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=4B cardinality=53.29M
 |
 |--00:SCAN HDFS [functional.alltypes t0]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.id
+   row-size=4B cardinality=7.30K
 ====
 select count(*)
 from
@@ -324,15 +390,19 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:NESTED LOOP JOIN [LEFT SEMI JOIN]
 |  join predicates: (t0.id < t1.id)
+|  row-size=4B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes t1]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes t0]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # Check for implicit cross joins conversion in presence of complex where
 # clauses that lead to query rewriting
@@ -348,25 +418,32 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 05:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: avg(id) = b.id
+|  row-size=8B cardinality=8
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: a.id < b.id
+|  |  row-size=8B cardinality=8
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:AGGREGATE [FINALIZE]
 |  output: avg(id)
 |  group by: month
+|  row-size=12B cardinality=12
 |
 02:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====
 # Check that cross joins are correctly detected and the first join becomes a hash join
 select *
@@ -377,23 +454,30 @@ functional.alltypes t4
 PLAN-ROOT SINK
 |
 06:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=356B cardinality=389.02G
 |
 |--03:SCAN HDFS [functional.alltypes t4]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 05:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=267B cardinality=53.29M
 |
 |--02:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.id
+   row-size=89B cardinality=7.30K
 ====


[24/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test b/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test
index 5084d2a..9a70f6a 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test
@@ -12,20 +12,25 @@ PLAN-ROOT SINK
 03:ANALYTIC
 |  functions: max(tinyint_col)
 |  partition by: int_col
+|  row-size=6B cardinality=100
 |
 02:SORT
 |  order by: int_col ASC NULLS FIRST
+|  row-size=5B cardinality=100
 |
 05:AGGREGATE [FINALIZE]
 |  group by: int_col, tinyint_col
+|  row-size=5B cardinality=100
 |
 04:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col, tinyint_col
+|  row-size=5B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=5B cardinality=7.30K
 ====
 # partition groups are coalesced if the intersection of their partition exprs
 # has a high enough cardinality to allow distribution across all nodes
@@ -41,21 +46,26 @@ PLAN-ROOT SINK
 04:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col, tinyint_col
+|  row-size=14B cardinality=7.30K
 |
 03:SORT
 |  order by: int_col ASC NULLS FIRST, tinyint_col ASC NULLS FIRST
+|  row-size=10B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col, bool_col
+|  row-size=10B cardinality=7.30K
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST, bool_col ASC NULLS FIRST
+|  row-size=6B cardinality=7.30K
 |
 05:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=6B cardinality=7.30K
 ====
 # unpartitioned analytics are executed with distributed sorts
 # TODO: avoid resorting on the same exprs
@@ -70,19 +80,24 @@ PLAN-ROOT SINK
 |  functions: min(int_col)
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=12B cardinality=7.30K
 |
 03:SORT
 |  order by: int_col ASC
+|  row-size=8B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col
+|  row-size=8B cardinality=7.30K
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST
+|  row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -90,24 +105,29 @@ PLAN-ROOT SINK
 |  functions: min(int_col)
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=12B cardinality=7.30K
 |
 06:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: int_col ASC
 |
 03:SORT
 |  order by: int_col ASC
+|  row-size=8B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col
+|  row-size=8B cardinality=7.30K
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST
+|  row-size=4B cardinality=7.30K
 |
 05:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # coalesce sort groups
 select
@@ -128,42 +148,51 @@ PLAN-ROOT SINK
 08:ANALYTIC
 |  functions: max(int_col)
 |  partition by: bool_col
+|  row-size=34B cardinality=7.30K
 |
 07:ANALYTIC
 |  functions: max(int_col)
 |  partition by: bool_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=30B cardinality=7.30K
 |
 06:ANALYTIC
 |  functions: max(int_col)
 |  partition by: bool_col
 |  order by: bigint_col ASC, tinyint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=26B cardinality=7.30K
 |
 05:SORT
 |  order by: bool_col ASC NULLS FIRST, bigint_col ASC, tinyint_col ASC
+|  row-size=22B cardinality=7.30K
 |
 04:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col
 |  order by: bigint_col DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=22B cardinality=7.30K
 |
 03:SORT
 |  order by: int_col ASC NULLS FIRST, bigint_col DESC
+|  row-size=18B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=18B cardinality=7.30K
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=14B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=14B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -172,21 +201,25 @@ PLAN-ROOT SINK
 08:ANALYTIC
 |  functions: max(int_col)
 |  partition by: bool_col
+|  row-size=34B cardinality=7.30K
 |
 07:ANALYTIC
 |  functions: max(int_col)
 |  partition by: bool_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=30B cardinality=7.30K
 |
 06:ANALYTIC
 |  functions: max(int_col)
 |  partition by: bool_col
 |  order by: bigint_col ASC, tinyint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=26B cardinality=7.30K
 |
 05:SORT
 |  order by: bool_col ASC NULLS FIRST, bigint_col ASC, tinyint_col ASC
+|  row-size=22B cardinality=7.30K
 |
 10:EXCHANGE [HASH(bool_col)]
 |
@@ -195,23 +228,28 @@ PLAN-ROOT SINK
 |  partition by: int_col
 |  order by: bigint_col DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=22B cardinality=7.30K
 |
 03:SORT
 |  order by: int_col ASC NULLS FIRST, bigint_col DESC
+|  row-size=18B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=18B cardinality=7.30K
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=14B cardinality=7.30K
 |
 09:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=14B cardinality=7.30K
 ====
 # check ordering of partition, sort and window groups
 select
@@ -244,51 +282,63 @@ PLAN-ROOT SINK
 |  functions: min(int_col), max(int_col)
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=59B cardinality=7.30K
 |
 10:ANALYTIC
 |  functions: min(int_col)
+|  row-size=51B cardinality=7.30K
 |
 09:SORT
 |  order by: bigint_col ASC
+|  row-size=47B cardinality=7.30K
 |
 08:ANALYTIC
 |  functions: max(int_col), min(int_col), count(int_col)
 |  partition by: bool_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=47B cardinality=7.30K
 |
 07:SORT
 |  order by: bool_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=31B cardinality=7.30K
 |
 06:ANALYTIC
 |  functions: max(int_col)
 |  partition by: bool_col
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=31B cardinality=7.30K
 |
 05:SORT
 |  order by: bool_col ASC NULLS FIRST, int_col ASC
+|  row-size=27B cardinality=7.30K
 |
 04:ANALYTIC
 |  functions: max(int_col), min(int_col)
 |  partition by: int_col, smallint_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=27B cardinality=7.30K
 |
 03:SORT
 |  order by: int_col ASC NULLS FIRST, smallint_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=19B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col, smallint_col
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=19B cardinality=7.30K
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST, smallint_col ASC NULLS FIRST
+|  row-size=15B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -296,33 +346,40 @@ PLAN-ROOT SINK
 |  functions: min(int_col), max(int_col)
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=59B cardinality=7.30K
 |
 10:ANALYTIC
 |  functions: min(int_col)
+|  row-size=51B cardinality=7.30K
 |
 14:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: bigint_col ASC
 |
 09:SORT
 |  order by: bigint_col ASC
+|  row-size=47B cardinality=7.30K
 |
 08:ANALYTIC
 |  functions: max(int_col), min(int_col), count(int_col)
 |  partition by: bool_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=47B cardinality=7.30K
 |
 07:SORT
 |  order by: bool_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=31B cardinality=7.30K
 |
 06:ANALYTIC
 |  functions: max(int_col)
 |  partition by: bool_col
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=31B cardinality=7.30K
 |
 05:SORT
 |  order by: bool_col ASC NULLS FIRST, int_col ASC
+|  row-size=27B cardinality=7.30K
 |
 13:EXCHANGE [HASH(bool_col)]
 |
@@ -331,23 +388,28 @@ PLAN-ROOT SINK
 |  partition by: int_col, smallint_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=27B cardinality=7.30K
 |
 03:SORT
 |  order by: int_col ASC NULLS FIRST, smallint_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=19B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: max(int_col)
 |  partition by: int_col, smallint_col
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=19B cardinality=7.30K
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST, smallint_col ASC NULLS FIRST
+|  row-size=15B cardinality=7.30K
 |
 12:EXCHANGE [HASH(int_col,smallint_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=15B cardinality=7.30K
 ====
 # basic analytic with default window and no partition/ordering
 select count(*) over() from functional.alltypesagg
@@ -356,19 +418,23 @@ PLAN-ROOT SINK
 |
 01:ANALYTIC
 |  functions: count(*)
+|  row-size=8B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=0B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:ANALYTIC
 |  functions: count(*)
+|  row-size=8B cardinality=11.00K
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=0B cardinality=11.00K
 ====
 # basic analytic with default window and partition
 select tinyint_col, sum(bigint_col) over(partition by tinyint_col) sum_of_bigints
@@ -379,12 +445,15 @@ PLAN-ROOT SINK
 02:ANALYTIC
 |  functions: sum(bigint_col)
 |  partition by: tinyint_col
+|  row-size=17B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST
+|  row-size=9B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=9B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -393,14 +462,17 @@ PLAN-ROOT SINK
 02:ANALYTIC
 |  functions: sum(bigint_col)
 |  partition by: tinyint_col
+|  row-size=17B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST
+|  row-size=9B cardinality=11.00K
 |
 03:EXCHANGE [HASH(tinyint_col)]
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=9B cardinality=11.00K
 ====
 # basic analytic with default window and ordering
 select int_col, rank() over(order by int_col) from functional.alltypesagg
@@ -411,12 +483,15 @@ PLAN-ROOT SINK
 |  functions: rank()
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=12B cardinality=11.00K
 |
 01:SORT
 |  order by: int_col ASC
+|  row-size=4B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -424,15 +499,18 @@ PLAN-ROOT SINK
 |  functions: rank()
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=12B cardinality=11.00K
 |
 03:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: int_col ASC
 |
 01:SORT
 |  order by: int_col ASC
+|  row-size=4B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====
 # analytic rows window, partition and ordering using complex expressions, with limit
 select bigint_col, count(double_col)
@@ -449,12 +527,15 @@ PLAN-ROOT SINK
 |  order by: 4 - int_col ASC, 4 * smallint_col ASC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
 |  limit: 10
+|  row-size=31B cardinality=10
 |
 01:SORT
 |  order by: tinyint_col + 1 ASC NULLS FIRST, double_col / 2 ASC NULLS FIRST, 4 - int_col ASC, 4 * smallint_col ASC
+|  row-size=23B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=23B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -467,14 +548,17 @@ PLAN-ROOT SINK
 |  order by: 4 - int_col ASC, 4 * smallint_col ASC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
 |  limit: 10
+|  row-size=31B cardinality=10
 |
 01:SORT
 |  order by: tinyint_col + 1 ASC NULLS FIRST, double_col / 2 ASC NULLS FIRST, 4 - int_col ASC, 4 * smallint_col ASC
+|  row-size=23B cardinality=11.00K
 |
 03:EXCHANGE [HASH(tinyint_col + 1,double_col / 2)]
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=23B cardinality=11.00K
 ====
 # test de-duplication of analytic exprs
 select
@@ -502,27 +586,33 @@ PLAN-ROOT SINK
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
 |  limit: 10
+|  row-size=53B cardinality=10
 |
 04:SORT
 |  order by: int_col DESC
+|  row-size=45B cardinality=7.30K
 |
 03:ANALYTIC
 |  functions: count(bigint_col), sum(double_col)
 |  partition by: bool_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=45B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: avg(double_col)
 |  partition by: bool_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING
+|  row-size=29B cardinality=7.30K
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST, int_col DESC
+|  row-size=21B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=21B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -531,32 +621,38 @@ PLAN-ROOT SINK
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
 |  limit: 10
+|  row-size=53B cardinality=10
 |
 07:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: int_col DESC
 |
 04:SORT
 |  order by: int_col DESC
+|  row-size=45B cardinality=7.30K
 |
 03:ANALYTIC
 |  functions: count(bigint_col), sum(double_col)
 |  partition by: bool_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=45B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: avg(double_col)
 |  partition by: bool_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING
+|  row-size=29B cardinality=7.30K
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST, int_col DESC
+|  row-size=21B cardinality=7.30K
 |
 06:EXCHANGE [HASH(bool_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=21B cardinality=7.30K
 ====
 # analytic on the output of a join with a final order by
 select a.tinyint_col, a.int_col, count(a.double_col)
@@ -568,26 +664,32 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: tinyint_col ASC, int_col ASC
+|  row-size=13B cardinality=99
 |
 04:ANALYTIC
 |  functions: count(double_col)
 |  partition by: a.tinyint_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=29B cardinality=99
 |
 03:SORT
 |  order by: tinyint_col ASC NULLS FIRST, int_col DESC
+|  row-size=21B cardinality=99
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=21B cardinality=99
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -596,30 +698,36 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: tinyint_col ASC, int_col ASC
+|  row-size=13B cardinality=99
 |
 04:ANALYTIC
 |  functions: count(double_col)
 |  partition by: a.tinyint_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=29B cardinality=99
 |
 03:SORT
 |  order by: tinyint_col ASC NULLS FIRST, int_col DESC
+|  row-size=21B cardinality=99
 |
 07:EXCHANGE [HASH(a.tinyint_col)]
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=21B cardinality=99
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id
+   row-size=17B cardinality=7.30K
 ====
 # analytics on a grouped aggregation with a final order by
 select bool_col,
@@ -640,31 +748,38 @@ PLAN-ROOT SINK
 |
 06:SORT
 |  order by: bool_col ASC, sum(min(int_col)) ASC, max(sum(bigint_col)) ASC
+|  row-size=25B cardinality=2
 |
 05:ANALYTIC
 |  functions: sum(min(int_col)), max(sum(bigint_col))
 |  partition by: min(tinyint_col)
 |  order by: max(int_col) ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=50B cardinality=2
 |
 04:SORT
 |  order by: min(tinyint_col) ASC NULLS FIRST, max(int_col) ASC
+|  row-size=34B cardinality=2
 |
 03:ANALYTIC
 |  functions: min(sum(bigint_col))
 |  partition by: min(tinyint_col)
 |  order by: sum(int_col) ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 4 FOLLOWING
+|  row-size=34B cardinality=2
 |
 02:SORT
 |  order by: min(tinyint_col) ASC NULLS FIRST, sum(int_col) ASC
+|  row-size=26B cardinality=2
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(int_col), min(tinyint_col), max(int_col), sum(bigint_col), sum(int_col)
 |  group by: bool_col
+|  row-size=26B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=14B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -673,39 +788,47 @@ PLAN-ROOT SINK
 |
 06:SORT
 |  order by: bool_col ASC, sum(min(int_col)) ASC, max(sum(bigint_col)) ASC
+|  row-size=25B cardinality=2
 |
 05:ANALYTIC
 |  functions: sum(min(int_col)), max(sum(bigint_col))
 |  partition by: min(tinyint_col)
 |  order by: max(int_col) ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=50B cardinality=2
 |
 04:SORT
 |  order by: min(tinyint_col) ASC NULLS FIRST, max(int_col) ASC
+|  row-size=34B cardinality=2
 |
 03:ANALYTIC
 |  functions: min(sum(bigint_col))
 |  partition by: min(tinyint_col)
 |  order by: sum(int_col) ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 4 FOLLOWING
+|  row-size=34B cardinality=2
 |
 02:SORT
 |  order by: min(tinyint_col) ASC NULLS FIRST, sum(int_col) ASC
+|  row-size=26B cardinality=2
 |
 09:EXCHANGE [HASH(min(tinyint_col))]
 |
 08:AGGREGATE [FINALIZE]
 |  output: min:merge(int_col), min:merge(tinyint_col), max:merge(int_col), sum:merge(bigint_col), sum:merge(int_col)
 |  group by: bool_col
+|  row-size=26B cardinality=2
 |
 07:EXCHANGE [HASH(bool_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(int_col), min(tinyint_col), max(int_col), sum(bigint_col), sum(int_col)
 |  group by: bool_col
+|  row-size=26B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=14B cardinality=7.30K
 ====
 # grouping of multiple analytic exprs by compatible window/partition/order;
 # the distributed plan repartitions only once on tinyint_col
@@ -751,54 +874,65 @@ PLAN-ROOT SINK
 |  partition by: tinyint_col, double_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=63B cardinality=11.00K
 |
 09:ANALYTIC
 |  functions: sum(smallint_col)
 |  partition by: double_col, tinyint_col
 |  order by: int_col DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+|  row-size=47B cardinality=11.00K
 |
 08:ANALYTIC
 |  functions: sum(tinyint_col)
 |  partition by: tinyint_col, double_col
 |  order by: int_col DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=39B cardinality=11.00K
 |
 07:ANALYTIC
 |  functions: first_value_rewrite(int_col, -1)
 |  partition by: double_col, tinyint_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 PRECEDING
+|  row-size=31B cardinality=11.00K
 |
 06:SORT
 |  order by: tinyint_col ASC NULLS FIRST, double_col ASC NULLS FIRST, int_col DESC
+|  row-size=27B cardinality=11.00K
 |
 05:ANALYTIC
 |  functions: first_value_rewrite(int_col, -1)
 |  partition by: double_col, tinyint_col
 |  order by: int_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 PRECEDING
+|  row-size=27B cardinality=11.00K
 |
 04:ANALYTIC
 |  functions: last_value_ignore_nulls(int_col)
 |  partition by: double_col, tinyint_col
 |  order by: int_col ASC
 |  window: ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING
+|  row-size=23B cardinality=11.00K
 |
 03:SORT
 |  order by: double_col ASC NULLS FIRST, tinyint_col ASC NULLS FIRST, int_col ASC
+|  row-size=19B cardinality=11.00K
 |
 02:ANALYTIC
 |  functions: min(int_col)
 |  partition by: tinyint_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING
+|  row-size=19B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST, int_col DESC
+|  row-size=15B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -809,56 +943,67 @@ PLAN-ROOT SINK
 |  partition by: tinyint_col, double_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=63B cardinality=11.00K
 |
 09:ANALYTIC
 |  functions: sum(smallint_col)
 |  partition by: double_col, tinyint_col
 |  order by: int_col DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+|  row-size=47B cardinality=11.00K
 |
 08:ANALYTIC
 |  functions: sum(tinyint_col)
 |  partition by: tinyint_col, double_col
 |  order by: int_col DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=39B cardinality=11.00K
 |
 07:ANALYTIC
 |  functions: first_value_rewrite(int_col, -1)
 |  partition by: double_col, tinyint_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 PRECEDING
+|  row-size=31B cardinality=11.00K
 |
 06:SORT
 |  order by: tinyint_col ASC NULLS FIRST, double_col ASC NULLS FIRST, int_col DESC
+|  row-size=27B cardinality=11.00K
 |
 05:ANALYTIC
 |  functions: first_value_rewrite(int_col, -1)
 |  partition by: double_col, tinyint_col
 |  order by: int_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 PRECEDING
+|  row-size=27B cardinality=11.00K
 |
 04:ANALYTIC
 |  functions: last_value_ignore_nulls(int_col)
 |  partition by: double_col, tinyint_col
 |  order by: int_col ASC
 |  window: ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING
+|  row-size=23B cardinality=11.00K
 |
 03:SORT
 |  order by: double_col ASC NULLS FIRST, tinyint_col ASC NULLS FIRST, int_col ASC
+|  row-size=19B cardinality=11.00K
 |
 02:ANALYTIC
 |  functions: min(int_col)
 |  partition by: tinyint_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING
+|  row-size=19B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST, int_col DESC
+|  row-size=15B cardinality=11.00K
 |
 11:EXCHANGE [HASH(tinyint_col)]
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ====
 # grouping of multiple analytic exprs by compatible window/partition/order
 select
@@ -883,34 +1028,42 @@ PLAN-ROOT SINK
 |  functions: sum(tinyint_col)
 |  order by: int_col DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=55B cardinality=11.00K
 |
 06:SORT
 |  order by: int_col DESC
+|  row-size=47B cardinality=11.00K
 |
 05:ANALYTIC
 |  functions: sum(smallint_col)
 |  partition by: double_col, tinyint_col
+|  row-size=47B cardinality=11.00K
 |
 04:ANALYTIC
 |  functions: count(double_col)
 |  partition by: tinyint_col, double_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=39B cardinality=11.00K
 |
 03:SORT
 |  order by: tinyint_col ASC NULLS FIRST, double_col ASC NULLS FIRST, int_col DESC
+|  row-size=31B cardinality=11.00K
 |
 02:ANALYTIC
 |  functions: sum(smallint_col)
 |  partition by: bigint_col
 |  order by: tinyint_col ASC
 |  window: ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING
+|  row-size=31B cardinality=11.00K
 |
 01:SORT
 |  order by: bigint_col ASC NULLS FIRST, tinyint_col ASC
+|  row-size=23B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=23B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -918,25 +1071,30 @@ PLAN-ROOT SINK
 |  functions: sum(tinyint_col)
 |  order by: int_col DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=55B cardinality=11.00K
 |
 10:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: int_col DESC
 |
 06:SORT
 |  order by: int_col DESC
+|  row-size=47B cardinality=11.00K
 |
 05:ANALYTIC
 |  functions: sum(smallint_col)
 |  partition by: double_col, tinyint_col
+|  row-size=47B cardinality=11.00K
 |
 04:ANALYTIC
 |  functions: count(double_col)
 |  partition by: tinyint_col, double_col
 |  order by: int_col DESC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=39B cardinality=11.00K
 |
 03:SORT
 |  order by: tinyint_col ASC NULLS FIRST, double_col ASC NULLS FIRST, int_col DESC
+|  row-size=31B cardinality=11.00K
 |
 09:EXCHANGE [HASH(tinyint_col,double_col)]
 |
@@ -945,14 +1103,17 @@ PLAN-ROOT SINK
 |  partition by: bigint_col
 |  order by: tinyint_col ASC
 |  window: ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING
+|  row-size=31B cardinality=11.00K
 |
 01:SORT
 |  order by: bigint_col ASC NULLS FIRST, tinyint_col ASC
+|  row-size=23B cardinality=11.00K
 |
 08:EXCHANGE [HASH(bigint_col)]
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=23B cardinality=11.00K
 ====
 # basic test for analytics and inline views
 select double_col, a, b, a + b, double_col + a from
@@ -968,41 +1129,51 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: a ASC, b ASC, a + b ASC
+|  row-size=24B cardinality=7.30K
 |
 03:ANALYTIC
 |  functions: count(int_col)
+|  row-size=37B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: sum(int_col + bigint_col)
 |  partition by: bool_col
+|  row-size=29B cardinality=7.30K
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=21B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=21B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:SORT
 |  order by: a ASC, b ASC, a + b ASC
+|  row-size=24B cardinality=7.30K
 |
 03:ANALYTIC
 |  functions: count(int_col)
+|  row-size=37B cardinality=7.30K
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 02:ANALYTIC
 |  functions: sum(int_col + bigint_col)
 |  partition by: bool_col
+|  row-size=29B cardinality=7.30K
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=21B cardinality=7.30K
 |
 05:EXCHANGE [HASH(functional.alltypes.bool_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=21B cardinality=7.30K
 ====
 # same as above but using a WITH-clause view
 with v2 as
@@ -1019,41 +1190,51 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: a ASC, b ASC, a + b ASC
+|  row-size=24B cardinality=7.30K
 |
 03:ANALYTIC
 |  functions: count(int_col)
+|  row-size=37B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: sum(int_col + bigint_col)
 |  partition by: bool_col
+|  row-size=29B cardinality=7.30K
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=21B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=21B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:SORT
 |  order by: a ASC, b ASC, a + b ASC
+|  row-size=24B cardinality=7.30K
 |
 03:ANALYTIC
 |  functions: count(int_col)
+|  row-size=37B cardinality=7.30K
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 02:ANALYTIC
 |  functions: sum(int_col + bigint_col)
 |  partition by: bool_col
+|  row-size=29B cardinality=7.30K
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=21B cardinality=7.30K
 |
 05:EXCHANGE [HASH(functional.alltypes.bool_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=21B cardinality=7.30K
 ====
 # test ignoring of non-materialized analytic exprs
 select b from
@@ -1071,25 +1252,31 @@ PLAN-ROOT SINK
 |
 05:SELECT
 |  predicates: count(1) < 10
+|  row-size=42B cardinality=730
 |
 04:ANALYTIC
 |  functions: count(1)
 |  partition by: bool_col
 |  order by: string_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=42B cardinality=7.30K
 |
 03:SORT
 |  order by: bool_col ASC NULLS FIRST, string_col ASC
+|  row-size=34B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: bigint_col
+|  row-size=34B cardinality=7.30K
 |
 01:SORT
 |  order by: bigint_col ASC NULLS FIRST
+|  row-size=26B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=26B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1097,29 +1284,35 @@ PLAN-ROOT SINK
 |
 05:SELECT
 |  predicates: count(1) < 10
+|  row-size=42B cardinality=730
 |
 04:ANALYTIC
 |  functions: count(1)
 |  partition by: bool_col
 |  order by: string_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=42B cardinality=7.30K
 |
 03:SORT
 |  order by: bool_col ASC NULLS FIRST, string_col ASC
+|  row-size=34B cardinality=7.30K
 |
 07:EXCHANGE [HASH(bool_col)]
 |
 02:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: bigint_col
+|  row-size=34B cardinality=7.30K
 |
 01:SORT
 |  order by: bigint_col ASC NULLS FIRST
+|  row-size=26B cardinality=7.30K
 |
 06:EXCHANGE [HASH(bigint_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=26B cardinality=7.30K
 ====
 # basic test for analytics and unions
 select min(id) over (partition by int_col)
@@ -1136,46 +1329,59 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: min(id) OVER(...) DESC NULLS FIRST
+|  row-size=4B cardinality=7.41K
 |
 08:UNION
 |  pass-through-operands: 07
+|  row-size=4B cardinality=7.41K
 |
 |--11:ANALYTIC
 |  |  functions: sum(bigint_col)
 |  |  partition by: int_col
 |  |  order by: id ASC
 |  |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  |  row-size=24B cardinality=8
 |  |
 |  10:SORT
 |  |  order by: int_col ASC NULLS FIRST, id ASC
+|  |  row-size=16B cardinality=8
 |  |
 |  09:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=16B cardinality=8
 |
 07:AGGREGATE [FINALIZE]
 |  group by: min(id) OVER(...)
+|  row-size=4B cardinality=7.40K
 |
 00:UNION
+|  row-size=4B cardinality=7.40K
 |
 |--06:ANALYTIC
 |  |  functions: max(id)
 |  |  partition by: bool_col
+|  |  row-size=9B cardinality=100
 |  |
 |  05:SORT
 |  |  order by: bool_col ASC NULLS FIRST
+|  |  row-size=5B cardinality=100
 |  |
 |  04:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=5B cardinality=100
 |
 03:ANALYTIC
 |  functions: min(id)
 |  partition by: int_col
+|  row-size=12B cardinality=7.30K
 |
 02:SORT
 |  order by: int_col ASC NULLS FIRST
+|  row-size=8B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1184,57 +1390,71 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: min(id) OVER(...) DESC NULLS FIRST
+|  row-size=4B cardinality=7.41K
 |
 08:UNION
 |  pass-through-operands: 16
+|  row-size=4B cardinality=7.41K
 |
 |--11:ANALYTIC
 |  |  functions: sum(bigint_col)
 |  |  partition by: int_col
 |  |  order by: id ASC
 |  |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  |  row-size=24B cardinality=8
 |  |
 |  10:SORT
 |  |  order by: int_col ASC NULLS FIRST, id ASC
+|  |  row-size=16B cardinality=8
 |  |
 |  17:EXCHANGE [HASH(int_col)]
 |  |
 |  09:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=16B cardinality=8
 |
 16:AGGREGATE [FINALIZE]
 |  group by: min(id) OVER(...)
+|  row-size=4B cardinality=7.40K
 |
 15:EXCHANGE [HASH(min(id) OVER(...))]
 |
 07:AGGREGATE [STREAMING]
 |  group by: min(id) OVER(...)
+|  row-size=4B cardinality=7.40K
 |
 00:UNION
+|  row-size=4B cardinality=7.40K
 |
 |--06:ANALYTIC
 |  |  functions: max(id)
 |  |  partition by: bool_col
+|  |  row-size=9B cardinality=100
 |  |
 |  05:SORT
 |  |  order by: bool_col ASC NULLS FIRST
+|  |  row-size=5B cardinality=100
 |  |
 |  14:EXCHANGE [HASH(bool_col)]
 |  |
 |  04:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=5B cardinality=100
 |
 03:ANALYTIC
 |  functions: min(id)
 |  partition by: int_col
+|  row-size=12B cardinality=7.30K
 |
 02:SORT
 |  order by: int_col ASC NULLS FIRST
+|  row-size=8B cardinality=7.30K
 |
 13:EXCHANGE [HASH(int_col)]
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====
 # analytics in an uncorrelated subquery
 select id, int_col, bool_col from functional.alltypessmall t1
@@ -1247,21 +1467,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: int_col = min(bigint_col)
 |  runtime filters: RF000 <- min(bigint_col)
+|  row-size=9B cardinality=10
 |
 |--03:ANALYTIC
 |  |  functions: min(bigint_col)
 |  |  partition by: bool_col
+|  |  row-size=21B cardinality=1
 |  |
 |  02:SORT
 |  |  order by: bool_col ASC NULLS FIRST
+|  |  row-size=13B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t2.id < 10
+|     row-size=13B cardinality=1
 |
 00:SCAN HDFS [functional.alltypessmall t1]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> int_col
+   row-size=9B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1270,25 +1495,30 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: int_col = min(bigint_col)
 |  runtime filters: RF000 <- min(bigint_col)
+|  row-size=9B cardinality=10
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  03:ANALYTIC
 |  |  functions: min(bigint_col)
 |  |  partition by: bool_col
+|  |  row-size=21B cardinality=1
 |  |
 |  02:SORT
 |  |  order by: bool_col ASC NULLS FIRST
+|  |  row-size=13B cardinality=1
 |  |
 |  05:EXCHANGE [HASH(bool_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t2.id < 10
+|     row-size=13B cardinality=1
 |
 00:SCAN HDFS [functional.alltypessmall t1]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> int_col
+   row-size=9B cardinality=100
 ====
 # test conjunct assignment and slot materialization due to conjuncts
 # (see IMPALA-1243)
@@ -1325,43 +1555,53 @@ PLAN-ROOT SINK
 |
 07:SELECT
 |  predicates: min(int_col) < 1, max(int_col) < 2, bigint_col > 10, count(int_col) < 3, sum(int_col) < 4, avg(int_col) < 5, min(int_col) != count(int_col), min(int_col) != avg(int_col), max(int_col) != count(int_col), count(int_col) < bigint_col + 3, sum(int_col) < bigint_col + 4, min(int_col) < bigint_col + 1, max(int_col) < bigint_col + 2, avg(int_col) < bigint_col + 5
+|  row-size=49B cardinality=73
 |
 06:ANALYTIC
 |  functions: min(int_col)
+|  row-size=49B cardinality=730
 |
 05:ANALYTIC
 |  functions: avg(int_col)
 |  partition by: bigint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING
+|  row-size=45B cardinality=730
 |
 04:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: bigint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=37B cardinality=730
 |
 03:SORT
 |  order by: bigint_col ASC NULLS FIRST, id ASC
+|  row-size=29B cardinality=730
 |
 02:ANALYTIC
 |  functions: max(int_col), count(int_col)
 |  partition by: bool_col
+|  row-size=29B cardinality=730
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=17B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: int_col <= 10, int_col >= 5
+   row-size=17B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 07:SELECT
 |  predicates: min(int_col) < 1, max(int_col) < 2, bigint_col > 10, count(int_col) < 3, sum(int_col) < 4, avg(int_col) < 5, min(int_col) != count(int_col), min(int_col) != avg(int_col), max(int_col) != count(int_col), count(int_col) < bigint_col + 3, sum(int_col) < bigint_col + 4, min(int_col) < bigint_col + 1, max(int_col) < bigint_col + 2, avg(int_col) < bigint_col + 5
+|  row-size=49B cardinality=73
 |
 06:ANALYTIC
 |  functions: min(int_col)
+|  row-size=49B cardinality=730
 |
 10:EXCHANGE [UNPARTITIONED]
 |
@@ -1370,30 +1610,36 @@ PLAN-ROOT SINK
 |  partition by: bigint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING
+|  row-size=45B cardinality=730
 |
 04:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: bigint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=37B cardinality=730
 |
 03:SORT
 |  order by: bigint_col ASC NULLS FIRST, id ASC
+|  row-size=29B cardinality=730
 |
 09:EXCHANGE [HASH(bigint_col)]
 |
 02:ANALYTIC
 |  functions: max(int_col), count(int_col)
 |  partition by: bool_col
+|  row-size=29B cardinality=730
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=17B cardinality=730
 |
 08:EXCHANGE [HASH(bool_col)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: int_col <= 10, int_col >= 5
+   row-size=17B cardinality=730
 ====
 # test predicate propagation onto and through analytic nodes
 # TODO: allow AnalyticEvalNode to apply a < 20
@@ -1412,22 +1658,27 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: id = t2.id, sum(int_col) = t2.int_col
+|  row-size=32B cardinality=730
 |
 |--03:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t2.id < 10, t2.int_col < 20
+|     row-size=8B cardinality=730
 |
 02:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: bigint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=24B cardinality=7.30K
 |
 01:SORT
 |  order by: bigint_col ASC NULLS FIRST, id ASC
+|  row-size=16B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ====
 # test that predicates are correctly propagated in the presence of outer joins
 # (i.e., no predicates should be propagated in this query)
@@ -1447,22 +1698,27 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: id = t2.id, sum(int_col) = t2.int_col
 |  other predicates: t2.id < 10, t2.int_col < 20
+|  row-size=32B cardinality=7.30K
 |
 |--03:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t2.id < 10, t2.int_col < 20
+|     row-size=8B cardinality=730
 |
 02:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: bigint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=24B cardinality=7.30K
 |
 01:SORT
 |  order by: bigint_col ASC NULLS FIRST, id ASC
+|  row-size=16B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ====
 # test canonical function/window/order: row_number() gets a ROWS window
 select
@@ -1476,12 +1732,15 @@ PLAN-ROOT SINK
 |  partition by: tinyint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=13B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC
+|  row-size=5B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=5B cardinality=11.00K
 ====
 # test canonical function/window/order: lead() and lag() have default
 # arguments explicitly set
@@ -1501,48 +1760,58 @@ PLAN-ROOT SINK
 |  partition by: int_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 8 PRECEDING
+|  row-size=35B cardinality=11.00K
 |
 08:ANALYTIC
 |  functions: lead(int_col, 8, 20)
 |  partition by: int_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 8 FOLLOWING
+|  row-size=31B cardinality=11.00K
 |
 07:SORT
 |  order by: int_col ASC NULLS FIRST, id ASC
+|  row-size=27B cardinality=11.00K
 |
 06:ANALYTIC
 |  functions: lag(int_col, 4, NULL)
 |  partition by: smallint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 4 PRECEDING
+|  row-size=27B cardinality=11.00K
 |
 05:ANALYTIC
 |  functions: lead(int_col, 4, NULL)
 |  partition by: smallint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 4 FOLLOWING
+|  row-size=23B cardinality=11.00K
 |
 04:SORT
 |  order by: smallint_col ASC NULLS FIRST, id ASC
+|  row-size=19B cardinality=11.00K
 |
 03:ANALYTIC
 |  functions: lag(int_col, 1, NULL)
 |  partition by: tinyint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING
+|  row-size=19B cardinality=11.00K
 |
 02:ANALYTIC
 |  functions: lead(int_col, 1, NULL)
 |  partition by: tinyint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=15B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC
+|  row-size=11B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=11B cardinality=11.00K
 ====
 # Test canonical function/window/order: Reverse windows ending in UNBOUNDED FOLLOWING
 # and not starting with UNBOUNDED PRECEDING.
@@ -1571,42 +1840,51 @@ PLAN-ROOT SINK
 |  partition by: tinyint_col
 |  order by: id ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+|  row-size=58B cardinality=11.00K
 |
 07:ANALYTIC
 |  functions: count(bigint_col)
 |  partition by: tinyint_col
 |  order by: id ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=50B cardinality=11.00K
 |
 06:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: tinyint_col
 |  order by: id ASC, bool_col DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING
+|  row-size=42B cardinality=11.00K
 |
 05:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC NULLS LAST, bool_col DESC NULLS FIRST
+|  row-size=34B cardinality=11.00K
 |
 04:ANALYTIC
 |  functions: count(bigint_col)
 |  partition by: tinyint_col
 |  order by: id ASC, int_col ASC
 |  window: ROWS BETWEEN 6 PRECEDING AND 8 FOLLOWING
+|  row-size=34B cardinality=11.00K
 |
 03:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC, int_col ASC
+|  row-size=26B cardinality=11.00K
 |
 02:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: tinyint_col
 |  order by: id DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=26B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id DESC NULLS FIRST
+|  row-size=18B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=18B cardinality=11.00K
 ====
 # Test canonical function/window/order: Reverse windows ending in UNBOUNDED FOLLOWING
 # and either not starting with UNBOUNDED PRECEDING or first_value(... IGNORE NULLS), and
@@ -1638,45 +1916,55 @@ PLAN-ROOT SINK
 |  functions: last_value_ignore_nulls(tinyint_col)
 |  order by: id DESC
 |  window: ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING
+|  row-size=51B cardinality=11.00K
 |
 08:SORT
 |  order by: id DESC NULLS FIRST
+|  row-size=50B cardinality=11.00K
 |
 07:ANALYTIC
 |  functions: first_value_ignore_nulls(bigint_col), first_value(bigint_col)
 |  partition by: tinyint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=50B cardinality=11.00K
 |
 06:ANALYTIC
 |  functions: first_value(int_col)
 |  partition by: tinyint_col
 |  order by: id ASC, bool_col DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=34B cardinality=11.00K
 |
 05:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC NULLS LAST, bool_col DESC NULLS FIRST
+|  row-size=30B cardinality=11.00K
 |
 04:ANALYTIC
 |  functions: first_value_rewrite(bigint_col, -1)
 |  partition by: tinyint_col
 |  order by: id ASC, int_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 6 PRECEDING
+|  row-size=30B cardinality=11.00K
 |
 03:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC, int_col ASC
+|  row-size=22B cardinality=11.00K
 |
 02:ANALYTIC
 |  functions: first_value(int_col)
 |  partition by: tinyint_col
 |  order by: id DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=22B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id DESC NULLS FIRST
+|  row-size=18B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=18B cardinality=11.00K
 ====
 # Test canonical function/window/order: Reverse windows ending in UNBOUNDED FOLLOWING
 # and not starting with UNBOUNDED PRECEDING, and change last_value() to first_value()
@@ -1704,42 +1992,51 @@ PLAN-ROOT SINK
 |  partition by: tinyint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+|  row-size=50B cardinality=11.00K
 |
 07:ANALYTIC
 |  functions: last_value(bigint_col)
 |  partition by: tinyint_col
 |  order by: id ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=42B cardinality=11.00K
 |
 06:ANALYTIC
 |  functions: first_value(int_col)
 |  partition by: tinyint_col
 |  order by: id ASC, bool_col DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=34B cardinality=11.00K
 |
 05:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC NULLS LAST, bool_col DESC NULLS FIRST
+|  row-size=30B cardinality=11.00K
 |
 04:ANALYTIC
 |  functions: last_value(bigint_col)
 |  partition by: tinyint_col
 |  order by: id ASC, int_col ASC
 |  window: ROWS BETWEEN 6 PRECEDING AND 8 FOLLOWING
+|  row-size=30B cardinality=11.00K
 |
 03:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC, int_col ASC
+|  row-size=22B cardinality=11.00K
 |
 02:ANALYTIC
 |  functions: first_value(int_col)
 |  partition by: tinyint_col
 |  order by: id DESC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=22B cardinality=11.00K
 |
 01:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id DESC NULLS FIRST
+|  row-size=18B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=18B cardinality=11.00K
 ====
 # IMPALA-1229
 select DENSE_RANK() OVER (ORDER BY t1.day ASC)
@@ -1752,18 +2049,23 @@ PLAN-ROOT SINK
 |  functions: dense_rank()
 |  order by: day ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=12B cardinality=11.00K
 |
 03:SORT
 |  order by: day ASC
+|  row-size=4B cardinality=11.00K
 |
 02:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=4B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesagg t1]
 |     partitions=11/11 files=11 size=814.73KB
 |     limit: 1
+|     row-size=0B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====
 # IMPALA-1243: the Where clause predicate needs to be evaluated in a Select node
 # not as a scan predicate of alltypes
@@ -1780,41 +2082,52 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 05:SELECT
 |  predicates: id IS NULL, tinyint_col != 5
+|  row-size=5B cardinality=730
 |
 00:UNION
+|  row-size=5B cardinality=7.30K
 |
 |--04:ANALYTIC
 |  |  functions: dense_rank()
 |  |  order by: id ASC
 |  |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  |  row-size=13B cardinality=7.30K
 |  |
 |  03:SORT
 |  |  order by: id ASC
+|  |  row-size=5B cardinality=7.30K
 |  |
 |  02:SCAN HDFS [functional.alltypes t1]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    predicates: t1.id IS NULL, t1.tinyint_col != 5
+   row-size=5B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 10:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 09:EXCHANGE [UNPARTITIONED]
 |
 06:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 05:SELECT
 |  predicates: id IS NULL, tinyint_col != 5
+|  row-size=5B cardinality=730
 |
 00:UNION
+|  row-size=5B cardinality=7.30K
 |
 |--08:EXCHANGE [RANDOM]
 |  |
@@ -1822,19 +2135,23 @@ PLAN-ROOT SINK
 |  |  functions: dense_rank()
 |  |  order by: id ASC
 |  |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  |  row-size=13B cardinality=7.30K
 |  |
 |  07:MERGING-EXCHANGE [UNPARTITIONED]
 |  |  order by: id ASC
 |  |
 |  03:SORT
 |  |  order by: id ASC
+|  |  row-size=5B cardinality=7.30K
 |  |
 |  02:SCAN HDFS [functional.alltypes t1]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    predicates: t1.id IS NULL, t1.tinyint_col != 5
+   row-size=5B cardinality=1
 ====
 # Propagate a predicate on a partition key through an inline view that has an analytic
 # function. Predicates that are not compatible with analytic function's partition by
@@ -1849,18 +2166,23 @@ PLAN-ROOT SINK
 |
 03:SELECT
 |  predicates: id = 1, int_col < 10, sum(int_col) = 4
+|  row-size=20B cardinality=0
 |
 02:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: year
 |  order by: id ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=20B cardinality=3.65K
 |
 01:SORT
 |  order by: year ASC NULLS FIRST, id ASC
+|  row-size=12B cardinality=3.65K
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.year = 2009
    partitions=12/24 files=12 size=238.68KB
+   row-size=12B cardinality=3.65K
 ====
 # Propagate predicates through an inline view that computes multiple analytic functions
 # (IMPALA-1900)
@@ -1879,43 +2201,54 @@ PLAN-ROOT SINK
 |
 09:SELECT
 |  predicates: tinyint_col + 1 = 1
+|  row-size=42B cardinality=365
 |
 08:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: year, tinyint_col
+|  row-size=42B cardinality=3.65K
 |
 07:SORT
 |  order by: year ASC NULLS FIRST, tinyint_col ASC NULLS FIRST
+|  row-size=34B cardinality=3.65K
 |
 06:ANALYTIC
 |  functions: last_value(int_col)
 |  partition by: int_col, year
 |  order by: id ASC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
+|  row-size=34B cardinality=3.65K
 |
 05:SORT
 |  order by: int_col ASC NULLS FIRST, year ASC NULLS FIRST, id ASC
+|  row-size=30B cardinality=3.65K
 |
 04:ANALYTIC
 |  functions: avg(int_col)
 |  partition by: tinyint_col, id, year
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=30B cardinality=3.65K
 |
 03:SORT
 |  order by: tinyint_col ASC NULLS FIRST, id ASC NULLS FIRST, year ASC NULLS FIRST, bigint_col ASC
+|  row-size=22B cardinality=3.65K
 |
 02:ANALYTIC
 |  functions: last_value(tinyint_col)
 |  partition by: id, year
 |  order by: int_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+|  row-size=22B cardinality=3.65K
 |
 01:SORT
 |  order by: id ASC NULLS FIRST, year ASC NULLS FIRST, int_col ASC
+|  row-size=21B cardinality=3.65K
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.year = 2009
    partitions=12/24 files=12 size=238.68KB
+   row-size=21B cardinality=3.65K
 ====
 # Don't propagate predicates through an inline view with multiple analytic functions
 # if the predicates are not compatible with every analytic function's partition by
@@ -1932,35 +2265,43 @@ PLAN-ROOT SINK
 |
 07:SELECT
 |  predicates: tinyint_col = 1, year = 2009
+|  row-size=37B cardinality=516
 |
 06:ANALYTIC
 |  functions: lead(int_col, 1, NULL)
 |  order by: tinyint_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING
+|  row-size=37B cardinality=7.30K
 |
 05:SORT
 |  order by: tinyint_col ASC
+|  row-size=33B cardinality=7.30K
 |
 04:ANALYTIC
 |  functions: avg(int_col)
 |  partition by: year
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=33B cardinality=7.30K
 |
 03:SORT
 |  order by: year ASC NULLS FIRST, bigint_col ASC
+|  row-size=25B cardinality=7.30K
 |
 02:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: year, tinyint_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=25B cardinality=7.30K
 |
 01:SORT
 |  order by: year ASC NULLS FIRST, tinyint_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=17B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # Propagate a predicate generated from equivalence classes
 # through an inline with an analytic function (IMPALA-1900)
@@ -1974,19 +2315,24 @@ PLAN-ROOT SINK
 |
 03:SELECT
 |  predicates: month = int_col
+|  row-size=29B cardinality=0
 |
 02:ANALYTIC
 |  functions: sum(id)
 |  partition by: month, tinyint_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=29B cardinality=1
 |
 01:SORT
 |  order by: month ASC NULLS FIRST, tinyint_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=21B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny]
+   partition predicates: functional.alltypestiny.month = 1
    partitions=1/4 files=1 size=115B
    predicates: functional.alltypestiny.id = 1, functional.alltypestiny.tinyint_col = 1
+   row-size=21B cardinality=1
 ====
 # Don't propagate predicates through an inline view with an analytic
 # function that has a complex (non SlotRef) partition by clause for consistency with
@@ -2004,26 +2350,32 @@ PLAN-ROOT SINK
 |
 05:SELECT
 |  predicates: tinyint_col + int_col < 10
+|  row-size=29B cardinality=781
 |
 04:ANALYTIC
 |  functions: sum(id)
 |  partition by: t1.tinyint_col + 1, t2.int_col - 1
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=29B cardinality=7.81K
 |
 03:SORT
 |  order by: tinyint_col + 1 ASC NULLS FIRST, int_col - 1 ASC NULLS FIRST, bigint_col ASC
+|  row-size=21B cardinality=7.81K
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=21B cardinality=7.81K
 |
 |--01:SCAN HDFS [functional.alltypesagg t2]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=8B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.id
+   row-size=13B cardinality=7.30K
 ====
 # Don't propagate a predicate through an inline view with an analytic function
 # when the select list items contain a complex (non SlotRef) expr on a partition by
@@ -2040,18 +2392,22 @@ PLAN-ROOT SINK
 |
 03:SELECT
 |  predicates: int_col + 1 = 1
+|  row-size=24B cardinality=4
 |
 02:ANALYTIC
 |  functions: sum(id)
 |  partition by: int_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=24B cardinality=8
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST, bigint_col ASC
+|  row-size=16B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=16B cardinality=8
 ====
 # IMPALA-1519: Check that the first analytic sort of a select block
 # materializes TupleIsNullPredicates to be substituted in ancestor nodes.
@@ -2080,35 +2436,44 @@ PLAN-ROOT SINK
 |  functions: avg(if(TupleIsNull(), NULL, coalesce(id + bigint_col, 40)))
 |  order by: if(TupleIsNull(), NULL, coalesce(bigint_col, 30)) ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=58B cardinality=8
 |
 07:SORT
 |  order by: if(TupleIsNull(), NULL, coalesce(bigint_col, 30)) ASC
+|  row-size=50B cardinality=8
 |
 06:ANALYTIC
 |  functions: count(1)
 |  order by: int_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=42B cardinality=8
 |
 05:SORT
 |  order by: int_col ASC
+|  row-size=34B cardinality=8
 |
 04:ANALYTIC
 |  functions: sum(id)
 |  partition by: t1.bool_col
+|  row-size=34B cardinality=8
 |
 03:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=26B cardinality=8
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: id + 100 = t1.id
 |  runtime filters: RF000 <- t1.id
+|  row-size=25B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
 |     partitions=4/4 files=4 size=460B
+|     row-size=9B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> id + 100
+   row-size=16B cardinality=8
 ====
 # IMPALA-1519: Check that the first analytic sort of a select block
 # materializes TupleIsNullPredicates to be substituted in ancestor nodes.
@@ -2140,32 +2505,40 @@ PLAN-ROOT SINK
 07:AGGREGATE [FINALIZE]
 |  output: avg(sum(t1.id)), sum(avg(g)), count(id)
 |  group by: if(TupleIsNull(), NULL, coalesce(int_col, 20))
+|  row-size=28B cardinality=3
 |
 06:ANALYTIC
 |  functions: avg(if(TupleIsNull(), NULL, coalesce(id + bigint_col, 40)))
 |  order by: if(TupleIsNull(), NULL, coalesce(bigint_col, 30)) ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=46B cardinality=8
 |
 05:SORT
 |  order by: if(TupleIsNull(), NULL, coalesce(bigint_col, 30)) ASC
+|  row-size=38B cardinality=8
 |
 04:ANALYTIC
 |  functions: sum(id)
 |  partition by: t1.bool_col
+|  row-size=30B cardinality=8
 |
 03:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=22B cardinality=8
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: id + 100 = t1.id
 |  runtime filters: RF000 <- t1.id
+|  row-size=21B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
 |     partitions=4/4 files=4 size=460B
+|     row-size=5B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> id + 100
+   row-size=16B cardinality=8
 ====
 # IMPALA-1519: Check that expr wrapping with a TupleIsNullPredicate
 # is performed correctly with analytics and multiple nesting levels.
@@ -2208,54 +2581,69 @@ PLAN-ROOT SINK
 |
 14:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: id = id
+|  row-size=100B cardinality=16
 |
 |--13:ANALYTIC
 |  |  functions: sum(if(TupleIsNull(), NULL, ifnull(int_col, 1)))
 |  |  order by: id ASC
 |  |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  |  row-size=50B cardinality=8
 |  |
 |  12:SORT
 |  |  order by: id ASC
+|  |  row-size=42B cardinality=8
 |  |
 |  11:HASH JOIN [RIGHT OUTER JOIN]
 |  |  hash predicates: id = c.id
+|  |  row-size=41B cardinality=8
 |  |
 |  |--07:SCAN HDFS [functional.alltypestiny c]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  10:ANALYTIC
 |  |  functions: count(bigint_col)
 |  |  order by: id ASC
 |  |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  |  row-size=37B cardinality=8
 |  |
 |  09:SORT
 |  |  order by: id ASC
+|  |  row-size=29B cardinality=8
 |  |
 |  08:SCAN HDFS [functional.alltypestiny d]
 |     partitions=4/4 files=4 size=460B
+|     row-size=29B cardinality=8
 |
 06:ANALYTIC
 |  functions: sum(if(TupleIsNull(), NULL, ifnull(int_col, 1)))
 |  partition by: a.id
+|  row-size=50B cardinality=8
 |
 05:SORT
 |  order by: id ASC NULLS FIRST
+|  row-size=42B cardinality=8
 |
 04:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: id = a.id
+|  row-size=41B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:ANALYTIC
 |  functions: count(bigint_col)
 |  partition by: id
+|  row-size=37B cardinality=8
 |
 02:SORT
 |  order by: id ASC NULLS FIRST
+|  row-size=29B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny b]
    partitions=4/4 files=4 size=460B
+   row-size=29B cardinality=8
 ====
 # IMPALA-1946: Check that On-clause predicates of an outer join assigned in a scan
 # are not wrapped in TupleIsNullPredicates.
@@ -2272,21 +2660,26 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = t1.id
+|  row-size=16B cardinality=8
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t2.id = t1.id
 |  |  runtime filters: RF000 <- t1.id
+|  |  row-size=12B cardinality=1
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny t1]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: ifnull(t1.int_col, 10) < 10
+|  |     row-size=8B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t2.id
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ====
 # IMPALA-2832: Test proper cloning of analytic function call exprs in a CTAS.
 create table impala_2832 as select
@@ -2301,12 +2694,15 @@ WRITE TO HDFS [default.impala_2832, OVERWRITE=false]
 |  functions: last_value(int_col), last_value(bigint_col)
 |  order by: int_col ASC
 |  window: ROWS BETWEEN CURRENT ROW AND CURRENT ROW
+|  row-size=24B cardinality=7.30K
 |
 01:SORT
 |  order by: int_col ASC
+|  row-size=12B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # For first/last_value(), ranges windows get rewritten as rows windows,
 # so these should be grouped.
@@ -2322,12 +2718,15 @@ PLAN-ROOT SINK
 |  functions: last_value(int_col), first_value(int_col)
 |  order by: bigint_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=20B cardinality=7.30K
 |
 01:SORT
 |  order by: bigint_col ASC
+|  row-size=12B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # IMPALA-4263: Analytic function needs a hash exchange because the partition exprs
 # reference a tuple that is made nullable in the join fragment.
@@ -2343,26 +2742,31 @@ PLAN-ROOT SINK
 04:ANALYTIC
 |  functions: count(*)
 |  partition by: t1.id
+|  row-size=16B cardinality=100
 |
 03:SORT
 |  order by: id ASC NULLS FIRST
+|  row-size=8B cardinality=100
 |
 07:EXCHANGE [HASH(t1.id)]
 |
 02:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=8B cardinality=100
 |
 |--06:EXCHANGE [HASH(t2.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 05:EXCHANGE [HASH(t1.id)]
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.id
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-1882: Confirm that first_value function used without a partition by and order
 # by clause does not need a sort node
@@ -2372,11 +2776,13 @@ PLAN-ROOT SINK
 |
 01:ANALYTIC
 |  functions: first_value_ignore_nulls(tinyint_col)
+|  row-size=2B cardinality=11.00K
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ====
 # IMPALA-1882: Confirm that last_value function used without a partition by and order
 # by clause does not need a sort node
@@ -2386,11 +2792,13 @@ PLAN-ROOT SINK
 |
 01:ANALYTIC
 |  functions: last_value_ignore_nulls(tinyint_col)
+|  row-size=2B cardinality=11.00K
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ====
 # IMPALA-1882: Confirm that first_value function using only a partition by clause
 # sorts over partition column
@@ -2404,14 +2812,17 @@ PLAN-ROOT SINK
 02:ANALYTIC
 |  functions: first_value(id)
 |  partition by: bool_col
+|  row-size=93B cardinality=100
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=89B cardinality=100
 |
 03:EXCHANGE [HASH(bool_col)]
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # IMPALA-1882: Confirm that last_value function using only a partition by clause
 # sorts over partition column
@@ -2425,14 +2836,17 @@ PLAN-ROOT SINK
 02:ANALYTIC
 |  functions: last_value(id)
 |  partition by: bool_col
+|  row-size=93B cardinality=100
 |
 01:SORT
 |  order by: bool_col ASC NULLS FIRST
+|  row-size=89B cardinality=100
 |
 03:EXCHANGE [HASH(bool_col)]
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # IMPALA-6473: analytic fn where the same expr is in the 'partition by' and the 'order by'
 select last_value(int_col)
@@ -2448,14 +2862,17 @@ PLAN-ROOT SINK
 |  partition by: abs(int_col), string_col
 |  order by: id ASC, abs(int_col) ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=33B cardinality=8
 |
 01:SORT
 |  order by: abs(int_col) ASC NULLS FIRST, string_col ASC NULLS FIRST, id ASC
+|  row-size=29B cardinality=8
 |
 03:EXCHANGE [HASH(abs(int_col),string_col)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=21B cardinality=8
 ====
 # IMPALA-6323 Partition by a constant is equivalent to no partitioning.
 select x, count() over(partition by 1) from (VALUES((1 x), (2), (3))) T;
@@ -2465,9 +2882,11 @@ PLAN-ROOT SINK
 01:ANALYTIC
 |  functions: count()
 |  partition by: 1
+|  row-size=9B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=1B cardinality=3
 ====
 # IMPALA-6323 Order by a constant is equivalent to no ordering.
 select x, count() over(order by 1) from (VALUES((1 x), (2), (3))) T;
@@ -2478,11 +2897,13 @@ PLAN-ROOT SINK
 |  functions: count()
 |  order by: 1 ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=9B cardinality=3
 |
 01:SORT
 |  order by: 1 ASC
+|  row-size=1B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=1B cardinality=3
 ====
-

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/complex-types-file-formats.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/complex-types-file-formats.test b/testdata/workloads/functional-planner/queries/PlannerTest/complex-types-file-formats.test
index b902f26..01a6fcf 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/complex-types-file-formats.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/complex-types-file-formats.test
@@ -4,16 +4,21 @@ select s.f1 from functional_parquet.complextypes_fileformat t, t.a
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=24B cardinality=unavailable
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=24B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=24B cardinality=1
 |  |
 |  03:UNNEST [t.a]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional_parquet.complextypes_fileformat t]
-   partitions=1/1 files=1 size=227B
+   partitions=1/1 files=1 size=1.22KB
    predicates: !empty(t.a)
+   row-size=24B cardinality=unavailable
 ====
 # Complex types are not supported on ORC.
 select 1 from functional_orc_def.complextypes_fileformat t, t.a
@@ -33,7 +38,8 @@ select id from functional_orc_def.complextypes_fileformat
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional_orc_def.complextypes_fileformat]
-   partitions=1/1 files=1 size=624B
+   partitions=1/1 files=1 size=621B
+   row-size=4B cardinality=unavailable
 ====
 # Complex types are not supported on ORC but count(*) and similar
 # queries should work.
@@ -43,9 +49,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_orc_def.complextypes_fileformat]
-   partitions=1/1 files=1 size=624B
+   partitions=1/1 files=1 size=621B
+   row-size=0B cardinality=unavailable
 ====
 # Complex types are not supported on Avro.
 select s.f1 from functional_avro_snap.complextypes_fileformat t, t.a
@@ -84,7 +92,8 @@ select id from functional_rc_snap.complextypes_fileformat
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional_rc_snap.complextypes_fileformat]
-   partitions=1/1 files=1 size=56B
+   partitions=1/1 files=1 size=212B
+   row-size=4B cardinality=unavailable
 ====
 # Complex types are not supported on RC files but count(*) and similar
 # queries should work.
@@ -94,9 +103,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_rc_snap.complextypes_fileformat]
-   partitions=1/1 files=1 size=56B
+   partitions=1/1 files=1 size=212B
+   row-size=0B cardinality=unavailable
 ====
 # Complex types are not supported on sequence files.
 select s.f1 from functional_seq_snap.complextypes_fileformat t, t.a
@@ -111,7 +122,8 @@ select id from functional_seq_snap.complextypes_fileformat
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional_seq_snap.complextypes_fileformat]
-   partitions=1/1 files=1 size=87B
+   partitions=1/1 files=1 size=281B
+   row-size=4B cardinality=unavailable
 ====
 # Scanning all partitions fails because there are partitions with a file format for which
 # complex types are not supported. The error message is abbreviated because it is
@@ -128,6 +140,7 @@ select id from functional_hbase.allcomplextypes
 PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.allcomplextypes]
+   row-size=4B cardinality=1
 ====
 # Scanning an HBase table with complex-types columns fails if a complex-typed
 # column is selected.
@@ -161,16 +174,22 @@ select s.f1 from functional.complextypes_multifileformat t, t.a where p = 2
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=24B cardinality=unavailable
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=24B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=24B cardinality=1
 |  |
 |  03:UNNEST [t.a]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.complextypes_multifileformat t]
+   partition predicates: p = 2
    partitions=1/5 files=1 size=128B
    predicates: !empty(t.a)
+   row-size=24B cardinality=unavailable
 ====
 # Scanning an Avro partition of a multi-format table with complex types fails.
 select s.f1 from functional.complextypes_multifileformat t, t.a where p = 3
@@ -192,9 +211,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.complextypes_multifileformat]
+   partition predicates: p = 4
    partitions=1/5 files=1 size=128B
+   row-size=0B cardinality=unavailable
 ====
 # Scanning an ORC file partition of a multi-format table with complex types fails.
 select id from functional.complextypes_multifileformat t, t.a where p = 5
@@ -210,7 +232,10 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.complextypes_multifileformat]
+   partition predicates: p = 5
    partitions=1/5 files=1 size=128B
+   row-size=0B cardinality=unavailable
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/conjunct-ordering.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/conjunct-ordering.test b/testdata/workloads/functional-planner/queries/PlannerTest/conjunct-ordering.test
index 7084b26..c94b04c 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/conjunct-ordering.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/conjunct-ordering.test
@@ -9,6 +9,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.bool_col, a.int_col = a.tinyint_col
+   row-size=89B cardinality=730
 ====
 # Check that numeric comparison costs less than LIKE.
 select *
@@ -21,6 +22,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col = a.tinyint_col, a.string_col LIKE '%a%'
+   row-size=89B cardinality=730
 ====
 # Check that single numeric comparison costs less than compound numeric comparison.
 select *
@@ -33,6 +35,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col = a.bigint_col, (a.int_col = a.tinyint_col OR a.int_col = a.smallint_col)
+   row-size=89B cardinality=730
 ====
 # Check that a simple numeric comparison costs less than one with arithmetic.
 select *
@@ -44,6 +47,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col = a.tinyint_col, a.int_col + 5 = a.bigint_col - 10
+   row-size=89B cardinality=730
 ====
 # Check that large CASE costs more than numeric comparison.
 select *
@@ -56,6 +60,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col = a.tinyint_col, (CASE a.int_col WHEN 0 THEN TRUE WHEN 1 THEN TRUE WHEN 2 THEN TRUE ELSE FALSE END), (CASE a.tinyint_col WHEN 0 THEN TRUE WHEN 1 THEN TRUE WHEN 2 THEN TRUE ELSE FALSE END)
+   row-size=89B cardinality=730
 ====
 # Check that a LIKE with only leading/trailing wildcards costs less then LIKE with
 # non-leading/trailing wildcards.
@@ -68,6 +73,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.date_string_col LIKE '%a%', a.date_string_col LIKE 'a%a'
+   row-size=89B cardinality=730
 ====
 # Check that an IN predicate costs more than a single numeric comparison.
 select *
@@ -79,6 +85,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.id = 1, a.int_col IN (1, 2, 3, 4, 5, 6, 7, 8, 9)
+   row-size=89B cardinality=1
 ====
 # Check that a timestamp comparison costs more than a numeric comparison.
 select *
@@ -90,6 +97,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col = 0, a.timestamp_col > TIMESTAMP '2000-01-01 00:00:00'
+   row-size=89B cardinality=231
 ====
 # Check that string comparisons are ordered by string length.
 select *
@@ -102,6 +110,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.string_col = 'a', a.date_string_col = 'looooooooooooooooong string'
+   row-size=89B cardinality=3
 ====
 # Check that timestamp arithmetic adds cost.
 select *
@@ -114,6 +123,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.timestamp_col < TIMESTAMP '2020-01-01 00:00:00', a.timestamp_col - INTERVAL 1 day > TIMESTAMP '2000-01-01 00:00:00'
+   row-size=89B cardinality=730
 ====
 # Check that a function call adds cost.
 select *
@@ -125,6 +135,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.double_col > 0, ceil(a.double_col) > 0
+   row-size=89B cardinality=730
 ====
 # Check that a cast adds cost.
 select *
@@ -136,6 +147,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col > 0, CAST(a.int_col AS DOUBLE) > 0
+   row-size=89B cardinality=730
 ====
 # Check that is null costs less than string comparison.
 select *
@@ -147,6 +159,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col IS NULL, a.string_col = 'string'
+   row-size=89B cardinality=231
 ====
 # Check that long list of predicates is sorted correctly.
 select *
@@ -161,6 +174,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.bool_col, a.int_col = a.bigint_col, (a.int_col = a.tinyint_col OR a.int_col = a.smallint_col), a.string_col LIKE '%a%'
+   row-size=89B cardinality=730
 ====
 # Check that for two equal cost conjuncts, the one with the higher selectivity goes first.
 # There are more distinct id values, so it is more selective.
@@ -173,6 +187,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.id = 0, a.int_col = 0
+   row-size=89B cardinality=1
 ====
 # IMPALA-4614: Tests that the eval cost of timestamp literals is set.
 # The HAVING predicate is assigned to the scan and tests that it has
@@ -189,8 +204,10 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: timestamp_col
+|  row-size=24B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.timestamp_col < TIMESTAMP '2010-01-01 01:05:20', int_col < 10, timestamp_col < TIMESTAMP '2010-01-02 01:05:20', timestamp_col != CAST(date_string_col AS TIMESTAMP)
+   row-size=40B cardinality=730
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test b/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test
index 5b1cbe4..752ccf3 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test
@@ -15,7 +15,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=44B cardinality=1500000
+|  tuple-ids=2,1,0 row-size=44B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
@@ -58,21 +58,21 @@ PLAN-ROOT SINK
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=289.00MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_custkey > CAST(10 AS BIGINT), !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderkey = CAST(4 AS BIGINT)
    predicates on o_lineitems: CAST(20 AS BIGINT) + CAST(l_linenumber AS BIGINT) < CAST(0 AS BIGINT)
    stored statistics:
-     table: rows=150000 size=288.96MB
+     table: rows=150000 size=288.99MB
      columns missing stats: c_orders
-   extrapolated-rows=disabled max-scan-range-rows=44227
+   extrapolated-rows=disabled max-scan-range-rows=44225
    parquet statistics predicates: c_custkey > CAST(10 AS BIGINT)
    parquet statistics predicates on o: o_orderkey = CAST(4 AS BIGINT)
    parquet dictionary predicates: c_custkey > CAST(10 AS BIGINT)
    parquet dictionary predicates on o: o_orderkey = CAST(4 AS BIGINT)
    parquet dictionary predicates on o_lineitems: CAST(20 AS BIGINT) + CAST(l_linenumber AS BIGINT) < CAST(0 AS BIGINT)
    mem-estimate=264.00MB mem-reservation=16.00MB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=15000
+   tuple-ids=0 row-size=20B cardinality=15.00K
    in pipelines: 00(GETNEXT)
 ====
 # Test HBase scan node.
@@ -153,7 +153,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=7300
+   tuple-ids=0 row-size=20B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Test hash join.
@@ -180,7 +180,7 @@ PLAN-ROOT SINK
 |  other join predicates: CAST(a.int_col AS BIGINT) <= b.bigint_col + CAST(97 AS BIGINT), CAST(a.int_col AS BIGINT) >= CAST(0 AS BIGINT) + b.bigint_col
 |  other predicates: CAST(b.double_col AS DECIMAL(3,2)) > CAST(11.1 AS DECIMAL(6,1))
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,1N row-size=28B cardinality=7300
+|  tuple-ids=0,1N row-size=28B cardinality=7.30K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [functional.alltypes b]
@@ -203,7 +203,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=7300
+   tuple-ids=0 row-size=8B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Test nested-loop join. Same as above but and with a disjunction in the On clause.
@@ -229,7 +229,7 @@ PLAN-ROOT SINK
 |  join predicates: (CAST(2 AS BIGINT) + CAST(a.id AS BIGINT) = CAST(b.id AS BIGINT) - CAST(2 AS BIGINT) OR CAST(a.int_col AS BIGINT) >= CAST(0 AS BIGINT) + b.bigint_col AND CAST(a.int_col AS BIGINT) <= b.bigint_col + CAST(97 AS BIGINT))
 |  predicates: CAST(b.double_col AS DECIMAL(3,2)) > CAST(11.1 AS DECIMAL(6,1))
 |  mem-estimate=14.26KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1N row-size=28B cardinality=7300
+|  tuple-ids=0,1N row-size=28B cardinality=7.30K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [functional.alltypes b]
@@ -252,7 +252,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=7300
+   tuple-ids=0 row-size=8B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Test distinct aggregation with grouping.
@@ -282,7 +282,7 @@ PLAN-ROOT SINK
 |  output: count(*)
 |  group by: timestamp_col = TIMESTAMP '2016-11-15 00:00:00', CAST(2 AS BIGINT) + CAST(id AS BIGINT)
 |  mem-estimate=10.00MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=1 row-size=17B cardinality=7300
+|  tuple-ids=1 row-size=17B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -293,7 +293,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=20B cardinality=7300
+   tuple-ids=0 row-size=20B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Test non-grouping distinct aggregation.
@@ -320,7 +320,7 @@ PLAN-ROOT SINK
 |  output: count(*)
 |  group by: CAST(2 AS BIGINT) + CAST(id AS BIGINT)
 |  mem-estimate=10.00MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=1 row-size=16B cardinality=7300
+|  tuple-ids=1 row-size=16B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -331,7 +331,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=4B cardinality=7300
+   tuple-ids=0 row-size=4B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Test analytic eval node.
@@ -354,14 +354,14 @@ PLAN-ROOT SINK
 |  order by: greatest(20, bigint_col) ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3,2 row-size=53B cardinality=7300
+|  tuple-ids=3,2 row-size=53B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 01:SORT
 |  order by: concat('ab', string_col) ASC NULLS FIRST, greatest(20, bigint_col) ASC
 |  materialized: concat('ab', string_col), greatest(20, bigint_col)
 |  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3 row-size=45B cardinality=7300
+|  tuple-ids=3 row-size=45B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -372,7 +372,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=25B cardinality=7300
+   tuple-ids=0 row-size=25B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Test sort node.
@@ -389,7 +389,7 @@ PLAN-ROOT SINK
 01:SORT
 |  order by: id * 7.5 ASC
 |  mem-estimate=6.00MB mem-reservation=6.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=8B cardinality=7300
+|  tuple-ids=1 row-size=8B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -400,7 +400,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=7300
+   tuple-ids=0 row-size=8B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Test HDFS table sink.


[06/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/tpch-kudu.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/tpch-kudu.test b/testdata/workloads/functional-planner/queries/PlannerTest/tpch-kudu.test
index cc35a26..c923eb5 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/tpch-kudu.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/tpch-kudu.test
@@ -27,13 +27,16 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: l_returnflag ASC, l_linestatus ASC
+|  row-size=122B cardinality=6
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(l_quantity), sum(l_extendedprice), sum(l_extendedprice * (1 - l_discount)), sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg(l_quantity), avg(l_extendedprice), avg(l_discount), count(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=122B cardinality=6
 |
 00:SCAN KUDU [tpch_kudu.lineitem]
    kudu predicates: l_shipdate <= '1998-09-02'
+   row-size=66B cardinality=600.12K
 ====
 # Q2 - Minimum Cost Supplier Query
 select
@@ -87,70 +90,89 @@ PLAN-ROOT SINK
 |
 18:TOP-N [LIMIT=100]
 |  order by: s_acctbal DESC, n_name ASC, s_name ASC, p_partkey ASC
+|  row-size=230B cardinality=100
 |
 17:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: ps_partkey = p_partkey, min(ps_supplycost) = ps_supplycost
 |  runtime filters: RF002 <- p_partkey
+|  row-size=330B cardinality=1.01K
 |
 |--16:HASH JOIN [INNER JOIN]
 |  |  hash predicates: n_regionkey = r_regionkey
 |  |  runtime filters: RF011 <- r_regionkey
+|  |  row-size=330B cardinality=1.01K
 |  |
 |  |--04:SCAN KUDU [tpch_kudu.region]
 |  |     kudu predicates: r_name = 'EUROPE'
+|  |     row-size=2B cardinality=1
 |  |
 |  15:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF013 <- n_nationkey
+|  |  row-size=328B cardinality=5.04K
 |  |
 |  |--03:SCAN KUDU [tpch_kudu.nation]
 |  |     runtime filters: RF011 -> n_regionkey
+|  |     row-size=27B cardinality=25
 |  |
 |  14:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_suppkey = ps_suppkey
 |  |  runtime filters: RF015 <- ps_suppkey
+|  |  row-size=301B cardinality=5.04K
 |  |
 |  |--13:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: ps_partkey = p_partkey
 |  |  |  runtime filters: RF017 <- p_partkey
+|  |  |  row-size=99B cardinality=5.04K
 |  |  |
 |  |  |--00:SCAN KUDU [tpch_kudu.part]
 |  |  |     predicates: p_type LIKE '%BRASS'
 |  |  |     kudu predicates: p_size = 15
+|  |  |     row-size=75B cardinality=1.26K
 |  |  |
 |  |  02:SCAN KUDU [tpch_kudu.partsupp]
 |  |     runtime filters: RF017 -> ps_partkey
+|  |     row-size=24B cardinality=800.00K
 |  |
 |  01:SCAN KUDU [tpch_kudu.supplier]
 |     runtime filters: RF013 -> s_nationkey, RF015 -> s_suppkey
+|     row-size=203B cardinality=10.00K
 |
 12:AGGREGATE [FINALIZE]
 |  output: min(ps_supplycost)
 |  group by: ps_partkey
+|  row-size=16B cardinality=160.00K
 |
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF005 <- r_regionkey
+|  row-size=40B cardinality=160.00K
 |
 |--08:SCAN KUDU [tpch_kudu.region]
 |     kudu predicates: r_name = 'EUROPE'
+|     row-size=2B cardinality=1
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF007 <- n_nationkey
+|  row-size=38B cardinality=800.00K
 |
 |--07:SCAN KUDU [tpch_kudu.nation]
 |     runtime filters: RF005 -> n_regionkey
+|     row-size=4B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF009 <- s_suppkey
+|  row-size=34B cardinality=800.00K
 |
 |--06:SCAN KUDU [tpch_kudu.supplier]
 |     runtime filters: RF007 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 05:SCAN KUDU [tpch_kudu.partsupp]
    runtime filters: RF002 -> tpch_kudu.partsupp.ps_partkey, RF009 -> ps_suppkey
+   row-size=24B cardinality=800.00K
 ====
 # Q3 - Shipping Priority Query
 select
@@ -183,29 +205,36 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 2) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF001 <- c_custkey
+|  row-size=78B cardinality=17.56K
 |
 |--00:SCAN KUDU [tpch_kudu.customer]
 |     kudu predicates: c_mktsegment = 'BUILDING'
+|     row-size=8B cardinality=30.00K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF003 <- o_orderkey
+|  row-size=70B cardinality=57.58K
 |
 |--01:SCAN KUDU [tpch_kudu.orders]
 |     kudu predicates: o_orderdate < '1995-03-15'
 |     runtime filters: RF001 -> o_custkey
+|     row-size=46B cardinality=150.00K
 |
 02:SCAN KUDU [tpch_kudu.lineitem]
    kudu predicates: l_shipdate > '1995-03-15'
    runtime filters: RF003 -> l_orderkey
+   row-size=24B cardinality=600.12K
 ====
 # Q4 - Order Priority Checking Query
 select
@@ -236,21 +265,26 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF001 <- o_orderkey
+|  row-size=32B cardinality=150.00K
 |
 |--00:SCAN KUDU [tpch_kudu.orders]
 |     kudu predicates: o_orderdate < '1993-10-01', o_orderdate >= '1993-07-01'
+|     row-size=32B cardinality=150.00K
 |
 01:SCAN KUDU [tpch_kudu.lineitem]
    predicates: l_commitdate < l_receiptdate
    runtime filters: RF001 -> l_orderkey
+   row-size=60B cardinality=600.12K
 ====
 # Q5 - Local Supplier Volume Query
 select
@@ -284,49 +318,62 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 2) DESC
+|  row-size=35B cardinality=25
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF001 <- r_regionkey
+|  row-size=97B cardinality=115.16K
 |
 |--05:SCAN KUDU [tpch_kudu.region]
 |     kudu predicates: r_name = 'ASIA'
+|     row-size=2B cardinality=1
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF003 <- n_nationkey
+|  row-size=95B cardinality=575.77K
 |
 |--04:SCAN KUDU [tpch_kudu.nation]
 |     runtime filters: RF001 -> n_regionkey
+|     row-size=27B cardinality=25
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = s_nationkey, l_suppkey = s_suppkey
 |  runtime filters: RF006 <- s_nationkey, RF007 <- s_suppkey
+|  row-size=68B cardinality=575.77K
 |
 |--03:SCAN KUDU [tpch_kudu.supplier]
 |     runtime filters: RF003 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF009 <- c_custkey
+|  row-size=58B cardinality=575.77K
 |
 |--00:SCAN KUDU [tpch_kudu.customer]
 |     runtime filters: RF003 -> tpch_kudu.customer.c_nationkey, RF006 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF011 <- o_orderkey
+|  row-size=48B cardinality=575.77K
 |
 |--01:SCAN KUDU [tpch_kudu.orders]
 |     kudu predicates: o_orderdate < '1995-01-01', o_orderdate >= '1994-01-01'
 |     runtime filters: RF009 -> o_custkey
+|     row-size=16B cardinality=150.00K
 |
 02:SCAN KUDU [tpch_kudu.lineitem]
    runtime filters: RF007 -> l_suppkey, RF011 -> l_orderkey
+   row-size=32B cardinality=6.00M
 ====
 # Q6 - Forecasting Revenue Change Query
 select
@@ -345,9 +392,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 00:SCAN KUDU [tpch_kudu.lineitem]
    kudu predicates: l_discount <= 0.07, l_discount >= 0.05, l_quantity < 24, l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
+   row-size=16B cardinality=600.12K
 ====
 # Q7 - Volume Shipping Query
 select
@@ -395,48 +444,61 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: supp_nation ASC, cust_nation ASC, l_year ASC
+|  row-size=58B cardinality=575.77K
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(round(l_extendedprice * (1 - l_discount), 2))
 |  group by: n1.n_name, n2.n_name, year(l_shipdate)
+|  row-size=58B cardinality=575.77K
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n2.n_nationkey
 |  other predicates: ((n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE'))
 |  runtime filters: RF001 <- n2.n_nationkey
+|  row-size=144B cardinality=575.77K
 |
 |--05:SCAN KUDU [tpch_kudu.nation n2]
+|     row-size=25B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n1.n_nationkey
 |  runtime filters: RF003 <- n1.n_nationkey
+|  row-size=119B cardinality=575.77K
 |
 |--04:SCAN KUDU [tpch_kudu.nation n1]
+|     row-size=25B cardinality=25
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF005 <- c_custkey
+|  row-size=94B cardinality=575.77K
 |
 |--03:SCAN KUDU [tpch_kudu.customer]
 |     runtime filters: RF001 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF007 <- s_suppkey
+|  row-size=84B cardinality=575.77K
 |
 |--00:SCAN KUDU [tpch_kudu.supplier]
 |     runtime filters: RF003 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF009 <- o_orderkey
+|  row-size=74B cardinality=575.77K
 |
 |--02:SCAN KUDU [tpch_kudu.orders]
 |     runtime filters: RF005 -> o_custkey
+|     row-size=16B cardinality=1.50M
 |
 01:SCAN KUDU [tpch_kudu.lineitem]
    kudu predicates: l_shipdate <= '1996-12-31', l_shipdate >= '1995-01-01'
    runtime filters: RF007 -> l_suppkey, RF009 -> l_orderkey
+   row-size=58B cardinality=600.12K
 ====
 # Q8 - National Market Share Query
 select
@@ -483,62 +545,79 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: o_year ASC
+|  row-size=36B cardinality=761
 |
 15:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN n2.n_name = 'BRAZIL' THEN round(l_extendedprice * (1 - l_discount), 2) ELSE 0 END), sum(round(l_extendedprice * (1 - l_discount), 2))
 |  group by: year(o_orderdate)
+|  row-size=36B cardinality=761
 |
 14:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n2.n_nationkey
 |  runtime filters: RF001 <- n2.n_nationkey
+|  row-size=141B cardinality=761
 |
 |--06:SCAN KUDU [tpch_kudu.nation n2]
+|     row-size=25B cardinality=25
 |
 13:HASH JOIN [INNER JOIN]
 |  hash predicates: n1.n_regionkey = r_regionkey
 |  runtime filters: RF003 <- r_regionkey
+|  row-size=116B cardinality=761
 |
 |--07:SCAN KUDU [tpch_kudu.region]
 |     kudu predicates: r_name = 'AMERICA'
+|     row-size=2B cardinality=1
 |
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n1.n_nationkey
 |  runtime filters: RF005 <- n1.n_nationkey
+|  row-size=114B cardinality=3.80K
 |
 |--05:SCAN KUDU [tpch_kudu.nation n1]
 |     runtime filters: RF003 -> n1.n_regionkey
+|     row-size=4B cardinality=25
 |
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: c_custkey = o_custkey
 |  runtime filters: RF007 <- o_custkey
+|  row-size=110B cardinality=3.80K
 |
 |--10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: l_suppkey = s_suppkey
 |  |  runtime filters: RF009 <- s_suppkey
+|  |  row-size=100B cardinality=3.80K
 |  |
 |  |--01:SCAN KUDU [tpch_kudu.supplier]
 |  |     runtime filters: RF001 -> s_nationkey
+|  |     row-size=10B cardinality=10.00K
 |  |
 |  09:HASH JOIN [INNER JOIN]
 |  |  hash predicates: o_orderkey = l_orderkey
 |  |  runtime filters: RF011 <- l_orderkey
+|  |  row-size=90B cardinality=3.80K
 |  |
 |  |--08:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: l_partkey = p_partkey
 |  |  |  runtime filters: RF013 <- p_partkey
+|  |  |  row-size=48B cardinality=39.63K
 |  |  |
 |  |  |--00:SCAN KUDU [tpch_kudu.part]
 |  |  |     kudu predicates: p_type = 'ECONOMY ANODIZED STEEL'
+|  |  |     row-size=8B cardinality=1.32K
 |  |  |
 |  |  02:SCAN KUDU [tpch_kudu.lineitem]
 |  |     runtime filters: RF009 -> l_suppkey, RF013 -> l_partkey
+|  |     row-size=40B cardinality=6.00M
 |  |
 |  03:SCAN KUDU [tpch_kudu.orders]
 |     kudu predicates: o_orderdate <= '1996-12-31', o_orderdate >= '1995-01-01'
 |     runtime filters: RF011 -> o_orderkey
+|     row-size=42B cardinality=150.00K
 |
 04:SCAN KUDU [tpch_kudu.customer]
    runtime filters: RF005 -> c_nationkey, RF007 -> c_custkey
+   row-size=10B cardinality=150.00K
 ====
 # Q9 - Product Type Measure Query
 select
@@ -579,46 +658,59 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: nation ASC, o_year DESC
+|  row-size=39B cardinality=61.70K
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(round(l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity, 2))
 |  group by: n_name, year(o_orderdate)
+|  row-size=39B cardinality=61.70K
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF001 <- n_nationkey
+|  row-size=198B cardinality=574.29K
 |
 |--05:SCAN KUDU [tpch_kudu.nation]
+|     row-size=25B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = ps_suppkey
 |  runtime filters: RF004 <- ps_partkey, RF005 <- ps_suppkey
+|  row-size=173B cardinality=574.29K
 |
 |--03:SCAN KUDU [tpch_kudu.partsupp]
+|     row-size=24B cardinality=800.00K
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF007 <- s_suppkey
+|  row-size=149B cardinality=574.29K
 |
 |--01:SCAN KUDU [tpch_kudu.supplier]
 |     runtime filters: RF001 -> s_nationkey, RF005 -> tpch_kudu.supplier.s_suppkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF009 <- o_orderkey
+|  row-size=139B cardinality=574.29K
 |
 |--04:SCAN KUDU [tpch_kudu.orders]
+|     row-size=34B cardinality=1.50M
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF011 <- p_partkey
+|  row-size=105B cardinality=598.58K
 |
 |--00:SCAN KUDU [tpch_kudu.part]
 |     predicates: p_name LIKE '%green%'
 |     runtime filters: RF004 -> tpch_kudu.part.p_partkey
+|     row-size=57B cardinality=20.00K
 |
 02:SCAN KUDU [tpch_kudu.lineitem]
    runtime filters: RF004 -> l_partkey, RF005 -> l_suppkey, RF007 -> l_suppkey, RF009 -> l_orderkey, RF011 -> l_partkey
+   row-size=48B cardinality=6.00M
 ====
 # Q10 - Returned Item Reporting Query
 # Converted select from multiple tables to joins
@@ -661,34 +753,43 @@ PLAN-ROOT SINK
 |
 08:TOP-N [LIMIT=20]
 |  order by: round(sum(l_extendedprice * (1 - l_discount)), 1) DESC
+|  row-size=230B cardinality=20
 |
 07:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, CAST(c_acctbal AS BIGINT), c_phone, n_name, c_address, c_comment
+|  row-size=230B cardinality=191.92K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF001 <- n_nationkey
+|  row-size=278B cardinality=191.92K
 |
 |--03:SCAN KUDU [tpch_kudu.nation]
+|     row-size=25B cardinality=25
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: c_custkey = o_custkey
 |  runtime filters: RF003 <- o_custkey
+|  row-size=253B cardinality=191.92K
 |
 |--04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: l_orderkey = o_orderkey
 |  |  runtime filters: RF005 <- o_orderkey
+|  |  row-size=40B cardinality=191.92K
 |  |
 |  |--01:SCAN KUDU [tpch_kudu.orders]
 |  |     kudu predicates: o_orderdate < '1994-01-01', o_orderdate >= '1993-10-01'
+|  |     row-size=16B cardinality=150.00K
 |  |
 |  02:SCAN KUDU [tpch_kudu.lineitem]
 |     kudu predicates: l_returnflag = 'R'
 |     runtime filters: RF005 -> l_orderkey
+|     row-size=24B cardinality=2.00M
 |
 00:SCAN KUDU [tpch_kudu.customer]
    runtime filters: RF001 -> c_nationkey, RF003 -> c_custkey
+   row-size=213B cardinality=150.00K
 ====
 # Q11 - Important Stock Identification
 # Modifications: query was rewritten to not have a subquery in the having clause
@@ -731,50 +832,64 @@ PLAN-ROOT SINK
 |
 13:SORT
 |  order by: value DESC
+|  row-size=24B cardinality=32.00K
 |
 12:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: round(sum(ps_supplycost * ps_availqty), 2) > round(sum(ps_supplycost * ps_availqty) * 0.0001, 2)
+|  row-size=40B cardinality=32.00K
 |
 |--11:AGGREGATE [FINALIZE]
 |  |  output: sum(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF005 <- n_nationkey
+|  |  row-size=36B cardinality=32.00K
 |  |
 |  |--08:SCAN KUDU [tpch_kudu.nation]
 |  |     kudu predicates: n_name = 'GERMANY'
+|  |     row-size=2B cardinality=1
 |  |
 |  09:HASH JOIN [INNER JOIN]
 |  |  hash predicates: ps_suppkey = s_suppkey
 |  |  runtime filters: RF007 <- s_suppkey
+|  |  row-size=34B cardinality=800.00K
 |  |
 |  |--07:SCAN KUDU [tpch_kudu.supplier]
 |  |     runtime filters: RF005 -> s_nationkey
+|  |     row-size=10B cardinality=10.00K
 |  |
 |  06:SCAN KUDU [tpch_kudu.partsupp]
 |     runtime filters: RF007 -> ps_suppkey
+|     row-size=24B cardinality=800.00K
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=32.00K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF001 <- n_nationkey
+|  row-size=44B cardinality=32.00K
 |
 |--02:SCAN KUDU [tpch_kudu.nation]
 |     kudu predicates: n_name = 'GERMANY'
+|     row-size=2B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF003 <- s_suppkey
+|  row-size=42B cardinality=800.00K
 |
 |--01:SCAN KUDU [tpch_kudu.supplier]
 |     runtime filters: RF001 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 00:SCAN KUDU [tpch_kudu.partsupp]
    runtime filters: RF003 -> ps_suppkey
+   row-size=32B cardinality=800.00K
 ====
 # Q12 - Shipping Mode and Order Priority Query
 select
@@ -812,21 +927,26 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: l_shipmode ASC
+|  row-size=32B cardinality=7
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=32B cardinality=7
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: o_orderkey = l_orderkey
 |  runtime filters: RF001 <- l_orderkey
+|  row-size=139B cardinality=320.78K
 |
 |--01:SCAN KUDU [tpch_kudu.lineitem]
 |     predicates: l_commitdate < l_receiptdate, l_shipdate < l_commitdate
 |     kudu predicates: l_shipmode IN ('MAIL', 'SHIP'), l_receiptdate < '1995-01-01', l_receiptdate >= '1994-01-01'
+|     row-size=106B cardinality=320.78K
 |
 00:SCAN KUDU [tpch_kudu.orders]
    runtime filters: RF001 -> o_orderkey
+   row-size=32B cardinality=1.50M
 ====
 # Q13 - Customer Distribution Query
 select
@@ -856,24 +976,30 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: count(*) DESC, c_count DESC
+|  row-size=16B cardinality=150.00K
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: count(o_orderkey)
+|  row-size=16B cardinality=150.00K
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(o_orderkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF001 <- c_custkey
+|  row-size=89B cardinality=150.00K
 |
 |--00:SCAN KUDU [tpch_kudu.customer]
+|     row-size=8B cardinality=150.00K
 |
 01:SCAN KUDU [tpch_kudu.orders]
    predicates: NOT o_comment LIKE '%special%requests%'
    runtime filters: RF001 -> o_custkey
+   row-size=81B cardinality=150.00K
 ====
 # Q14 - Promotion Effect
 select
@@ -896,16 +1022,20 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF001 <- p_partkey
+|  row-size=69B cardinality=598.58K
 |
 |--01:SCAN KUDU [tpch_kudu.part]
+|     row-size=45B cardinality=200.00K
 |
 00:SCAN KUDU [tpch_kudu.lineitem]
    kudu predicates: l_shipdate < '1995-10-01', l_shipdate >= '1995-09-01'
    runtime filters: RF001 -> l_partkey
+   row-size=24B cardinality=600.12K
 ====
 # Q15 - Top Supplier Query
 with revenue_view as (
@@ -945,33 +1075,42 @@ PLAN-ROOT SINK
 |
 08:SORT
 |  order by: s_suppkey ASC
+|  row-size=118B cardinality=10.00K
 |
 07:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: round(sum(l_extendedprice * (1 - l_discount)), 1) = max(total_revenue)
+|  row-size=138B cardinality=10.00K
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: max(round(sum(l_extendedprice * (1 - l_discount)), 1))
+|  |  row-size=16B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: sum(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  03:SCAN KUDU [tpch_kudu.lineitem]
 |     kudu predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
+|     row-size=24B cardinality=600.12K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: s_suppkey = l_suppkey
 |  runtime filters: RF001 <- l_suppkey
+|  row-size=138B cardinality=10.00K
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: sum(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  01:SCAN KUDU [tpch_kudu.lineitem]
 |     kudu predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
+|     row-size=24B cardinality=600.12K
 |
 00:SCAN KUDU [tpch_kudu.supplier]
    runtime filters: RF001 -> s_suppkey
+   row-size=114B cardinality=10.00K
 ====
 # Q16 - Parts/Supplier Relation Query
 select
@@ -1011,30 +1150,38 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: count(ps_suppkey) DESC, p_brand ASC, p_type ASC, p_size ASC
+|  row-size=65B cardinality=31.92K
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(ps_suppkey)
 |  group by: p_brand, p_type, p_size
+|  row-size=65B cardinality=31.92K
 |
 05:AGGREGATE
 |  group by: p_brand, p_type, p_size, ps_suppkey
+|  row-size=65B cardinality=31.92K
 |
 04:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  hash predicates: ps_suppkey = s_suppkey
+|  row-size=89B cardinality=31.92K
 |
 |--02:SCAN KUDU [tpch_kudu.supplier]
 |     predicates: s_comment LIKE '%Customer%Complaints%'
+|     row-size=87B cardinality=1.00K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: ps_partkey = p_partkey
 |  runtime filters: RF001 <- p_partkey
+|  row-size=89B cardinality=31.92K
 |
 |--01:SCAN KUDU [tpch_kudu.part]
 |     predicates: p_brand != 'Brand#45', NOT p_type LIKE 'MEDIUM POLISHED%'
 |     kudu predicates: p_size IN (49, 14, 23, 45, 19, 3, 36, 9)
+|     row-size=73B cardinality=8.00K
 |
 00:SCAN KUDU [tpch_kudu.partsupp]
    runtime filters: RF001 -> ps_partkey
+   row-size=16B cardinality=800.00K
 ====
 # Q17 - Small-Quantity-Order Revenue Query
 select
@@ -1061,28 +1208,35 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice)
+|  row-size=16B cardinality=1
 |
 05:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  other join predicates: l_quantity < round(0.2 * avg(l_quantity), 2)
 |  runtime filters: RF001 <- p_partkey
+|  row-size=32B cardinality=29.93K
 |
 |--04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: l_partkey = p_partkey
 |  |  runtime filters: RF003 <- p_partkey
+|  |  row-size=32B cardinality=29.93K
 |  |
 |  |--01:SCAN KUDU [tpch_kudu.part]
 |  |     kudu predicates: p_container = 'MED BOX', p_brand = 'Brand#23'
+|  |     row-size=8B cardinality=1.00K
 |  |
 |  00:SCAN KUDU [tpch_kudu.lineitem]
 |     runtime filters: RF003 -> l_partkey
+|     row-size=24B cardinality=6.00M
 |
 03:AGGREGATE [FINALIZE]
 |  output: avg(l_quantity)
 |  group by: l_partkey
+|  row-size=16B cardinality=200.52K
 |
 02:SCAN KUDU [tpch_kudu.lineitem]
    runtime filters: RF001 -> tpch_kudu.lineitem.l_partkey
+   row-size=16B cardinality=6.00M
 ====
 # Q18 - Large Value tpch_kudu.customer Query
 select
@@ -1126,37 +1280,47 @@ PLAN-ROOT SINK
 |
 09:TOP-N [LIMIT=100]
 |  order by: o_totalprice DESC, o_orderdate ASC
+|  row-size=92B cardinality=100
 |
 08:AGGREGATE [FINALIZE]
 |  output: sum(l_quantity)
 |  group by: c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice
+|  row-size=92B cardinality=600.12K
 |
 07:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: o_orderkey = l_orderkey
 |  runtime filters: RF001 <- l_orderkey
+|  row-size=108B cardinality=600.12K
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: sum(l_quantity)
 |  |  group by: l_orderkey
 |  |  having: sum(l_quantity) > 300
+|  |  row-size=24B cardinality=156.34K
 |  |
 |  03:SCAN KUDU [tpch_kudu.lineitem]
+|     row-size=16B cardinality=6.00M
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF003 <- c_custkey
+|  row-size=108B cardinality=5.76M
 |
 |--00:SCAN KUDU [tpch_kudu.customer]
+|     row-size=42B cardinality=150.00K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF005 <- o_orderkey
+|  row-size=66B cardinality=5.76M
 |
 |--01:SCAN KUDU [tpch_kudu.orders]
 |     runtime filters: RF001 -> o_orderkey, RF003 -> o_custkey
+|     row-size=50B cardinality=1.50M
 |
 02:SCAN KUDU [tpch_kudu.lineitem]
    runtime filters: RF001 -> tpch_kudu.lineitem.l_orderkey, RF005 -> l_orderkey
+   row-size=16B cardinality=6.00M
 ====
 # Q19 - Discounted Revenue Query
 select
@@ -1201,18 +1365,22 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
+|  row-size=16B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  other predicates: ((p_brand = 'Brand#12' AND p_container IN ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') AND l_quantity >= 1 AND l_quantity <= 11 AND p_size <= 5) OR (p_brand = 'Brand#23' AND p_container IN ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') AND l_quantity >= 10 AND l_quantity <= 20 AND p_size <= 10) OR (p_brand = 'Brand#34' AND p_container IN ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') AND l_quantity >= 20 AND l_quantity <= 30 AND p_size <= 15))
 |  runtime filters: RF001 <- p_partkey
+|  row-size=92B cardinality=79.99K
 |
 |--01:SCAN KUDU [tpch_kudu.part]
 |     kudu predicates: p_size >= 1
+|     row-size=60B cardinality=20.00K
 |
 00:SCAN KUDU [tpch_kudu.lineitem]
    kudu predicates: l_shipmode IN ('AIR', 'AIR REG'), l_shipinstruct = 'DELIVER IN PERSON'
    runtime filters: RF001 -> l_partkey
+   row-size=32B cardinality=801.95K
 ====
 # Q20 - Potential Part Promotion Query
 select
@@ -1258,43 +1426,54 @@ PLAN-ROOT SINK
 |
 10:SORT
 |  order by: s_name ASC
+|  row-size=67B cardinality=400
 |
 09:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF001 <- s_suppkey
+|  row-size=87B cardinality=400
 |
 |--08:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF009 <- n_nationkey
+|  |  row-size=87B cardinality=400
 |  |
 |  |--01:SCAN KUDU [tpch_kudu.nation]
 |  |     kudu predicates: n_name = 'CANADA'
+|  |     row-size=2B cardinality=1
 |  |
 |  00:SCAN KUDU [tpch_kudu.supplier]
 |     runtime filters: RF009 -> s_nationkey
+|     row-size=85B cardinality=10.00K
 |
 07:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = ps_suppkey
 |  other join predicates: ps_availqty > 0.5 * sum(l_quantity)
 |  runtime filters: RF004 <- ps_partkey, RF005 <- ps_suppkey
+|  row-size=24B cardinality=79.79K
 |
 |--06:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: ps_partkey = p_partkey
 |  |  runtime filters: RF007 <- p_partkey
+|  |  row-size=24B cardinality=79.79K
 |  |
 |  |--03:SCAN KUDU [tpch_kudu.part]
 |  |     predicates: p_name LIKE 'forest%'
+|  |     row-size=57B cardinality=20.00K
 |  |
 |  02:SCAN KUDU [tpch_kudu.partsupp]
 |     runtime filters: RF001 -> ps_suppkey, RF007 -> ps_partkey
+|     row-size=24B cardinality=800.00K
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(l_quantity)
 |  group by: l_partkey, l_suppkey
+|  row-size=32B cardinality=600.12K
 |
 04:SCAN KUDU [tpch_kudu.lineitem]
    kudu predicates: l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
    runtime filters: RF001 -> tpch_kudu.lineitem.l_suppkey, RF004 -> tpch_kudu.lineitem.l_partkey, RF005 -> tpch_kudu.lineitem.l_suppkey
+   row-size=24B cardinality=600.12K
 ====
 # Q21 - Suppliers Who Kept Orders Waiting Query
 select
@@ -1344,50 +1523,63 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=100]
 |  order by: count(*) DESC, s_name ASC
+|  row-size=38B cardinality=100
 |
 11:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: s_name
+|  row-size=38B cardinality=7.68K
 |
 10:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: l3.l_orderkey = l1.l_orderkey
 |  other join predicates: l3.l_suppkey != l1.l_suppkey
+|  row-size=122B cardinality=7.68K
 |
 |--09:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: l2.l_orderkey = l1.l_orderkey
 |  |  other join predicates: l2.l_suppkey != l1.l_suppkey
 |  |  runtime filters: RF001 <- l1.l_orderkey
+|  |  row-size=122B cardinality=7.68K
 |  |
 |  |--08:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: s_nationkey = n_nationkey
 |  |  |  runtime filters: RF003 <- n_nationkey
+|  |  |  row-size=122B cardinality=7.68K
 |  |  |
 |  |  |--03:SCAN KUDU [tpch_kudu.nation]
 |  |  |     kudu predicates: n_name = 'SAUDI ARABIA'
+|  |  |     row-size=2B cardinality=1
 |  |  |
 |  |  07:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: l1.l_suppkey = s_suppkey
 |  |  |  runtime filters: RF005 <- s_suppkey
+|  |  |  row-size=120B cardinality=191.92K
 |  |  |
 |  |  |--00:SCAN KUDU [tpch_kudu.supplier]
 |  |  |     runtime filters: RF003 -> s_nationkey
+|  |  |     row-size=44B cardinality=10.00K
 |  |  |
 |  |  06:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: l1.l_orderkey = o_orderkey
 |  |  |  runtime filters: RF007 <- o_orderkey
+|  |  |  row-size=76B cardinality=191.92K
 |  |  |
 |  |  |--02:SCAN KUDU [tpch_kudu.orders]
 |  |  |     kudu predicates: o_orderstatus = 'F'
+|  |  |     row-size=8B cardinality=500.00K
 |  |  |
 |  |  01:SCAN KUDU [tpch_kudu.lineitem l1]
 |  |     predicates: l1.l_receiptdate > l1.l_commitdate
 |  |     runtime filters: RF005 -> l1.l_suppkey, RF007 -> l1.l_orderkey
+|  |     row-size=68B cardinality=600.12K
 |  |
 |  04:SCAN KUDU [tpch_kudu.lineitem l2]
 |     runtime filters: RF001 -> l2.l_orderkey
+|     row-size=16B cardinality=6.00M
 |
 05:SCAN KUDU [tpch_kudu.lineitem l3]
    predicates: l3.l_receiptdate > l3.l_commitdate
+   row-size=68B cardinality=600.12K
 ====
 # Q22 - Global Sales Opportunity Query
 select
@@ -1431,26 +1623,34 @@ PLAN-ROOT SINK
 |
 07:SORT
 |  order by: cntrycode ASC
+|  row-size=36B cardinality=15.00K
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*), sum(c_acctbal)
 |  group by: substr(c_phone, 1, 2)
+|  row-size=36B cardinality=15.00K
 |
 05:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: o_custkey = c_custkey
+|  row-size=55B cardinality=15.00K
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: c_acctbal > round(avg(c_acctbal), 1)
+|  |  row-size=55B cardinality=15.00K
 |  |
 |  |--02:AGGREGATE [FINALIZE]
 |  |  |  output: avg(c_acctbal)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  01:SCAN KUDU [tpch_kudu.customer]
 |  |     predicates: substr(c_phone, 1, 2) IN ('13', '31', '23', '29', '30', '18', '17')
 |  |     kudu predicates: c_acctbal > 0
+|  |     row-size=39B cardinality=15.00K
 |  |
 |  00:SCAN KUDU [tpch_kudu.customer]
 |     predicates: substr(c_phone, 1, 2) IN ('13', '31', '23', '29', '30', '18', '17')
+|     row-size=47B cardinality=15.00K
 |
 03:SCAN KUDU [tpch_kudu.orders]
+   row-size=8B cardinality=1.50M
 ====


[12/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test b/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test
index 9ca806f..a6c32e3 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test
@@ -7,15 +7,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=167B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id < 10
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    predicates: t1.id = 10
    runtime filters: RF000 -> t1.year
+   row-size=95B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -24,17 +27,20 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.year = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=167B cardinality=1
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id < 10
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    predicates: t1.id = 10
    runtime filters: RF000 -> t1.year
+   row-size=95B cardinality=1
 ====
 # Four-way join query
 select straight_join * from functional.alltypestiny t1, functional.alltypesagg t2,
@@ -47,31 +53,38 @@ PLAN-ROOT SINK
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: t3.month = t4.id
 |  runtime filters: RF000 <- t4.id
+|  row-size=345B cardinality=9
 |
 |--03:SCAN HDFS [functional.alltypesnopart t4]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t4.bigint_col < 10
+|     row-size=72B cardinality=0
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: t2.id = t3.tinyint_col
 |  runtime filters: RF002 <- t3.tinyint_col
+|  row-size=273B cardinality=9
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t3.month
+|     row-size=89B cardinality=8
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.int_col
 |  runtime filters: RF004 <- t2.int_col
+|  row-size=184B cardinality=92
 |
 |--01:SCAN HDFS [functional.alltypesagg t2]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: t2.bool_col = TRUE
 |     runtime filters: RF002 -> t2.id
+|     row-size=95B cardinality=5.50K
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    runtime filters: RF004 -> t1.year
+   row-size=89B cardinality=8
 ====
 # Two-way join query where multiple runtime filters are generated
 select straight_join * from functional.alltypesagg t1, functional.alltypesnopart t2
@@ -82,14 +95,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.int_col, t1.month = t2.bigint_col
 |  runtime filters: RF000 <- t2.int_col, RF001 <- t2.bigint_col
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 10
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year, RF001 -> t1.month
+   row-size=95B cardinality=11.00K
 ====
 # Two-way join query with an inline view in the build side of the join
 select straight_join * from functional.alltypesagg t1,
@@ -101,14 +117,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 1
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year
+   row-size=95B cardinality=11.00K
 ====
 # Two-way join query with an inline view in the build side of the join where the
 # right child of the join predicate is an arithmetic expr between two slots
@@ -122,14 +141,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = id + int_col
 |  runtime filters: RF000 <- id + int_col
+|  row-size=111B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.bigint_col < 10
+|     row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year
+   row-size=95B cardinality=11.00K
 ====
 # Two-way join query where the lhs of the join predicate is an arithmetic expr
 select straight_join * from functional.alltypesagg t1, functional.alltypesnopart t2
@@ -140,14 +162,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year + 1 = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.int_col < 10
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year + 1
+   row-size=95B cardinality=11.00K
 ====
 # Two-way join query with join predicates that are not suitable for hashing
 select straight_join * from functional.alltypesagg t1, functional.alltypesnopart t2
@@ -161,15 +186,18 @@ PLAN-ROOT SINK
 |  hash predicates: t1.id = t2.id
 |  other predicates: t1.year = t1.month + t2.int_col, t2.tinyint_col = t1.year + t2.smallint_col, t1.year + t2.int_col = t1.month + t2.tinyint_col
 |  runtime filters: RF000 <- t2.id
+|  row-size=167B cardinality=11
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.bigint_col = 1
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    predicates: t1.int_col = 1
    runtime filters: RF000 -> t1.id
+   row-size=95B cardinality=11
 ====
 # Two-way join query where the left child of the equi-join predicate
 # is an arithmetic expr between two slots from the same scan tuple
@@ -182,14 +210,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year + t1.month = t2.id, t1.int_col * 100 = t2.bigint_col / 100, t1.int_col + 1 - t1.tinyint_col = t2.smallint_col + 10
 |  runtime filters: RF000 <- t2.id, RF001 <- t2.bigint_col / 100, RF002 <- t2.smallint_col + 10
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.bool_col = FALSE
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year + t1.month, RF001 -> t1.int_col * 100, RF002 -> t1.int_col + 1 - t1.tinyint_col
+   row-size=95B cardinality=11.00K
 ====
 # Three-way join query with an inline view on the probe side of the join where the left
 # child of the equi-join predicate is an arithmetic expr between two slots from
@@ -204,21 +235,26 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year + t2.year = t3.int_col
+|  row-size=88B cardinality=7.81K
 |
 |--03:SCAN HDFS [functional.alltypesnopart t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.bool_col = TRUE
+|     row-size=72B cardinality=0
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=16B cardinality=7.81K
 |
 |--01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.id
+   row-size=8B cardinality=11.00K
 ====
 # Two-way join query with an inline view in the build side of the join that has an
 # aggregation
@@ -232,18 +268,23 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.month = id, t1.year = count(int_col)
 |  runtime filters: RF000 <- id, RF001 <- count(int_col)
+|  row-size=107B cardinality=0
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(int_col)
 |  |  group by: id
 |  |  having: count(int_col) < 10
+|  |  row-size=12B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: t1.year < 10
    partitions=0/11 files=0 size=0B
    runtime filters: RF000 -> t1.month, RF001 -> t1.year
+   row-size=95B cardinality=0
 ====
 # Two-way join query with an inline view in the build side of the join that has a
 # two-way join (bushy plan)
@@ -257,21 +298,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.month = t3.tinyint_col, t1.year = t2.id + t3.id
 |  runtime filters: RF000 <- t3.tinyint_col, RF001 <- t2.id + t3.id
+|  row-size=112B cardinality=11.00K
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t2.int_col = t3.int_col
 |  |  runtime filters: RF004 <- t3.int_col
+|  |  row-size=17B cardinality=0
 |  |
 |  |--02:SCAN HDFS [functional.alltypesnopart t3]
 |  |     partitions=1/1 files=0 size=0B
+|  |     row-size=9B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     runtime filters: RF004 -> t2.int_col
+|     row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.month, RF001 -> t1.year
+   row-size=95B cardinality=11.00K
 ====
 # Four-way join query with an inline view in the build side of the join where the
 # inline view has a three-way cyclic join (bushy plan)
@@ -286,30 +332,37 @@ PLAN-ROOT SINK
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.int_col, t1.month = t4.tinyint_col
 |  runtime filters: RF000 <- t2.int_col, RF001 <- t4.tinyint_col
+|  row-size=117B cardinality=11.00K
 |
 |--05:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t2.tinyint_col = t4.tinyint_col, t3.int_col = t4.int_col
 |  |  runtime filters: RF004 <- t4.tinyint_col, RF005 <- t4.int_col
+|  |  row-size=22B cardinality=0
 |  |
 |  |--03:SCAN HDFS [functional.alltypesnopart t4]
 |  |     partitions=1/1 files=0 size=0B
+|  |     row-size=5B cardinality=0
 |  |
 |  04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t2.id = t3.id
 |  |  runtime filters: RF008 <- t3.id
+|  |  row-size=17B cardinality=0
 |  |
 |  |--02:SCAN HDFS [functional.alltypesnopart t3]
 |  |     partitions=1/1 files=0 size=0B
 |  |     runtime filters: RF005 -> t3.int_col
+|  |     row-size=8B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.int_col = t2.id
 |     runtime filters: RF004 -> t2.tinyint_col, RF008 -> t2.id
+|     row-size=9B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year, RF001 -> t1.month
+   row-size=95B cardinality=11.00K
 ====
 # Four-way join query between base tables in a star schema
 select straight_join * from functional.alltypesagg t1, functional.alltypesnopart t2,
@@ -322,32 +375,39 @@ PLAN-ROOT SINK
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t4.tinyint_col
 |  runtime filters: RF000 <- t4.tinyint_col
+|  row-size=311B cardinality=11.00K
 |
 |--03:SCAN HDFS [functional.alltypesnopart t4]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t4.bigint_col IN (1, 2)
+|     row-size=72B cardinality=0
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t3.int_col
 |  runtime filters: RF002 <- t3.int_col
+|  row-size=239B cardinality=11.00K
 |
 |--02:SCAN HDFS [functional.alltypesnopart t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.bool_col = TRUE
 |     runtime filters: RF000 -> t3.int_col
+|     row-size=72B cardinality=0
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.id
 |  runtime filters: RF004 <- t2.id
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.bool_col = FALSE
 |     runtime filters: RF000 -> t2.id, RF002 -> t2.id
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year, RF002 -> t1.year, RF004 -> t1.year
+   row-size=95B cardinality=11.00K
 ====
 # Five-way cyclic join query
 select straight_join * from functional.alltypesagg t1, functional.alltypesnopart t2,
@@ -360,37 +420,46 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.month = t5.id, t4.smallint_col = t5.smallint_col
 |  runtime filters: RF000 <- t5.id, RF001 <- t5.smallint_col
+|  row-size=400B cardinality=11.00K
 |
 |--04:SCAN HDFS [functional.alltypesnopart t5]
 |     partitions=1/1 files=0 size=0B
+|     row-size=72B cardinality=0
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: t3.month = t4.bigint_col
 |  runtime filters: RF004 <- t4.bigint_col
+|  row-size=328B cardinality=11.00K
 |
 |--03:SCAN HDFS [functional.alltypesnopart t4]
 |     partitions=1/1 files=0 size=0B
 |     runtime filters: RF001 -> t4.smallint_col
+|     row-size=72B cardinality=0
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: t2.int_col = t3.tinyint_col
 |  runtime filters: RF006 <- t3.tinyint_col
+|  row-size=256B cardinality=11.00K
 |
 |--02:SCAN HDFS [functional.alltypessmall t3]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF004 -> t3.month
+|     row-size=89B cardinality=100
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.id
 |  runtime filters: RF008 <- t2.id
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     runtime filters: RF006 -> t2.int_col
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.month, RF008 -> t1.year
+   row-size=95B cardinality=11.00K
 ====
 # Two-way left outer join query; no runtime filters should be generated from the
 # ON-clause equi-join predicate
@@ -403,13 +472,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.year = t2.int_col
 |  other predicates: t2.id = 1
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 1
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
+   row-size=95B cardinality=11.00K
 ====
 # Two-way left outer join query where not all equi-join predicates should
 # generate a runtime filter
@@ -423,14 +495,17 @@ PLAN-ROOT SINK
 |  hash predicates: t1.year = t2.int_col
 |  other predicates: t2.id = 2, t1.month = t2.tinyint_col
 |  runtime filters: RF000 <- t2.tinyint_col
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 2
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.month
+   row-size=95B cardinality=11.00K
 ====
 # Multi-way join query with outer joins
 select straight_join * from functional.alltypesagg t1 left outer join functional.alltypesnopart t2
@@ -446,37 +521,46 @@ PLAN-ROOT SINK
 |  hash predicates: t1.year = t5.smallint_col
 |  other predicates: t2.id = 1, t3.int_col = 1, t4.bool_col = TRUE
 |  runtime filters: RF000 <- t5.smallint_col
+|  row-size=383B cardinality=11.00K
 |
 |--04:SCAN HDFS [functional.alltypesnopart t5]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t5.bool_col = FALSE
+|     row-size=72B cardinality=0
 |
 07:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: t1.year = t4.tinyint_col
+|  row-size=311B cardinality=11.00K
 |
 |--03:SCAN HDFS [functional.alltypesnopart t4]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t4.bool_col = TRUE
+|     row-size=72B cardinality=0
 |
 06:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.year = t3.id
+|  row-size=239B cardinality=11.00K
 |
 |--02:SCAN HDFS [functional.alltypesnopart t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.int_col = 1
 |     runtime filters: RF000 -> t3.id
+|     row-size=72B cardinality=0
 |
 05:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.year = t2.int_col
+|  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 1
 |     runtime filters: RF000 -> t2.int_col
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year
+   row-size=95B cardinality=11.00K
 ====
 # Two-way right outer join query where a runtime filter can be pushed to the nullable
 # probe side
@@ -492,15 +576,19 @@ PLAN-ROOT SINK
 |  other join predicates: t2.int_col = 10
 |  other predicates: t1.int_col = 1, t1.month = t2.tinyint_col
 |  runtime filters: RF000 <- t2.int_col, RF001 <- t2.tinyint_col
+|  row-size=167B cardinality=0
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.id = 10
+|     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
+   partition predicates: t1.month = 1, t1.year = 10
    partitions=0/11 files=0 size=0B
    predicates: t1.int_col = 1
    runtime filters: RF000 -> t1.year, RF001 -> t1.month
+   row-size=95B cardinality=0
 ====
 # Three-way join query with semi joins
 select straight_join * from functional.alltypesagg t1 left semi join functional.alltypesnopart t2
@@ -513,21 +601,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: t1.month = t3.tinyint_col
 |  runtime filters: RF000 <- t3.tinyint_col
+|  row-size=72B cardinality=0
 |
 |--02:SCAN HDFS [functional.alltypesnopart t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.id = 1
+|     row-size=72B cardinality=0
 |
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t1.year = t2.int_col
 |  runtime filters: RF002 <- t2.int_col
+|  row-size=95B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=4B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.month, RF002 -> t1.year
+   row-size=95B cardinality=11.00K
 ====
 # Query with a subquery that is converted to a null-aware left anti join
 select straight_join * from functional.alltypesagg t1
@@ -538,14 +631,17 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  hash predicates: t1.year = id
+|  row-size=95B cardinality=1.10K
 |
 |--01:SCAN HDFS [functional.alltypesnopart]
 |     partitions=1/1 files=0 size=0B
 |     predicates: int_col = 10
+|     row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    predicates: t1.int_col < 10
+   row-size=95B cardinality=1.10K
 ====
 # Two-way join query between two inline views where the scan node to apply the filter
 # is below an aggregation node in the probe side of the join
@@ -559,18 +655,22 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: year = id
 |  runtime filters: RF000 <- id
+|  row-size=21B cardinality=1
 |
 |--02:SCAN HDFS [functional.alltypesnopart]
 |     partitions=1/1 files=0 size=0B
 |     predicates: tinyint_col < 10
+|     row-size=9B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: year
+|  row-size=12B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year
+   row-size=4B cardinality=11.00K
 ====
 # Two-way join query where the lhs of the join is an inline view with an aggregation;
 # the runtime filter cannot be pushed through the aggregation node
@@ -583,17 +683,21 @@ PLAN-ROOT SINK
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: count(*) = t2.id
+|  row-size=101B cardinality=1
 |
 |--02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t2.int_col = 1
+|     row-size=89B cardinality=730
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: year
+|  row-size=12B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====
 # Two-way join query with multiple nested inline views in the probe side of the join
 # where the scan node to apply the filter is below multiple aggregation nodes
@@ -610,28 +714,35 @@ PLAN-ROOT SINK
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: v1.year = t3.smallint_col
 |  runtime filters: RF000 <- t3.smallint_col
+|  row-size=88B cardinality=1
 |
 |--05:SCAN HDFS [functional.alltypesnopart t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.id = 1
+|     row-size=72B cardinality=0
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: year, t2.int_col
+|  row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: month = t2.int_col
 |  runtime filters: RF002 <- t2.int_col
+|  row-size=12B cardinality=1
 |
 |--02:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=4B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  group by: year, month
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year, RF002 -> t1.month
+   row-size=8B cardinality=11.00K
 ====
 # Four-way join query between an inline view with an aggregation and three base tables
 select straight_join 1 from
@@ -646,33 +757,41 @@ PLAN-ROOT SINK
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: year = c.year
 |  runtime filters: RF000 <- c.year
+|  row-size=28B cardinality=58.40K
 |
 |--04:SCAN HDFS [functional.alltypestiny c]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: year = b.year
 |  runtime filters: RF002 <- b.year
+|  row-size=24B cardinality=14.60K
 |
 |--03:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
 |     predicates: b.int_col < 10
 |     runtime filters: RF000 -> b.year
+|     row-size=8B cardinality=1
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: year = a.year
 |  runtime filters: RF004 <- a.year
+|  row-size=16B cardinality=29.20K
 |
 |--02:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> a.year, RF002 -> a.year
+|     row-size=4B cardinality=8
 |
 01:AGGREGATE [FINALIZE]
 |  group by: id, year, month
+|  row-size=12B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.year, RF002 -> functional.alltypes.year, RF004 -> functional.alltypes.year
+   row-size=12B cardinality=7.30K
 ====
 # Two-way join query with an inline view in the probe side of the join where the
 # scan node to apply the filter is below a top-n (order by with limit) operator
@@ -685,16 +804,20 @@ PLAN-ROOT SINK
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = t2.int_col, year = t2.id
+|  row-size=80B cardinality=10
 |
 |--02:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.smallint_col = 1
+|     row-size=72B cardinality=0
 |
 01:TOP-N [LIMIT=10]
 |  order by: year ASC
+|  row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=8B cardinality=11.00K
 ====
 # Two-way join query with an inline view in the probe side of the join that has a union
 select straight_join * from
@@ -708,21 +831,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: year = t3.int_col
 |  runtime filters: RF000 <- t3.int_col
+|  row-size=76B cardinality=11.01K
 |
 |--03:SCAN HDFS [functional.alltypesnopart t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.bool_col = FALSE
+|     row-size=72B cardinality=0
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=4B cardinality=11.01K
 |
 |--02:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t2.year
+|     row-size=4B cardinality=8
 |
 01:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year
+   row-size=4B cardinality=11.00K
 ====
 # Query with nested UNION ALL operators
 select straight_join count(*) from
@@ -739,29 +867,36 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: month = b.month
 |  runtime filters: RF000 <- b.month
+|  row-size=12B cardinality=21.90K
 |
 |--04:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: b.int_col = 1
+|     row-size=8B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=4B cardinality=21.90K
 |
 |--03:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.month
+|     row-size=4B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.month
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.month
+   row-size=4B cardinality=7.30K
 ====
 # Query with nested UNION DISTINCT operators
 select straight_join count(*) from
@@ -778,32 +913,40 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: month = b.month
 |  runtime filters: RF000 <- b.month
+|  row-size=16B cardinality=216
 |
 |--05:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: b.int_col = 1
+|     row-size=8B cardinality=10
 |
 04:AGGREGATE [FINALIZE]
 |  group by: month, year
+|  row-size=8B cardinality=216
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=21.90K
 |
 |--03:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.month
+|     row-size=8B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.month
+|     row-size=8B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.month
+   row-size=8B cardinality=7.30K
 ====
 # UNION ALL query
 select straight_join t2.id, t1.year from functional.alltypesagg t1, functional.alltypesnopart t2
@@ -815,30 +958,37 @@ where t3.month = t4.smallint_col and t4.bool_col = true
 PLAN-ROOT SINK
 |
 00:UNION
+|  row-size=8B cardinality=18.30K
 |
 |--06:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t3.month = t4.smallint_col
 |  |  runtime filters: RF002 <- t4.smallint_col
+|  |  row-size=15B cardinality=7.30K
 |  |
 |  |--05:SCAN HDFS [functional.alltypesnopart t4]
 |  |     partitions=1/1 files=0 size=0B
 |  |     predicates: t4.bool_col = TRUE
+|  |     row-size=7B cardinality=0
 |  |
 |  04:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF002 -> t3.month
+|     row-size=8B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=13B cardinality=11.00K
 |
 |--02:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t2.bool_col = FALSE
+|     row-size=9B cardinality=0
 |
 01:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year
+   row-size=4B cardinality=11.00K
 ====
 # Query with UNION ALL operator on the rhs of a join node
 select straight_join count(*) from functional.alltypes a
@@ -853,25 +1003,31 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = id
 |  runtime filters: RF000 <- id
+|  row-size=8B cardinality=730
 |
 |--01:UNION
+|  |  row-size=4B cardinality=1.46K
 |  |
 |  |--03:SCAN HDFS [functional.alltypes]
 |  |     partitions=24/24 files=24 size=478.45KB
 |  |     predicates: (functional.alltypes.id - functional.alltypes.id) < 1, (functional.alltypes.int_col - functional.alltypes.int_col) < 1
+|  |     row-size=8B cardinality=730
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: (functional.alltypes.id - functional.alltypes.id) < 1, (functional.alltypes.int_col - functional.alltypes.int_col) < 1
+|     row-size=8B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: (a.id - a.id) < 1
    runtime filters: RF000 -> a.id
+   row-size=4B cardinality=730
 ====
 # Two-way join query with an inline view in the probe side of the join where the
 # scan node to apply the filter in below an analytic function on the probe side of the join
@@ -886,30 +1042,37 @@ PLAN-ROOT SINK
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: year = t3.id, month = t3.tinyint_col
+|  row-size=92B cardinality=11.00K
 |
 |--05:SCAN HDFS [functional.alltypesnopart t3]
 |     partitions=1/1 files=0 size=0B
 |     predicates: t3.bool_col = FALSE
+|     row-size=72B cardinality=0
 |
 04:ANALYTIC
 |  functions: count(id)
 |  partition by: year
 |  order by: month DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=20B cardinality=11.00K
 |
 03:SORT
 |  order by: year ASC NULLS FIRST, month DESC
+|  row-size=12B cardinality=11.00K
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=12B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=4B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.year
+   row-size=8B cardinality=11.00K
 ====
 # Two-way join query with an analytic function on the probe side
 # TODO: Propagate a runtime filter through the analytic function
@@ -923,22 +1086,28 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: year = v1.int_col
+|  row-size=109B cardinality=14.60K
 |
 |--03:SCAN HDFS [functional.alltypestiny v1]
 |     partitions=4/4 files=4 size=460B
 |     predicates: v1.int_col = 2009
+|     row-size=89B cardinality=4
 |
 02:ANALYTIC
 |  functions: sum(int_col)
 |  partition by: year
 |  order by: id ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  row-size=20B cardinality=3.65K
 |
 01:SORT
 |  order by: year ASC NULLS FIRST, id ASC
+|  row-size=12B cardinality=3.65K
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.year = 2009
    partitions=12/24 files=12 size=238.68KB
+   row-size=12B cardinality=3.65K
 ====
 # Multi-way join query with a bushy plan
 select straight_join * from
@@ -961,36 +1130,45 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.month = t4.int_col
 |  runtime filters: RF000 <- t4.int_col
+|  row-size=121B cardinality=11.00K
 |
 |--07:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: t4.smallint_col = t5.smallint_col
 |  |  other predicates: t5.bool_col = TRUE
+|  |  row-size=17B cardinality=0
 |  |
 |  |--06:SCAN HDFS [functional.alltypesnopart t5]
 |  |     partitions=1/1 files=0 size=0B
 |  |     predicates: t5.bool_col = TRUE
+|  |     row-size=11B cardinality=0
 |  |
 |  05:SCAN HDFS [functional.alltypesnopart t4]
 |     partitions=1/1 files=0 size=0B
+|     row-size=6B cardinality=0
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.year = t2.id + t3.id + 1
 |  runtime filters: RF002 <- t2.id + t3.id + 1
+|  row-size=104B cardinality=11.00K
 |
 |--03:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: t2.id = t3.id
 |  |  other predicates: t3.bool_col = FALSE
+|  |  row-size=9B cardinality=0
 |  |
 |  |--02:SCAN HDFS [functional.alltypesnopart t3]
 |  |     partitions=1/1 files=0 size=0B
 |  |     predicates: t3.bool_col = FALSE
+|  |     row-size=5B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypesnopart t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=4B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.month, RF002 -> t1.year
+   row-size=95B cardinality=11.00K
 ====
 # Multi-way join query where the slots of all the join predicates belong to the same
 # equivalence class
@@ -1003,22 +1181,27 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t2.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=16B cardinality=4
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t3.int_col = 1
+|     row-size=8B cardinality=4
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF002 <- t2.id
+|  row-size=8B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t2.id
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> t1.id, RF002 -> t1.id
+   row-size=4B cardinality=8
 ====
 # Equivalent query to the one above; the same runtime filters should be generated
 select straight_join 1 from functional.alltypestiny t1 join functional.alltypestiny t2 on t1.id = t2.id
@@ -1030,22 +1213,27 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=16B cardinality=4
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t3.int_col = 1
+|     row-size=8B cardinality=4
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF002 <- t2.id
+|  row-size=8B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t2.id
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> t1.id, RF002 -> t1.id
+   row-size=4B cardinality=8
 ====
 # Check that runtime filters are not generated in subplans
 select straight_join 1 from tpch_nested_parquet.customer c,
@@ -1057,21 +1245,28 @@ where c_custkey = v.o_orderkey
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=48B cardinality=150.00K
 |
 |--06:HASH JOIN [INNER JOIN]
 |  |  hash predicates: c_custkey = o1.o_orderkey
+|  |  row-size=48B cardinality=1
 |  |
 |  |--05:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: o1.o_orderkey = o2.o_orderkey
+|  |  |  row-size=16B cardinality=10
 |  |  |
 |  |  |--04:UNNEST [c.c_orders o2]
+|  |  |     row-size=0B cardinality=10
 |  |  |
 |  |  03:UNNEST [c.c_orders o1]
+|  |     row-size=0B cardinality=10
 |  |
 |  02:SINGULAR ROW SRC
+|     row-size=32B cardinality=1
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
+   row-size=32B cardinality=150.00K
 ====
 # Two-way join query where the build side is optimized into an empty set
 select straight_join 1
@@ -1084,12 +1279,14 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=8B cardinality=0
 |
 |--01:EMPTYSET
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> t1.id
+   row-size=4B cardinality=8
 ====
 # Two-way join query where both the build side and probe side are optimized
 # into empty sets
@@ -1101,6 +1298,7 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypestiny.id = functional.alltypessmall.id
+|  row-size=8B cardinality=0
 |
 |--01:EMPTYSET
 |
@@ -1118,29 +1316,36 @@ PLAN-ROOT SINK
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id + t2.id = t4.id
 |  runtime filters: RF000 <- t4.id
+|  row-size=16B cardinality=8
 |
 |--03:SCAN HDFS [functional.alltypestiny t4]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t3.id
 |  runtime filters: RF002 <- t3.id
+|  row-size=12B cardinality=8
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t3.id + t3.id
+|     row-size=4B cardinality=8
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF004 <- t2.id
+|  row-size=8B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t2.id + t2.id, RF002 -> t2.id
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> t1.id + t1.id, RF002 -> t1.id, RF004 -> t1.id
+   row-size=4B cardinality=8
 ====
 # IMPALA-3074: Generated runtime filter has multiple candidate target nodes not all of
 # which are valid due to type mismatch between the associated source and target
@@ -1154,21 +1359,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a3.smallint_col = a4.smallint_col
 |  runtime filters: RF000 <- a4.smallint_col
+|  row-size=8B cardinality=23.36K
 |
 |--02:SCAN HDFS [functional.alltypes a4]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=2B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a1.int_col = a3.smallint_col
 |  runtime filters: RF002 <- a3.smallint_col
+|  row-size=6B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny a3]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> a3.smallint_col
+|     row-size=2B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny a1]
    partitions=4/4 files=4 size=460B
    runtime filters: RF002 -> a1.int_col
+   row-size=4B cardinality=8
 ====
 # IMPALA-3574: Runtime filter generated from a targer expr that contains a TupleIsNull
 # predicate.
@@ -1185,25 +1395,32 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  group by: t1.int_col
+|  row-size=4B cardinality=2
 |
 04:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: if(TupleIsNull(), NULL, coalesce(int_col, 384)) = t1.month
 |  runtime filters: RF000 <- t1.month
+|  row-size=12B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
+|     partition predicates: t1.month IS NOT NULL
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=4B cardinality=115
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> coalesce(functional.alltypes.int_col, 384)
+|     row-size=4B cardinality=7.30K
 |
 02:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> coalesce(int_col, 384), RF002 -> int_col
+   row-size=4B cardinality=11.00K
 ====
 # IMPALA-4076: Test pruning the least selective runtime filters to obey
 # MAX_NUM_RUNTIME_FILTERS in the presence of zero-cardinality plan nodes. This query was
@@ -1300,110 +1517,146 @@ from big_six
 PLAN-ROOT SINK
 |
 36:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=377B cardinality=0
 |
 |--28:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.bigint_col = b.bigint_col, a.bool_col = b.bool_col, a.double_col = b.double_col, a.float_col = b.float_col, a.id = b.id, a.int_col = b.int_col, a.smallint_col = b.smallint_col, a.tinyint_col = b.tinyint_col
+|  |  row-size=64B cardinality=8
 |  |
 |  |--27:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=32B cardinality=8
 |  |
 |  26:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=32B cardinality=7.30K
 |
 35:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=313B cardinality=0
 |
 |--25:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.bool_col = b.bool_col, a.id = b.id, a.tinyint_col = b.tinyint_col
+|  |  row-size=12B cardinality=7.30K
 |  |
 |  |--24:SCAN HDFS [functional.alltypes b]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=6B cardinality=7.30K
 |  |
 |  23:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=6B cardinality=7.30K
 |
 34:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=301B cardinality=0
 |
 |--22:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.bigint_col = b.bigint_col, a.bool_col = b.bool_col, a.id = b.id, a.int_col = b.int_col, a.smallint_col = b.smallint_col, a.tinyint_col = b.tinyint_col
+|  |  row-size=40B cardinality=8
 |  |
 |  |--21:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=20B cardinality=8
 |  |
 |  20:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=20B cardinality=7.30K
 |
 33:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=261B cardinality=0
 |
 |--19:HASH JOIN [INNER JOIN]
 |  |  hash predicates: b.id = x.id
+|  |  row-size=9B cardinality=0
 |  |
 |  |--18:SCAN HDFS [functional.alltypestiny x]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  17:HASH JOIN [INNER JOIN]
 |  |  hash predicates: id = b.id
+|  |  row-size=5B cardinality=0
 |  |
 |  |--16:SCAN HDFS [functional.alltypes b]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=4B cardinality=7.30K
 |  |
 |  15:EMPTYSET
 |
 32:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=252B cardinality=24.90T
 |
 |--14:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = b.id
+|  |  row-size=8B cardinality=7.30K
 |  |
 |  |--13:SCAN HDFS [functional.alltypes b]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=4B cardinality=7.30K
 |  |
 |  12:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 31:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=244B cardinality=3.41G
 |
 |--11:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.bigint_col = b.bigint_col, a.bool_col = b.bool_col, a.double_col = b.double_col, a.float_col = b.float_col, a.id = b.id, a.int_col = b.int_col, a.smallint_col = b.smallint_col, a.tinyint_col = b.tinyint_col
 |  |  runtime filters: RF032 <- b.bigint_col, RF033 <- b.bool_col, RF034 <- b.double_col, RF035 <- b.float_col, RF036 <- b.id, RF037 <- b.int_col, RF038 <- b.smallint_col, RF039 <- b.tinyint_col
+|  |  row-size=64B cardinality=8
 |  |
 |  |--10:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=32B cardinality=8
 |  |
 |  09:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF032 -> a.bigint_col, RF033 -> a.bool_col, RF034 -> a.double_col, RF035 -> a.float_col, RF036 -> a.id, RF037 -> a.int_col, RF038 -> a.smallint_col, RF039 -> a.tinyint_col
+|     row-size=32B cardinality=7.30K
 |
 30:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=180B cardinality=426.32M
 |
 |--08:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.bool_col = b.bool_col, a.double_col = b.double_col, a.id = b.id, a.smallint_col = b.smallint_col, a.timestamp_col = b.timestamp_col, a.tinyint_col = b.tinyint_col, a.string_col = b.string_col, a.date_string_col = b.date_string_col
+|  |  row-size=130B cardinality=7.30K
 |  |
 |  |--07:SCAN HDFS [functional.alltypes b]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=65B cardinality=7.30K
 |  |
 |  06:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=65B cardinality=7.30K
 |
 29:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=50B cardinality=58.40K
 |
 |--05:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.bool_col = b.bool_col, a.id = b.id
 |  |  runtime filters: RF012 <- b.bool_col, RF013 <- b.id
+|  |  row-size=10B cardinality=8
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=5B cardinality=8
 |  |
 |  03:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF012 -> a.bool_col, RF013 -> a.id
+|     row-size=5B cardinality=7.30K
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bigint_col = b.bigint_col, a.bool_col = b.bool_col, a.id = b.id, a.int_col = b.int_col, a.smallint_col = b.smallint_col, a.tinyint_col = b.tinyint_col
+|  row-size=40B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=20B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=20B cardinality=7.30K
 ====
 # IMPALA-4490: Only generate runtime filters for hash join nodes, even if there is an
 # otherwise suitable equality predicate.
@@ -1419,20 +1672,25 @@ PLAN-ROOT SINK
 04:NESTED LOOP JOIN [LEFT OUTER JOIN]
 |  join predicates: b.id IS DISTINCT FROM c.id
 |  predicates: c.int_col = b.int_col + b.bigint_col
+|  row-size=28B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=20B cardinality=7.30K
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.id
+   row-size=16B cardinality=7.30K
 ====
 # IMPALA-5597: Runtime filter should be generated and assigned successfully when the
 # source expr and target expr have different decimal types.
@@ -1440,21 +1698,24 @@ select *
 from tpch_parquet.lineitem
 left join tpch_parquet.part on if(l_orderkey % 2 = 0, NULL, l_partkey) = p_partkey
 where l_orderkey = 965 and l_extendedprice * l_tax = p_retailprice;
----- Plan
+---- PLAN
 PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: p_partkey = if(l_orderkey % 2 = 0, NULL, l_partkey)
 |  other predicates: p_retailprice = l_extendedprice * l_tax
 |  runtime filters: RF000 <- if(l_orderkey % 2 = 0, NULL, l_partkey), RF001 <- l_extendedprice * l_tax
+|  row-size=419B cardinality=4
 |
 |--00:SCAN HDFS [tpch_parquet.lineitem]
-|     partitions=1/1 files=3 size=193.71MB
+|     partitions=1/1 files=3 size=193.60MB
 |     predicates: l_orderkey = 965
+|     row-size=231B cardinality=4
 |
 01:SCAN HDFS [tpch_parquet.part]
    partitions=1/1 files=1 size=6.23MB
    runtime filters: RF000 -> p_partkey, RF001 -> p_retailprice
+   row-size=188B cardinality=200.00K
 ====
 # IMPALA-6286: Runtime filter must not be assigned at scan 01 because that could
 # alter the query results due to the coalesce() in the join condition of join 04.
@@ -1467,18 +1728,23 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: coalesce(t2.id + 10, 100) = `$a$1`.`$c$1`
+|  row-size=12B cardinality=32
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=1B cardinality=1
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.int_col = t2.int_col
+|  row-size=12B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ====
 # IMPALA-6286: Same as above but with an inline view.
 select /* +straight_join */ 1 from functional.alltypestiny t1
@@ -1490,18 +1756,23 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: coalesce(t2.id + 10, 100) = `$a$1`.`$c$1`
+|  row-size=12B cardinality=32
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=1B cardinality=1
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.int_col = t2.int_col
+|  row-size=12B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ====
 # IMPALA-6286: The runtime filter produced by inner join 05 can safely be assigned
 # at scan 01. It would also be safe to produce a runtime filter at join 06 and assign
@@ -1517,24 +1788,31 @@ PLAN-ROOT SINK
 |
 06:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: coalesce(t2.id + 10, 100) = `$a$1`.`$c$1`
+|  row-size=16B cardinality=32
 |
 |--03:UNION
 |     constant-operands=1
+|     row-size=1B cardinality=1
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: t2.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=16B cardinality=32
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.int_col = t2.int_col
+|  row-size=12B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t2.id
+|     row-size=8B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test b/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test
index 5188d25..64c654b 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test
@@ -10,74 +10,91 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
 |  runtime filters: RF000 <- d.bool_col, RF001 <- d.year
+|  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.bool_col, RF001 -> a.year, RF004 -> a.int_col, RF005 -> a.month, RF008 -> a.id, RF009 -> a.date_string_col
+   row-size=37B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 13:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 12:EXCHANGE [UNPARTITIONED]
 |
 07:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
 |  runtime filters: RF000 <- d.bool_col, RF001 <- d.year
+|  row-size=74B cardinality=16.21G
 |
 |--11:EXCHANGE [HASH(d.bool_col,d.year)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 10:EXCHANGE [HASH(a.bool_col,a.year)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.bool_col, RF001 -> a.year, RF004 -> a.int_col, RF005 -> a.month, RF008 -> a.id, RF009 -> a.date_string_col
+   row-size=37B cardinality=7.30K
 ====
 # Keep only MAX_NUM_RUNTIME_FILTERS most selective filters, remove the rest.
 # In this query RF000 (<- d.bool_col) and RF001 (<- d.year) are the least selective
@@ -95,43 +112,52 @@ PLAN-ROOT SINK
 |
 13:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 12:EXCHANGE [UNPARTITIONED]
 |
 07:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  row-size=74B cardinality=16.21G
 |
 |--11:EXCHANGE [HASH(d.bool_col,d.year)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 10:EXCHANGE [HASH(a.bool_col,a.year)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.int_col, RF005 -> a.month
+   row-size=37B cardinality=7.30K
 ====
 # DISABLE_ROW_RUNTIME_FILTERING is set: only partition column filters are applied.
 select /* +straight_join */ count(*) from functional.alltypes a
@@ -147,30 +173,38 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
 |  runtime filters: RF001 <- d.year
+|  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF001 -> a.year, RF005 -> a.month
+   row-size=37B cardinality=7.30K
 ====
 # DISABLE_ROW_RUNTIME_FILTERING is set and MAX_NUM_RUNTIME_FILTERS is set to 2: only the 2
 # partition column filters are applied
@@ -188,30 +222,38 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
 |  runtime filters: RF001 <- d.year
+|  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF001 -> a.year, RF005 -> a.month
+   row-size=37B cardinality=7.30K
 ====
 # RUNTIME_FILTER_MODE is set to LOCAL: only local filters are applied
 select /* +straight_join */ count(*) from functional.alltypes a
@@ -227,73 +269,90 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
 |  runtime filters: RF000 <- d.bool_col, RF001 <- d.year
+|  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.bool_col, RF001 -> a.year, RF004 -> a.int_col, RF005 -> a.month, RF008 -> a.id, RF009 -> a.date_string_col
+   row-size=37B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 13:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 12:EXCHANGE [UNPARTITIONED]
 |
 07:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  row-size=74B cardinality=16.21G
 |
 |--11:EXCHANGE [HASH(d.bool_col,d.year)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 10:EXCHANGE [HASH(a.bool_col,a.year)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF004 -> a.int_col, RF005 -> a.month, RF008 -> a.id, RF009 -> a.date_string_col
+   row-size=37B cardinality=7.30K
 ====
 # RUNTIME_FILTER_MODE is set to LOCAL and MAX_NUM_RUNTIME_FILTERS is set to 3: only 3
 # local filters are kept, which means that both local and non-local filters are removed
@@ -312,72 +371,89 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF004 <- c.int_col
+|  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.int_col
+   row-size=37B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 13:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 12:EXCHANGE [UNPARTITIONED]
 |
 07:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  row-size=74B cardinality=16.21G
 |
 |--11:EXCHANGE [HASH(d.bool_col,d.year)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 10:EXCHANGE [HASH(a.bool_col,a.year)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF004 <- c.int_col
+|  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.int_col
+   row-size=37B cardinality=7.30K
 ====
 # DISABLE_ROW_RUNTIME_FILTERING is set and RUNTIME_FILTER_MODE is set to LOCAL: only local
 # partition column filters are applied
@@ -395,71 +471,88 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
 |  runtime filters: RF001 <- d.year
+|  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF001 -> a.year, RF005 -> a.month
+   row-size=37B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 13:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 12:EXCHANGE [UNPARTITIONED]
 |
 07:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  row-size=74B cardinality=16.21G
 |
 |--11:EXCHANGE [HASH(d.bool_col,d.year)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 10:EXCHANGE [HASH(a.bool_col,a.year)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
 |  runtime filters: RF005 <- c.month
+|  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF005 -> a.month
+   row-size=37B cardinality=7.30K
 ====
 # RUNTIME_FILTER_MODE is OFF: no filters are applied
 select /* +straight_join */ count(*) from functional.alltypes a
@@ -475,27 +568,35 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
+|  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=37B cardinality=7.30K
 ====
 # MAX_NUM_RUNTIME_FILTERS is 0: no filters are applied
 select /* +straight_join */ count(*) from functional.alltypes a
@@ -511,27 +612,35 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = c.int_col, a.month = c.month
+|  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.date_string_col = b.date_string_col
+|  row-size=61B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=24B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=37B cardinality=7.30K
 ====
 # DISABLE_ROW_RUNTIME_FILTERING completely disables filters for Kudu.
 select /* +straight_join */ count(*) from functional_kudu.alltypes a
@@ -543,11 +652,15 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
+|  row-size=8B cardinality=7.30K
 |
 |--01:SCAN KUDU [functional_kudu.alltypes b]
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN KUDU [functional_kudu.alltypes a]
+   row-size=4B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test b/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test
index 74f09c3..babc89d 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test
@@ -7,22 +7,27 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
+|  row-size=8B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(int_col)
+|  row-size=8B cardinality=1
 |
 04:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 select count(distinct int_col) from functional.alltypes;
 ---- QUERYOPTIONS
@@ -34,22 +39,27 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
+|  row-size=8B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(int_col)
+|  row-size=8B cardinality=1
 |
 04:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # Distinct agg with a grouping expr
 select count(distinct int_col) from functional.alltypes group by year;
@@ -63,17 +73,21 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col)
 |  group by: year
+|  row-size=12B cardinality=2
 |
 04:AGGREGATE
 |  group by: year, int_col
+|  row-size=8B cardinality=20
 |
 03:EXCHANGE [HASH(year)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: year, int_col
+|  row-size=8B cardinality=20
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====
 select count(distinct int_col) from functional.alltypes group by year;
 ---- QUERYOPTIONS
@@ -87,23 +101,28 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
 |  group by: year
+|  row-size=12B cardinality=2
 |
 05:EXCHANGE [HASH(year)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(int_col)
 |  group by: year
+|  row-size=12B cardinality=2
 |
 04:AGGREGATE
 |  group by: year, int_col
+|  row-size=8B cardinality=20
 |
 03:EXCHANGE [HASH(year,int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: year, int_col
+|  row-size=8B cardinality=20
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====
 # Distinct agg without a grouping expr and with a compatible child partition
 select count(distinct a.int_col) from functional.alltypes a inner join [shuffle]
@@ -115,29 +134,35 @@ PLAN-ROOT SINK
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(a.int_col)
+|  row-size=8B cardinality=1
 |
 07:EXCHANGE [UNPARTITIONED]
 |
 04:AGGREGATE
 |  output: count(a.int_col)
+|  row-size=8B cardinality=1
 |
 03:AGGREGATE
 |  group by: a.int_col
+|  row-size=4B cardinality=10
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.int_col = b.int_col
 |  runtime filters: RF000 <- b.int_col
+|  row-size=8B cardinality=5.33M
 |
 |--06:EXCHANGE [HASH(b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 05:EXCHANGE [HASH(a.int_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.int_col
+   row-size=4B cardinality=7.30K
 ====
 select count(distinct a.int_col) from functional.alltypes a inner join [shuffle]
     functional.alltypes b on a.int_col = b.int_col;
@@ -150,29 +175,35 @@ PLAN-ROOT SINK
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(a.int_col)
+|  row-size=8B cardinality=1
 |
 07:EXCHANGE [UNPARTITIONED]
 |
 04:AGGREGATE
 |  output: count(a.int_col)
+|  row-size=8B cardinality=1
 |
 03:AGGREGATE
 |  group by: a.int_col
+|  row-size=4B cardinality=10
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.int_col = b.int_col
 |  runtime filters: RF000 <- b.int_col
+|  row-size=8B cardinality=5.33M
 |
 |--06:EXCHANGE [HASH(b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 05:EXCHANGE [HASH(a.int_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.int_col
+   row-size=4B cardinality=7.30K
 ====
 # Distinct agg with a grouping expr and a compatible child partition
 select count(distinct a.int_col) from functional.alltypes a inner join [shuffle]
@@ -187,24 +218,29 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: count(a.int_col)
 |  group by: a.year
+|  row-size=12B cardinality=2
 |
 03:AGGREGATE
 |  group by: a.year, a.int_col
+|  row-size=8B cardinality=20
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.year = b.year
 |  runtime filters: RF000 <- b.year
+|  row-size=12B cardinality=26.64M
 |
 |--06:EXCHANGE [HASH(b.year)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 05:EXCHANGE [HASH(a.year)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.year
+   row-size=8B cardinality=7.30K
 ====
 select count(distinct a.int_col) from functional.alltypes a inner join [shuffle]
     functional.alltypes b on a.year = b.year group by a.year;
@@ -219,35 +255,42 @@ PLAN-ROOT SINK
 10:AGGREGATE [FINALIZE]
 |  output: count:merge(a.int_col)
 |  group by: a.year
+|  row-size=12B cardinality=2
 |
 09:EXCHANGE [HASH(a.year)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(a.int_col)
 |  group by: a.year
+|  row-size=12B cardinality=2
 |
 08:AGGREGATE
 |  group by: a.year, a.int_col
+|  row-size=8B cardinality=20
 |
 07:EXCHANGE [HASH(a.year,a.int_col)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: a.year, a.int_col
+|  row-size=8B cardinality=20
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.year = b.year
 |  runtime filters: RF000 <- b.year
+|  row-size=12B cardinality=26.64M
 |
 |--06:EXCHANGE [HASH(b.year)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 05:EXCHANGE [HASH(a.year)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.year
+   row-size=8B cardinality=7.30K
 ====
 # The input is partitioned by distinct exprs + grouping exprs
 select count(distinct a.int_col) from functional.alltypes a inner join [shuffle]
@@ -264,30 +307,36 @@ PLAN-ROOT SINK
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(a.int_col)
 |  group by: a.year
+|  row-size=12B cardinality=2
 |
 07:EXCHANGE [HASH(a.year)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(a.int_col)
 |  group by: a.year
+|  row-size=12B cardinality=2
 |
 03:AGGREGATE
 |  group by: a.year, a.int_col
+|  row-size=8B cardinality=20
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.int_col = b.int_col, a.year = b.year
 |  runtime filters: RF000 <- b.int_col, RF001 <- b.year
+|  row-size=16B cardinality=5.33M
 |
 |--06:EXCHANGE [HASH(b.int_col,b.year)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 05:EXCHANGE [HASH(a.int_col,a.year)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.int_col, RF001 -> a.year
+   row-size=8B cardinality=7.30K
 ====
 select count(distinct a.int_col) from functional.alltypes a inner join [shuffle]
    functional.alltypes b on a.year = b.year and a.int_col = b.int_col group by a.year;
@@ -303,27 +352,33 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: count(a.int_col)
 |  group by: a.year
+|  row-size=12B cardinality=2
 |
 08:AGGREGATE
 |  group by: a.year, a.int_col
+|  row-size=8B cardinality=20
 |
 07:EXCHANGE [HASH(a.year)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: a.year, a.int_col
+|  row-size=8B cardinality=20
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.int_col = b.int_col, a.year = b.year
 |  runtime filters: RF000 <- b.int_col, RF001 <- b.year
+|  row-size=16B cardinality=5.33M
 |
 |--06:EXCHANGE [HASH(b.int_col,b.year)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 05:EXCHANGE [HASH(a.int_col,a.year)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.int_col, RF001 -> a.year
+   row-size=8B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test b/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test
index 2c91472..e60e032 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test
@@ -3,14 +3,16 @@ select * from functional_seq.alltypes t1 limit 5
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional_seq.alltypes t1]
-   partitions=24/24 files=24 size=562.59KB
+   partitions=24/24 files=24 size=557.47KB
    limit: 5
+   row-size=80B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional_seq.alltypes t1]
-   partitions=24/24 files=24 size=562.59KB
+   partitions=24/24 files=24 size=557.47KB
    limit: 5
+   row-size=80B cardinality=5
 ====
 # Query is over the limit of 8 rows to be optimized, will distribute the query
 select * from functional.alltypes t1 limit 10
@@ -20,6 +22,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    limit: 10
+   row-size=89B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -29,6 +32,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    limit: 10
+   row-size=89B cardinality=10
 ====
 # Query is optimized, run on coordinator only
 select * from functional.alltypes t1 limit 5
@@ -38,12 +42,14 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    limit: 5
+   row-size=89B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    limit: 5
+   row-size=89B cardinality=5
 ====
 # If a predicate is applied the optimization is disabled
 select * from functional.alltypes t1 where t1.id < 99 limit 5
@@ -54,6 +60,7 @@ PLAN-ROOT SINK
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id < 99
    limit: 5
+   row-size=89B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -64,6 +71,7 @@ PLAN-ROOT SINK
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id < 99
    limit: 5
+   row-size=89B cardinality=5
 ====
 # No optimization for hbase tables
 select * from functional_hbase.alltypes t1 where t1.id < 99 limit 5
@@ -73,6 +81,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypes t1]
    predicates: t1.id < 99
    limit: 5
+   row-size=80B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -82,6 +91,7 @@ PLAN-ROOT SINK
 00:SCAN HBASE [functional_hbase.alltypes t1]
    predicates: t1.id < 99
    limit: 5
+   row-size=80B cardinality=5
 ====
 # Applies optimization for small queries in hbase
 select * from functional_hbase.alltypes t1 limit 5
@@ -90,11 +100,13 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypes t1]
    limit: 5
+   row-size=80B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypes t1]
    limit: 5
+   row-size=80B cardinality=5
 ====
 insert into
   functional_hbase.alltypes
@@ -105,11 +117,13 @@ WRITE TO HBASE table=functional_hbase.alltypes
 |
 00:UNION
    constant-operands=1
+   row-size=57B cardinality=1
 ---- DISTRIBUTEDPLAN
 WRITE TO HBASE table=functional_hbase.alltypes
 |
 00:UNION
    constant-operands=1
+   row-size=57B cardinality=1
 ====
 create table tm as select * from functional_hbase.alltypes limit 5
 ---- PLAN
@@ -118,12 +132,14 @@ WRITE TO HDFS [default.tm, OVERWRITE=false]
 |
 00:SCAN HBASE [functional_hbase.alltypes]
    limit: 5
+   row-size=80B cardinality=5
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.tm, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HBASE [functional_hbase.alltypes]
    limit: 5
+   row-size=80B cardinality=5
 ====
 create table tm as select * from functional_hbase.alltypes limit 50
 ---- PLAN
@@ -132,6 +148,7 @@ WRITE TO HDFS [default.tm, OVERWRITE=false]
 |
 00:SCAN HBASE [functional_hbase.alltypes]
    limit: 50
+   row-size=80B cardinality=50
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [default.tm, OVERWRITE=false]
 |  partitions=1
@@ -141,6 +158,7 @@ WRITE TO HDFS [default.tm, OVERWRITE=false]
 |
 00:SCAN HBASE [functional_hbase.alltypes]
    limit: 50
+   row-size=80B cardinality=50
 ====
 select * from functional_hbase.alltypes limit 5
 union all
@@ -150,23 +168,29 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=80B cardinality=7
 |
 |--02:SCAN HBASE [functional_hbase.alltypes]
 |     limit: 2
+|     row-size=80B cardinality=2
 |
 01:SCAN HBASE [functional_hbase.alltypes]
    limit: 5
+   row-size=80B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=80B cardinality=7
 |
 |--02:SCAN HBASE [functional_hbase.alltypes]
 |     limit: 2
+|     row-size=80B cardinality=2
 |
 01:SCAN HBASE [functional_hbase.alltypes]
    limit: 5
+   row-size=80B cardinality=5
 ====
 select * from functional_hbase.alltypes limit 5
 union all
@@ -176,29 +200,35 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=80B cardinality=10
 |
 |--02:SCAN HBASE [functional_hbase.alltypes]
 |     limit: 5
+|     row-size=80B cardinality=5
 |
 01:SCAN HBASE [functional_hbase.alltypes]
    limit: 5
+   row-size=80B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=80B cardinality=10
 |
 |--04:EXCHANGE [UNPARTITIONED]
 |  |  limit: 5
 |  |
 |  02:SCAN HBASE [functional_hbase.alltypes]
 |     limit: 5
+|     row-size=80B cardinality=5
 |
 03:EXCHANGE [UNPARTITIONED]
 |  limit: 5
 |
 01:SCAN HBASE [functional_hbase.alltypes]
    limit: 5
+   row-size=80B cardinality=5
 ====
 # Two scans cannot run in the same fragment. IMPALA-561
 select * from
@@ -211,15 +241,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=48B cardinality=0
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl b]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl a]
    partitions=1/1 files=0 size=0B
    runtime filters: RF000 -> a.id
+   row-size=24B cardinality=0
 ====
 select * from
   functional.testtbl a, functional.testtbl b
@@ -229,14 +262,17 @@ PLAN-ROOT SINK
 04:EXCHANGE [UNPARTITIONED]
 |
 02:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=48B cardinality=0
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl b]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl a]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ====
 select * from
   functional.alltypestiny a
@@ -251,6 +287,7 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = id
 |  runtime filters: RF000 <- id
 |  limit: 5
+|  row-size=89B cardinality=5
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
@@ -260,10 +297,12 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     limit: 5
+|     row-size=4B cardinality=5
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=8
 ====
 # Test correct single-node planning for mixed union distinct/all (IMPALA-1553).
 select
@@ -283,20 +322,29 @@ PLAN-ROOT SINK
 |
 04:UNION
 |  pass-through-operands: 03
+|  row-size=5B cardinality=6
 |
 |--05:SCAN HDFS [functional.alltypestiny c]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=5B cardinality=2
 |
 03:AGGREGATE [FINALIZE]
 |  group by: id, bool_col
+|  row-size=5B cardinality=4
 |
 00:UNION
+|  row-size=5B cardinality=4
 |
 |--02:SCAN HDFS [functional.alltypestiny b]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=5B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny a]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=5B cardinality=2
 ====
 # IMPALA-2527: Tests that the small query optimization is disabled for collection types
 select key from functional.allcomplextypes.map_map_col.value limit 5;
@@ -309,4 +357,5 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.allcomplextypes.map_map_col.value]
    partitions=0/0 files=0 size=0B
    limit: 5
+   row-size=12B cardinality=0
 ====


[09/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/topn.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/topn.test b/testdata/workloads/functional-planner/queries/PlannerTest/topn.test
index d3ab315..5ff642c 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/topn.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/topn.test
@@ -7,9 +7,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=1]
 |  order by: name ASC
+|  row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=16B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -19,9 +21,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=1]
 |  order by: name ASC
+|  row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 select zip, count(*)
 from functional.testtbl
@@ -34,14 +38,17 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=10]
 |  order by: count(*) DESC
+|  row-size=12B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
    predicates: name LIKE 'm%'
+   row-size=16B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -51,20 +58,24 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=10]
 |  order by: count(*) DESC
+|  row-size=12B cardinality=0
 |
 04:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 03:EXCHANGE [HASH(zip)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: zip
+|  row-size=12B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
    predicates: name LIKE 'm%'
+   row-size=16B cardinality=0
 ====
 select int_col, sum(float_col)
 from functional_hbase.alltypessmall
@@ -77,13 +88,16 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=4]
 |  order by: sum(float_col) ASC
+|  row-size=12B cardinality=4
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(float_col)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: id < 5
+   row-size=12B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -93,19 +107,23 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=4]
 |  order by: sum(float_col) ASC
+|  row-size=12B cardinality=4
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum:merge(float_col)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 03:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: sum(float_col)
 |  group by: int_col
+|  row-size=12B cardinality=5
 |
 00:SCAN HBASE [functional_hbase.alltypessmall]
    predicates: id < 5
+   row-size=12B cardinality=5
 ====
 select int_col, sum(float_col), min(float_col)
 from functional_hbase.alltypessmall
@@ -134,18 +152,22 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=2]
 |  order by: int_col ASC
+|  row-size=4B cardinality=2
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=16B cardinality=10
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: t2.int_col IS NOT NULL
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall t1]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> t1.id
+   row-size=8B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -155,20 +177,24 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=2]
 |  order by: int_col ASC
+|  row-size=4B cardinality=2
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=16B cardinality=10
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: t2.int_col IS NOT NULL
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall t1]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> t1.id
+   row-size=8B cardinality=100
 ====
 # Test that the top-n is on int_col and not on the id column
 select int_col as id from functional.alltypessmall order by id limit 2
@@ -177,9 +203,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=2]
 |  order by: int_col ASC
+|  row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=4B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -189,9 +217,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=2]
 |  order by: int_col ASC
+|  row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=4B cardinality=100
 ====
 # Test that the top-n is on id and not on int_col
 select int_col as id from functional.alltypessmall order by functional.alltypessmall.id limit 2
@@ -200,9 +230,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=2]
 |  order by: id ASC
+|  row-size=8B cardinality=2
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=8B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -212,9 +244,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=2]
 |  order by: id ASC
+|  row-size=8B cardinality=2
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=8B cardinality=100
 ====
 # Test that the limit/offset is propagated correctly to child nodes. The TOP-N node
 # should have the limit/offset specified in the query. Child sort nodes should have a
@@ -228,18 +262,22 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=10 OFFSET=5]
 |  order by: int_col ASC
+|  row-size=4B cardinality=10
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=16B cardinality=10
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: t2.int_col IS NOT NULL
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall t1]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> t1.id
+   row-size=8B cardinality=100
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypessmall/year=2009/month=1/090101.txt 0:1610
@@ -261,20 +299,24 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=15]
 |  order by: int_col ASC
+|  row-size=4B cardinality=10
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=16B cardinality=10
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: t2.int_col IS NOT NULL
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall t1]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> t1.id
+   row-size=8B cardinality=100
 ====
 # test distributed top-n over a union (IMPALA-831)
 select int_col, bigint_col from
@@ -287,15 +329,19 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=10 OFFSET=5]
 |  order by: int_col DESC
+|  row-size=12B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=12B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -306,15 +352,19 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=15]
 |  order by: int_col DESC
+|  row-size=12B cardinality=15
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=12B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # test distributed top-n over a union distinct (IMPALA-831)
 select int_col, bigint_col from
@@ -327,18 +377,23 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=10 OFFSET=5]
 |  order by: int_col DESC
+|  row-size=12B cardinality=10
 |
 03:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -349,23 +404,29 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=15]
 |  order by: int_col DESC
+|  row-size=12B cardinality=15
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 05:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # test that top-n is not placed below an unpartitioned exchange with a limit
 select * from (select * from functional.alltypes limit 10) t
@@ -375,15 +436,18 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: int_col ASC
+|  row-size=89B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    limit: 10
+   row-size=89B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: int_col ASC
+|  row-size=89B cardinality=10
 |
 02:EXCHANGE [UNPARTITIONED]
 |  limit: 10
@@ -391,6 +455,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    limit: 10
+   row-size=89B cardinality=10
 ====
 # test that top-n is not placed below an unpartitioned exchange with a limit
 select * from
@@ -403,21 +468,26 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=10]
 |  order by: int_col ASC
+|  row-size=89B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
 |  limit: 10
+|  row-size=89B cardinality=10
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=10]
 |  order by: int_col ASC
+|  row-size=89B cardinality=10
 |
 04:EXCHANGE [UNPARTITIONED]
 |  limit: 10
@@ -425,10 +495,13 @@ PLAN-ROOT SINK
 00:UNION
 |  pass-through-operands: all
 |  limit: 10
+|  row-size=89B cardinality=10
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====


[14/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test
index 1afe61c..85962ce 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test
@@ -13,79 +13,101 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=4
 |
 |--08:AGGREGATE [FINALIZE]
 |  |  output: count(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  07:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 |--06:AGGREGATE [FINALIZE]
 |  |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  |  row-size=8B cardinality=1
 |  |
 |  05:SCAN HDFS [functional_parquet.alltypes]
-|     partitions=24/24 files=24 size=178.13KB
+|     partitions=24/24 files=24 size=189.28KB
+|     row-size=8B cardinality=unavailable
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  |  row-size=8B cardinality=1
 |  |
 |  03:SCAN HDFS [functional_parquet.alltypes]
-|     partitions=24/24 files=24 size=178.13KB
+|     partitions=24/24 files=24 size=189.28KB
+|     row-size=8B cardinality=unavailable
 |
 02:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  row-size=8B cardinality=1
 |
 01:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=8B cardinality=unavailable
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=4
 |
 |--16:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  15:EXCHANGE [UNPARTITIONED]
 |  |
 |  08:AGGREGATE
 |  |  output: count(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  07:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 |--14:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  13:EXCHANGE [UNPARTITIONED]
 |  |
 |  06:AGGREGATE
 |  |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  |  row-size=8B cardinality=1
 |  |
 |  05:SCAN HDFS [functional_parquet.alltypes]
-|     partitions=24/24 files=24 size=178.13KB
+|     partitions=24/24 files=24 size=189.28KB
+|     row-size=8B cardinality=unavailable
 |
 |--12:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  11:EXCHANGE [UNPARTITIONED]
 |  |
 |  04:AGGREGATE
 |  |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  |  row-size=8B cardinality=1
 |  |
 |  03:SCAN HDFS [functional_parquet.alltypes]
-|     partitions=24/24 files=24 size=178.13KB
+|     partitions=24/24 files=24 size=189.28KB
+|     row-size=8B cardinality=unavailable
 |
 10:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=1
 |
 09:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  row-size=8B cardinality=1
 |
 01:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=8B cardinality=unavailable
 ====
 # Verify that the parquet count(*) optimization is applied even if there is more than
 # one item in the select list.
@@ -95,9 +117,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=8B cardinality=unavailable
 ====
 # Select count(<partition col>) - the optimization should be disabled because it's not a
 # count(<literal>) or count(*) aggregate function.
@@ -107,9 +131,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(year)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=4B cardinality=unavailable
 ====
 # Group by partition columns.
 select month, count(*) from functional_parquet.alltypes group by month, year
@@ -119,9 +145,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
 |  group by: month, year
+|  row-size=16B cardinality=24
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=16B cardinality=unavailable
 ====
 # The optimization is disabled because tinyint_col is not a partition col.
 select tinyint_col, count(*) from functional_parquet.alltypes group by tinyint_col, year
@@ -131,9 +159,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: tinyint_col, year
+|  row-size=13B cardinality=unavailable
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=5B cardinality=unavailable
 ====
 # The optimization is disabled because there are two aggregate functions.
 select avg(year), count(*) from functional_parquet.alltypes
@@ -142,9 +172,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: avg(year), count(*)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=4B cardinality=unavailable
 ====
 # Optimization is not applied because the inner count(*) is not materialized. The outer
 # count(*) does not reference a base table.
@@ -154,11 +186,14 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 01:AGGREGATE [FINALIZE]
+|  row-size=0B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=0B cardinality=unavailable
 ====
 # The optimization is applied if count(*) is in the having clause.
 select 1 from functional_parquet.alltypes having count(*) > 1
@@ -168,9 +203,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
 |  having: count(*) > 1
+|  row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=8B cardinality=unavailable
 ====
 # The count(*) optimization is applied in the inline view.
 select count(*), count(a) from (select count(1) as a from functional_parquet.alltypes) t
@@ -179,12 +216,15 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(*), count(count(*))
+|  row-size=16B cardinality=1
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=8B cardinality=unavailable
 ====
 # The count(*) optimization is applied to the inline view even if there is a join.
 select *
@@ -197,17 +237,21 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: x.id = count(*)
 |  runtime filters: RF000 <- count(*)
+|  row-size=101B cardinality=7.30K
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
 |  |  group by: year
+|  |  row-size=12B cardinality=2
 |  |
 |  01:SCAN HDFS [functional_parquet.alltypes]
-|     partitions=24/24 files=24 size=178.13KB
+|     partitions=24/24 files=24 size=189.28KB
+|     row-size=12B cardinality=unavailable
 |
 00:SCAN HDFS [functional.alltypes x]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> x.id
+   row-size=89B cardinality=7.30K
 ====
 # The count(*) optimization is not applied if there is more than 1 table ref.
 select count(*) from functional_parquet.alltypes a, functional_parquet.alltypes b
@@ -216,14 +260,18 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=0B cardinality=unavailable
 |
 |--01:SCAN HDFS [functional_parquet.alltypes b]
-|     partitions=24/24 files=24 size=178.13KB
+|     partitions=24/24 files=24 size=189.28KB
+|     row-size=0B cardinality=unavailable
 |
 00:SCAN HDFS [functional_parquet.alltypes a]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=0B cardinality=unavailable
 ====
 # The count(*) optimization is applied if there are predicates on partition columns.
 select count(1) from functional_parquet.alltypes where year < 2010 and month > 8;
@@ -232,9 +280,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=4/24 files=4 size=29.75KB
+   partition predicates: year < 2010, month > 8
+   partitions=4/24 files=4 size=31.40KB
+   row-size=8B cardinality=unavailable
 ====
 # tinyint_col is not a partition column so the optimization is disabled.
 select count(1) from functional_parquet.alltypes where year < 2010 and tinyint_col > 8;
@@ -243,10 +294,13 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=12/24 files=12 size=89.05KB
+   partition predicates: year < 2010
+   partitions=12/24 files=12 size=94.74KB
    predicates: tinyint_col > 8
+   row-size=1B cardinality=unavailable
 ====
 # Optimization is applied after constant folding.
 select count(1 + 2 + 3) from functional_parquet.alltypes
@@ -255,9 +309,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=8B cardinality=unavailable
 ====
 # Optimization is not applied to count(null).
 select count(1 + null + 3) from functional_parquet.alltypes
@@ -268,18 +324,23 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=2
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: count(NULL)
+|  |  row-size=8B cardinality=1
 |  |
 |  03:SCAN HDFS [functional_parquet.alltypes]
-|     partitions=24/24 files=24 size=178.13KB
+|     partitions=24/24 files=24 size=189.28KB
+|     row-size=0B cardinality=unavailable
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(NULL + 3)
+|  row-size=8B cardinality=1
 |
 01:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=0B cardinality=unavailable
 ====
 # Optimization is not applied when selecting from an empty table.
 select count(*) from functional_parquet.emptytable
@@ -288,9 +349,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional_parquet.emptytable]
    partitions=0/0 files=0 size=0B
+   row-size=0B cardinality=0
 ====
 # Optimization is not applied when all partitions are pruned.
 select count(1) from functional_parquet.alltypes where year = -1
@@ -299,9 +362,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional_parquet.alltypes]
+   partition predicates: year = -1
    partitions=0/24 files=0 size=0B
+   row-size=0B cardinality=0
 ====
 # Optimization is not applied across query blocks, even though it would be correct here.
 select count(*) from (select int_col from functional_parquet.alltypes) t
@@ -310,9 +376,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=0B cardinality=unavailable
 ====
 # Optimization is not applied when there is a distinct agg.
 select count(*), count(distinct 1) from functional_parquet.alltypes
@@ -321,13 +389,16 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(1), count:merge(*)
+|  row-size=16B cardinality=1
 |
 01:AGGREGATE
 |  output: count(*)
 |  group by: 1
+|  row-size=9B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=178.13KB
+   partitions=24/24 files=24 size=189.28KB
+   row-size=0B cardinality=unavailable
 ====
 # The optimization is applied here because only the count(*) and a partition column are
 # materialized. Non-materialized agg exprs are ignored.
@@ -343,7 +414,10 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
 |  group by: year
+|  row-size=12B cardinality=2
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=2/24 files=2 size=15.01KB
+   partition predicates: month = 1
+   partitions=2/24 files=2 size=16.06KB
+   row-size=12B cardinality=unavailable
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test b/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test
index deda7e9..bc4d740 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test
@@ -5,17 +5,21 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(month), max(year), ndv(day)
+|  row-size=16B cardinality=1
 |
 00:UNION
    constant-operands=11
+   row-size=12B cardinality=11
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(month), max(year), ndv(day)
+|  row-size=16B cardinality=1
 |
 00:UNION
    constant-operands=11
+   row-size=12B cardinality=11
 ====
 # Test with explicit distinct keyword.
 select count(distinct year), ndv(day) from functional.alltypesagg
@@ -24,25 +28,31 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(year), ndv:merge(day)
+|  row-size=16B cardinality=1
 |
 01:AGGREGATE
 |  output: ndv(day)
 |  group by: year
+|  row-size=12B cardinality=1
 |
 00:UNION
    constant-operands=11
+   row-size=8B cardinality=11
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(year), ndv:merge(day)
+|  row-size=16B cardinality=1
 |
 01:AGGREGATE
 |  output: ndv(day)
 |  group by: year
+|  row-size=12B cardinality=1
 |
 00:UNION
    constant-operands=11
+   row-size=8B cardinality=11
 ====
 # Test static partition pruning.
 select min(month), max(day) from functional.alltypesagg where year = 2010 and day = 1;
@@ -51,9 +61,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(month), max(day)
+|  row-size=8B cardinality=1
 |
 00:UNION
    constant-operands=1
+   row-size=8B cardinality=1
 ====
 # Test with cases where all partitions are pruned.
 select c1, c2 from
@@ -64,8 +76,10 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(year), max(month)
+|  row-size=8B cardinality=0
 |
 00:UNION
+   row-size=8B cardinality=0
 ====
 # Test with group by and having clauses.
 select ndv(month) from functional.alltypesagg group by year having max(day)=10
@@ -76,9 +90,11 @@ PLAN-ROOT SINK
 |  output: ndv(month), max(day)
 |  group by: year
 |  having: max(day) = 10
+|  row-size=16B cardinality=0
 |
 00:UNION
    constant-operands=11
+   row-size=12B cardinality=11
 ====
 # Test with group-by clauses (no aggregate expressions) only.
 select month from functional.alltypes group by month
@@ -87,9 +103,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  group by: month
+|  row-size=4B cardinality=12
 |
 00:UNION
    constant-operands=12
+   row-size=4B cardinality=12
 ====
 # Test with distinct select list.
 select distinct month from functional.alltypes where month % 2 = 0
@@ -98,9 +116,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  group by: month
+|  row-size=4B cardinality=6
 |
 00:UNION
    constant-operands=6
+   row-size=4B cardinality=6
 ====
 # Test with joins on the partition keys.
 select min(a.month)
@@ -111,31 +131,39 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: min(a.month)
+|  row-size=4B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.year = b.year
+|  row-size=12B cardinality=24
 |
 |--01:UNION
 |     constant-operands=1
+|     row-size=4B cardinality=1
 |
 00:UNION
    constant-operands=24
+   row-size=8B cardinality=24
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: min(a.month)
+|  row-size=4B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.year = b.year
+|  row-size=12B cardinality=24
 |
 |--04:EXCHANGE [UNPARTITIONED]
 |  |
 |  01:UNION
 |     constant-operands=1
+|     row-size=4B cardinality=1
 |
 00:UNION
    constant-operands=24
+   row-size=8B cardinality=24
 ====
 # Test query which contains both distinct and non-distinct aggregate
 # expressions and make sure the optimization is applied when applicable.
@@ -149,20 +177,25 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: year = year
 |  runtime filters: RF000 <- year
+|  row-size=16B cardinality=4
 |
 |--01:AGGREGATE [FINALIZE]
 |  |  group by: year
+|  |  row-size=4B cardinality=2
 |  |
 |  00:UNION
 |     constant-operands=2
+|     row-size=4B cardinality=2
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(month)
 |  group by: year
+|  row-size=12B cardinality=2
 |
 02:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.year
+   row-size=8B cardinality=7.30K
 ====
 # Test queries with tableRefs which cannot be evaluated by metadata.
 select min(a.year), ndv(b.timestamp_col) from
@@ -172,14 +205,18 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: min(a.year), ndv(b.timestamp_col)
+|  row-size=12B cardinality=0
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=20B cardinality=0
 |
 |--01:SCAN HDFS [functional.alltypesnopart b]
 |     partitions=1/1 files=0 size=0B
+|     row-size=16B cardinality=0
 |
 00:UNION
    constant-operands=2
+   row-size=4B cardinality=2
 ====
 # Test that non-partitioning slots which aren't materialized won't block the
 # optimization from being applied.
@@ -191,14 +228,18 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: ndv(a.year + b.year), min(a.month + b.month)
+|  row-size=16B cardinality=1
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=16B cardinality=24
 |
 |--01:UNION
 |     constant-operands=1
+|     row-size=8B cardinality=1
 |
 00:UNION
    constant-operands=24
+   row-size=8B cardinality=24
 ====
 # IMPALA-2948. Unmaterialized slots won't block the optimization (the hash join version).
 select t1.int_col
@@ -212,16 +253,20 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.int_col = min(t2.year)
 |  runtime filters: RF000 <- min(t2.year)
+|  row-size=8B cardinality=8
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(t2.year)
+|  |  row-size=4B cardinality=1
 |  |
 |  01:UNION
 |     constant-operands=1
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> t1.int_col
+   row-size=4B cardinality=8
 ====
 # Test with with clauses on the partition keys.
 with c1 as (select distinct month from functional.alltypes),
@@ -232,20 +277,26 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: ndv(month)
+|  row-size=8B cardinality=1
 |
 00:UNION
+|  row-size=4B cardinality=14
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  group by: year
+|  |  row-size=4B cardinality=2
 |  |
 |  03:UNION
 |     constant-operands=2
+|     row-size=4B cardinality=2
 |
 02:AGGREGATE [FINALIZE]
 |  group by: month
+|  row-size=4B cardinality=12
 |
 01:UNION
    constant-operands=12
+   row-size=4B cardinality=12
 ====
 # If slots other than partition keys are accessed, make sure scan nodes are generated.
 select date_string_col, min(month) from functional.alltypes group by date_string_col
@@ -255,9 +306,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: min(month)
 |  group by: date_string_col
+|  row-size=24B cardinality=736
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=24B cardinality=7.30K
 ====
 # Make sure non-distinct aggregation functions will generate scan nodes.
 select count(month) from functional.alltypes
@@ -266,9 +319,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(month)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # Make sure that queries without any aggregation will generate scan nodes.
 select month from functional.alltypes order by year
@@ -277,7 +332,9 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: year ASC
+|  row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/partition-pruning.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/partition-pruning.test b/testdata/workloads/functional-planner/queries/PlannerTest/partition-pruning.test
index 768106c..ef7d8c4 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/partition-pruning.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/partition-pruning.test
@@ -9,6 +9,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.stringpartitionkey]
+   partition predicates: CAST(string_col AS TIMESTAMP) = TIMESTAMP '2009-01-01 00:00:00'
    partitions=1/2 files=1 size=2B
    stored statistics:
      table: rows=1 size=2B

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test b/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test
index 09a2993..4b1ce24 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test
@@ -7,18 +7,22 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.double_col = b.bigint_col
 |  runtime filters: RF000 <- b.bigint_col
+|  row-size=16B cardinality=532.90K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: b.bigint_col DIV 2 = 0
+|     row-size=8B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.double_col
+   row-size=8B cardinality=7.30K
 ====
 # Where clause predicate is turned into Having clause
 select a.cnt, b.int_col
@@ -32,18 +36,22 @@ PLAN-ROOT SINK
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: count(id) = b.id
+|  row-size=21B cardinality=10
 |
 |--02:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: b.id < 10
+|     row-size=8B cardinality=10
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(id)
 |  group by: int_col, tinyint_col
 |  having: count(id) < 10
+|  row-size=13B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=9B cardinality=7.30K
 ====
 # single-table test case: partitions are pruned due to predicate inference
 select count(*) from functional.alltypes
@@ -53,10 +61,13 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.month < 2
    partitions=2/24 files=2 size=40.32KB
    predicates: functional.alltypes.id < 2, functional.alltypes.tinyint_col < 2, id = int_col, int_col < 2, month = id, tinyint_col = int_col
+   row-size=13B cardinality=62
 ====
 # all subquery results get materialized correctly;
 # a.string_col = 'a' needs to be evaluated by the join itself, not the scan
@@ -71,13 +82,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: id = id
 |  other join predicates: string_col = 'a'
+|  row-size=38B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: string_col = 'b'
+|     row-size=17B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=21B cardinality=7.30K
 ====
 # variation with Where clause; "b.string_col = 'b'" still needs to be applied
 # by the join node but it's safe to have the 'b' scan apply it as well
@@ -93,14 +107,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: id = id
 |  other predicates: string_col = 'b'
+|  row-size=38B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.string_col = 'b'
+|     row-size=17B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.string_col = 'a'
+   row-size=21B cardinality=730
 ====
 # Predicates are pushed through cross join to the inline views
 select a.int_col
@@ -112,30 +129,36 @@ where a.string_col = 'a' and b.string_col = 'b'
 PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=30B cardinality=532.90K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.string_col = 'b'
+|     row-size=13B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.string_col = 'a'
+   row-size=17B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |
 02:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=30B cardinality=532.90K
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.string_col = 'b'
+|     row-size=13B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.string_col = 'a'
+   row-size=17B cardinality=730
 ====
 # c1 > 0 does not get propagated into inl view due to limit clauses
 select c1, c2, c3
@@ -153,19 +176,24 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=3]
 |  order by: c2 ASC, c1 DESC
+|  row-size=9B cardinality=1
 |
 03:SELECT
 |  predicates: int_col > 0
+|  row-size=9B cardinality=1
 |
 02:TOP-N [LIMIT=5]
 |  order by: int_col ASC, tinyint_col ASC
+|  row-size=9B cardinality=5
 |
 01:AGGREGATE [FINALIZE]
 |  output: max(id)
 |  group by: int_col, tinyint_col
+|  row-size=9B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=9B cardinality=100
 ====
 # same for with clause variant
 with t as (select int_col c1, tinyint_col c2, max(id) c3
@@ -183,19 +211,24 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=3]
 |  order by: c2 ASC, c1 DESC
+|  row-size=9B cardinality=1
 |
 03:SELECT
 |  predicates: int_col > 0
+|  row-size=9B cardinality=1
 |
 02:TOP-N [LIMIT=5]
 |  order by: int_col ASC, tinyint_col ASC
+|  row-size=9B cardinality=5
 |
 01:AGGREGATE [FINALIZE]
 |  output: max(id)
 |  group by: int_col, tinyint_col
+|  row-size=9B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=9B cardinality=100
 ====
 # basic propagation between equivalence classes, with partition pruning
 select straight_join a.year, a.month, b.year, b.month
@@ -212,24 +245,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = c.id, b.month = c.month, b.year = c.year, b.smallint_col = c.int_col
 |  runtime filters: RF000 <- c.id, RF001 <- c.month, RF002 <- c.year, RF003 <- c.int_col
+|  row-size=43B cardinality=1
 |
 |--02:SCAN HDFS [functional.alltypestiny c]
+|     partition predicates: c.year = 2009, c.month + 2 <= 4
 |     partitions=2/4 files=2 size=230B
 |     predicates: c.id = 17, CAST(sin(c.int_col) AS BOOLEAN) = TRUE
+|     row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.month = b.month, a.year = b.year, a.tinyint_col = b.smallint_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.month, RF010 <- b.year, RF011 <- b.smallint_col
+|  row-size=27B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.year = 2009, b.month + 2 <= 4
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: b.id = 17, CAST(sin(b.smallint_col) AS BOOLEAN) = TRUE
 |     runtime filters: RF000 -> b.id, RF001 -> b.month, RF002 -> b.year, RF003 -> b.smallint_col
+|     row-size=14B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month + 2 <= 4
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id = 17, CAST(sin(a.tinyint_col) AS BOOLEAN) = TRUE
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col, RF008 -> a.id, RF009 -> a.month, RF010 -> a.year, RF011 -> a.tinyint_col
+   row-size=13B cardinality=1
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
@@ -248,30 +289,38 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: b.id = c.id, b.month = c.month, b.year = c.year, b.smallint_col = c.int_col
 |  runtime filters: RF000 <- c.id, RF001 <- c.month, RF002 <- c.year, RF003 <- c.int_col
+|  row-size=43B cardinality=1
 |
 |--07:EXCHANGE [HASH(c.id,c.month,c.year,c.int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny c]
+|     partition predicates: c.year = 2009, c.month + 2 <= 4
 |     partitions=2/4 files=2 size=230B
 |     predicates: c.id = 17, CAST(sin(c.int_col) AS BOOLEAN) = TRUE
+|     row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.id = b.id, a.month = b.month, a.year = b.year, a.tinyint_col = b.smallint_col
 |  runtime filters: RF008 <- b.id, RF009 <- b.month, RF010 <- b.year, RF011 <- b.smallint_col
+|  row-size=27B cardinality=1
 |
 |--06:EXCHANGE [HASH(b.id,b.month,b.year,b.smallint_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.year = 2009, b.month + 2 <= 4
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: b.id = 17, CAST(sin(b.smallint_col) AS BOOLEAN) = TRUE
 |     runtime filters: RF000 -> b.id, RF001 -> b.month, RF002 -> b.year, RF003 -> b.smallint_col
+|     row-size=14B cardinality=1
 |
 05:EXCHANGE [HASH(a.id,a.month,a.year,a.tinyint_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month + 2 <= 4
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id = 17, CAST(sin(a.tinyint_col) AS BOOLEAN) = TRUE
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col, RF008 -> a.id, RF009 -> a.month, RF010 -> a.year, RF011 -> a.tinyint_col
+   row-size=13B cardinality=1
 ====
 # basic propagation between equivalence classes, with partition pruning;
 # variation with inline views
@@ -289,24 +338,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypessmall.id = functional.alltypestiny.id, functional.alltypessmall.month = functional.alltypestiny.month, functional.alltypessmall.year = functional.alltypestiny.year, functional.alltypessmall.smallint_col = functional.alltypestiny.int_col
 |  runtime filters: RF000 <- functional.alltypestiny.id, RF001 <- functional.alltypestiny.month, RF002 <- functional.alltypestiny.year, RF003 <- functional.alltypestiny.int_col
+|  row-size=43B cardinality=1
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: functional.alltypestiny.year = 2009, functional.alltypestiny.month + 2 <= 4
 |     partitions=2/4 files=2 size=230B
 |     predicates: functional.alltypestiny.id = 17, CAST(sin(functional.alltypestiny.int_col) AS BOOLEAN) = TRUE
+|     row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypessmall.id, functional.alltypes.month = functional.alltypessmall.month, functional.alltypes.year = functional.alltypessmall.year, functional.alltypes.tinyint_col = functional.alltypessmall.smallint_col
 |  runtime filters: RF008 <- functional.alltypessmall.id, RF009 <- functional.alltypessmall.month, RF010 <- functional.alltypessmall.year, RF011 <- functional.alltypessmall.smallint_col
+|  row-size=27B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.year = 2009, functional.alltypessmall.month + 2 <= 4
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: functional.alltypessmall.id = 17, CAST(sin(functional.alltypessmall.smallint_col) AS BOOLEAN) = TRUE
 |     runtime filters: RF000 -> functional.alltypessmall.id, RF001 -> functional.alltypessmall.month, RF002 -> functional.alltypessmall.year, RF003 -> functional.alltypessmall.smallint_col
+|     row-size=14B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.year = 2009, functional.alltypes.month + 2 <= 4
    partitions=2/24 files=2 size=38.07KB
    predicates: functional.alltypes.id = 17, CAST(sin(functional.alltypes.tinyint_col) AS BOOLEAN) = TRUE
    runtime filters: RF000 -> functional.alltypes.id, RF001 -> functional.alltypes.month, RF002 -> functional.alltypes.year, RF003 -> functional.alltypes.tinyint_col, RF008 -> functional.alltypes.id, RF009 -> functional.alltypes.month, RF010 -> functional.alltypes.year, RF011 -> functional.alltypes.tinyint_col
+   row-size=13B cardinality=1
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
@@ -325,30 +382,38 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypessmall.id = functional.alltypestiny.id, functional.alltypessmall.month = functional.alltypestiny.month, functional.alltypessmall.year = functional.alltypestiny.year, functional.alltypessmall.smallint_col = functional.alltypestiny.int_col
 |  runtime filters: RF000 <- functional.alltypestiny.id, RF001 <- functional.alltypestiny.month, RF002 <- functional.alltypestiny.year, RF003 <- functional.alltypestiny.int_col
+|  row-size=43B cardinality=1
 |
 |--07:EXCHANGE [HASH(functional.alltypestiny.id,functional.alltypestiny.month,functional.alltypestiny.year,functional.alltypestiny.int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: functional.alltypestiny.year = 2009, functional.alltypestiny.month + 2 <= 4
 |     partitions=2/4 files=2 size=230B
 |     predicates: functional.alltypestiny.id = 17, CAST(sin(functional.alltypestiny.int_col) AS BOOLEAN) = TRUE
+|     row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypessmall.id, functional.alltypes.month = functional.alltypessmall.month, functional.alltypes.year = functional.alltypessmall.year, functional.alltypes.tinyint_col = functional.alltypessmall.smallint_col
 |  runtime filters: RF008 <- functional.alltypessmall.id, RF009 <- functional.alltypessmall.month, RF010 <- functional.alltypessmall.year, RF011 <- functional.alltypessmall.smallint_col
+|  row-size=27B cardinality=1
 |
 |--06:EXCHANGE [HASH(functional.alltypessmall.id,functional.alltypessmall.month,functional.alltypessmall.year,functional.alltypessmall.smallint_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.year = 2009, functional.alltypessmall.month + 2 <= 4
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: functional.alltypessmall.id = 17, CAST(sin(functional.alltypessmall.smallint_col) AS BOOLEAN) = TRUE
 |     runtime filters: RF000 -> functional.alltypessmall.id, RF001 -> functional.alltypessmall.month, RF002 -> functional.alltypessmall.year, RF003 -> functional.alltypessmall.smallint_col
+|     row-size=14B cardinality=1
 |
 05:EXCHANGE [HASH(functional.alltypes.id,functional.alltypes.month,functional.alltypes.year,functional.alltypes.tinyint_col)]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.year = 2009, functional.alltypes.month + 2 <= 4
    partitions=2/24 files=2 size=38.07KB
    predicates: functional.alltypes.id = 17, CAST(sin(functional.alltypes.tinyint_col) AS BOOLEAN) = TRUE
    runtime filters: RF000 -> functional.alltypes.id, RF001 -> functional.alltypes.month, RF002 -> functional.alltypes.year, RF003 -> functional.alltypes.tinyint_col, RF008 -> functional.alltypes.id, RF009 -> functional.alltypes.month, RF010 -> functional.alltypes.year, RF011 -> functional.alltypes.tinyint_col
+   row-size=13B cardinality=1
 ====
 # propagation between outer-joined tables only goes in one direction:
 # - predicates on a.year and a.tinyint_col are propagated to b
@@ -371,14 +436,19 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = b.id, a.month = b.month, a.tinyint_col = b.tinyint_col, a.year = b.year
 |  other predicates: b.int_col IS NULL, b.id = 17
+|  row-size=30B cardinality=115
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month + 1 = 2, b.year = 2009
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: b.id = 17, b.tinyint_col = 7
+|     row-size=17B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009
    partitions=12/24 files=12 size=238.68KB
    predicates: a.id IS NULL, a.tinyint_col = 7
+   row-size=13B cardinality=115
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
@@ -403,16 +473,21 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id, a.month = b.month, a.tinyint_col = b.tinyint_col, a.year = b.year
 |  other predicates: b.int_col IS NULL, b.id = 17
+|  row-size=30B cardinality=115
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month + 1 = 2, b.year = 2009
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: b.id = 17, b.tinyint_col = 7
+|     row-size=17B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009
    partitions=12/24 files=12 size=238.68KB
    predicates: a.id IS NULL, a.tinyint_col = 7
+   row-size=13B cardinality=115
 ====
 # propagation between outer-joined tables only goes in one direction:
 # - predicates on b.year, b.tinyint_col are propagated to a
@@ -436,15 +511,20 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id, a.month = b.month, a.tinyint_col = b.tinyint_col, a.year = b.year
 |  other predicates: a.int_col IS NULL, a.id = 17
 |  runtime filters: RF000 <- b.id, RF001 <- b.month, RF002 <- b.tinyint_col, RF003 <- b.year
+|  row-size=30B cardinality=115
 |
 |--01:SCAN HDFS [functional.alltypes b]
+|     partition predicates: b.year = 2009
 |     partitions=12/24 files=12 size=238.68KB
 |     predicates: b.id IS NULL, b.tinyint_col = 7
+|     row-size=13B cardinality=115
 |
 00:SCAN HDFS [functional.alltypessmall a]
+   partition predicates: a.month + 1 = 2, a.year = 2009
    partitions=1/4 files=1 size=1.57KB
    predicates: a.id = 17, a.tinyint_col = 7
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.tinyint_col, RF003 -> a.year
+   row-size=17B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -454,19 +534,24 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id, a.month = b.month, a.tinyint_col = b.tinyint_col, a.year = b.year
 |  other predicates: a.int_col IS NULL, a.id = 17
 |  runtime filters: RF000 <- b.id, RF001 <- b.month, RF002 <- b.tinyint_col, RF003 <- b.year
+|  row-size=30B cardinality=115
 |
 |--04:EXCHANGE [HASH(b.id,b.month,b.tinyint_col,b.year)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
+|     partition predicates: b.year = 2009
 |     partitions=12/24 files=12 size=238.68KB
 |     predicates: b.id IS NULL, b.tinyint_col = 7
+|     row-size=13B cardinality=115
 |
 03:EXCHANGE [HASH(a.id,a.month,a.tinyint_col,a.year)]
 |
 00:SCAN HDFS [functional.alltypessmall a]
+   partition predicates: a.month + 1 = 2, a.year = 2009
    partitions=1/4 files=1 size=1.57KB
    predicates: a.id = 17, a.tinyint_col = 7
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.tinyint_col, RF003 -> a.year
+   row-size=17B cardinality=1
 ====
 # propagation into inline view with aggregation:
 # - predicates from enclosing scope applied to grouping exprs; with partition pruning
@@ -487,20 +572,26 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
 |  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  row-size=37B cardinality=1
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: year, month, id, int_col
 |  |  having: count(*) + 1 = 17
+|  |  row-size=24B cardinality=5
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.year = 2009, functional.alltypessmall.month <= 2
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: functional.alltypessmall.int_col != 5, id > 11
+|     row-size=16B cardinality=5
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   row-size=13B cardinality=59
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
@@ -516,6 +607,7 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
 |  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  row-size=37B cardinality=1
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
@@ -523,21 +615,27 @@ PLAN-ROOT SINK
 |  |  output: count:merge(*)
 |  |  group by: year, month, id, int_col
 |  |  having: count(*) + 1 = 17
+|  |  row-size=24B cardinality=5
 |  |
 |  04:EXCHANGE [HASH(year,month,id,int_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: year, month, id, int_col
+|  |  row-size=24B cardinality=5
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.year = 2009, functional.alltypessmall.month <= 2
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: functional.alltypessmall.int_col != 5, id > 11
+|     row-size=16B cardinality=5
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   row-size=13B cardinality=59
 ====
 # Same as above but with cross join
 select straight_join a.id, b.id
@@ -561,20 +659,26 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
 |  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  row-size=37B cardinality=1
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: year, month, id, int_col
 |  |  having: count(*) + 1 = 17
+|  |  row-size=24B cardinality=5
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.year = 2009, functional.alltypessmall.month <= 2
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: functional.alltypessmall.int_col != 5, id > 11
+|     row-size=16B cardinality=5
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   row-size=13B cardinality=59
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -583,6 +687,7 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
 |  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  row-size=37B cardinality=1
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
@@ -590,21 +695,27 @@ PLAN-ROOT SINK
 |  |  output: count:merge(*)
 |  |  group by: year, month, id, int_col
 |  |  having: count(*) + 1 = 17
+|  |  row-size=24B cardinality=5
 |  |
 |  04:EXCHANGE [HASH(year,month,id,int_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: year, month, id, int_col
+|  |  row-size=24B cardinality=5
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: functional.alltypessmall.year = 2009, functional.alltypessmall.month <= 2
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: functional.alltypessmall.int_col != 5, id > 11
+|     row-size=16B cardinality=5
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   row-size=13B cardinality=59
 ====
 # no propagation into select block with limit;
 # propagation out of that block is okay;
@@ -626,23 +737,29 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
 |  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  row-size=37B cardinality=1
 |
 |--03:SELECT
 |  |  predicates: count(*) + 1 = 17
+|  |  row-size=24B cardinality=1
 |  |
 |  02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: year, month, id, int_col
 |  |  limit: 5
+|  |  row-size=24B cardinality=5
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: id > 11
+|     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   row-size=13B cardinality=59
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=1/090101.txt 0:20433
@@ -660,11 +777,13 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
 |  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  row-size=37B cardinality=1
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  03:SELECT
 |  |  predicates: count(*) + 1 = 17
+|  |  row-size=24B cardinality=1
 |  |
 |  07:EXCHANGE [UNPARTITIONED]
 |  |  limit: 5
@@ -673,21 +792,26 @@ PLAN-ROOT SINK
 |  |  output: count:merge(*)
 |  |  group by: year, month, id, int_col
 |  |  limit: 5
+|  |  row-size=24B cardinality=5
 |  |
 |  05:EXCHANGE [HASH(year,month,id,int_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: year, month, id, int_col
+|  |  row-size=24B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: id > 11
+|     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   row-size=13B cardinality=59
 ====
 # Similar to the above, converts the cross join to a hash join
 select straight_join a.id, b.id
@@ -712,23 +836,29 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
 |  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  row-size=37B cardinality=1
 |
 |--03:SELECT
 |  |  predicates: count(*) + 1 = 17
+|  |  row-size=24B cardinality=1
 |  |
 |  02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: year, month, id, int_col
 |  |  limit: 5
+|  |  row-size=24B cardinality=5
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: id > 11
+|     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   row-size=13B cardinality=59
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -737,11 +867,13 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
 |  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  row-size=37B cardinality=1
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  03:SELECT
 |  |  predicates: count(*) + 1 = 17
+|  |  row-size=24B cardinality=1
 |  |
 |  07:EXCHANGE [UNPARTITIONED]
 |  |  limit: 5
@@ -750,21 +882,26 @@ PLAN-ROOT SINK
 |  |  output: count:merge(*)
 |  |  group by: year, month, id, int_col
 |  |  limit: 5
+|  |  row-size=24B cardinality=5
 |  |
 |  05:EXCHANGE [HASH(year,month,id,int_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: year, month, id, int_col
+|  |  row-size=24B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: id > 11
+|     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
    runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   row-size=13B cardinality=59
 ====
 # propagation of z.month=1 to alltypesagg is prevented
 select straight_join x.int_col, z.int_col
@@ -779,17 +916,22 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypesagg.id
 |  runtime filters: RF000 <- functional.alltypesagg.id
+|  row-size=20B cardinality=10
 |
 |--02:SELECT
 |  |  predicates: functional.alltypesagg.month = 1
+|  |  row-size=12B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
 |     limit: 10
+|     row-size=12B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.year = 2009
    partitions=12/24 files=12 size=238.68KB
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=8B cardinality=3.65K
 ====
 # extra join predicate "x.id + x.b_id = 17" results in referenced slots being
 # materialized
@@ -813,25 +955,33 @@ PLAN-ROOT SINK
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = functional.alltypesagg.id
 |  runtime filters: RF000 <- functional.alltypesagg.id
+|  row-size=36B cardinality=50
 |
 |--04:SELECT
 |  |  predicates: functional.alltypesagg.month = 1
+|  |  row-size=12B cardinality=10
 |  |
 |  03:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
 |     limit: 10
+|     row-size=12B cardinality=10
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = b.int_col, a.year = b.year
 |  other predicates: a.id + b.id = 17
 |  runtime filters: RF002 <- b.int_col, RF003 <- b.year
+|  row-size=24B cardinality=36.50K
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.year = 2009
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes a]
+   partition predicates: a.year = 2009
    partitions=12/24 files=12 size=238.68KB
    runtime filters: RF000 -> a.id, RF002 -> a.int_col, RF003 -> a.year
+   row-size=12B cardinality=3.65K
 ====
 # correct placement of predicates in the presence of aggregation in an inline view
 select straight_join a.id, b.id
@@ -844,17 +994,21 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = id
 |  other predicates: int_col = 17, isnull(id, 0) = 0
+|  row-size=12B cardinality=730
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  group by: id, int_col
+|  |  row-size=8B cardinality=730
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.int_col = 17
+|     row-size=8B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.id IS NULL
+   row-size=4B cardinality=730
 ====
 select straight_join a.id, b.id
 from functional.alltypes a left outer join
@@ -868,27 +1022,34 @@ PLAN-ROOT SINK
 06:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = id
 |  other predicates: id IS NULL, int_col = 17
+|  row-size=12B cardinality=730
 |
 |--01:UNION
 |  |  pass-through-operands: all
+|  |  row-size=8B cardinality=740
 |  |
 |  |--05:AGGREGATE [FINALIZE]
 |  |  |  group by: id, int_col
+|  |  |  row-size=8B cardinality=10
 |  |  |
 |  |  04:SCAN HDFS [functional.alltypessmall]
 |  |     partitions=4/4 files=4 size=6.32KB
 |  |     predicates: functional.alltypessmall.int_col = 17
+|  |     row-size=8B cardinality=10
 |  |
 |  03:AGGREGATE [FINALIZE]
 |  |  group by: id, int_col
+|  |  row-size=8B cardinality=730
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.int_col = 17
+|     row-size=8B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: isnull(a.id, 0) = 0
+   row-size=4B cardinality=730
 ====
 select a.id, b.id
 from
@@ -902,18 +1063,22 @@ PLAN-ROOT SINK
 |  hash predicates: id = a.id
 |  other predicates: int_col = 17, isnull(id, 0) = 0
 |  runtime filters: RF000 <- a.id
+|  row-size=12B cardinality=730
 |
 |--02:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: a.id IS NULL
+|     row-size=4B cardinality=730
 |
 01:AGGREGATE [FINALIZE]
 |  group by: id, int_col
+|  row-size=8B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.int_col = 17
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=8B cardinality=730
 ====
 select straight_join a.id, b.id
 from
@@ -928,29 +1093,36 @@ PLAN-ROOT SINK
 |  hash predicates: id = a.id
 |  other predicates: id IS NULL, int_col = 17
 |  runtime filters: RF000 <- a.id
+|  row-size=12B cardinality=740
 |
 |--05:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: isnull(a.id, 0) = 0
+|     row-size=4B cardinality=730
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=740
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  group by: id, int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  03:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: functional.alltypessmall.int_col = 17
 |     runtime filters: RF000 -> functional.alltypessmall.id
+|     row-size=8B cardinality=10
 |
 02:AGGREGATE [FINALIZE]
 |  group by: id, int_col
+|  row-size=8B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.int_col = 17
    runtime filters: RF000 -> functional.alltypes.id
+   row-size=8B cardinality=730
 ====
 # predicate inside outer-joined inline view must be assigned in scan
 select straight_join a.string_col from functional.alltypes a
@@ -961,13 +1133,16 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.id = functional.alltypessmall.id
+|  row-size=21B cardinality=7.31K
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: id > 0
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # TODO: Remove the following limitation in our predicate propagation:
 # It is safe to propagate 'y.id is null' to the scan of y, but we prevent
@@ -986,21 +1161,26 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = x.id
+|  row-size=25B cardinality=7.30K
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: x.id = y.id
 |  |  runtime filters: RF000 <- y.id
+|  |  row-size=8B cardinality=1
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny y]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall x]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: x.id IS NULL
 |     runtime filters: RF000 -> x.id
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # Test proper assignment of Having-clause predicates (IMPALA-820):
 # - Predicates only referencing the group-by exprs are assigned in the scan node.
@@ -1016,10 +1196,12 @@ PLAN-ROOT SINK
 |  output: count(bigint_col)
 |  group by: bool_col, int_col
 |  having: count(bigint_col) > 0
+|  row-size=13B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.int_col > 0, functional.alltypes.bool_col = FALSE
+   row-size=13B cardinality=516
 ====
 # basic propagation of multi-slot, single-tuple predicates
 select straight_join 1 from
@@ -1044,24 +1226,29 @@ PLAN-ROOT SINK
 |  hash predicates: t2.bigint_col = functional.alltypestiny.bigint_col, t2.id = functional.alltypestiny.id, t2.smallint_col = functional.alltypestiny.int_col
 |  other predicates: t2.id + functional.alltypestiny.int_col > 40
 |  runtime filters: RF000 <- functional.alltypestiny.bigint_col, RF001 <- functional.alltypestiny.id, RF002 <- functional.alltypestiny.int_col
+|  row-size=35B cardinality=1
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: functional.alltypestiny.id + functional.alltypestiny.bigint_col > 20, functional.alltypestiny.id + functional.alltypestiny.int_col > 10, functional.alltypestiny.id + functional.alltypestiny.int_col + functional.alltypestiny.bigint_col > 30
+|     row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id, t1.tinyint_col = t2.smallint_col
 |  runtime filters: RF006 <- t2.id, RF007 <- t2.smallint_col
+|  row-size=19B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: t2.id + t2.bigint_col > 20, t2.id + t2.smallint_col > 10, t2.id + t2.smallint_col + t2.bigint_col > 30
 |     runtime filters: RF000 -> t2.bigint_col, RF001 -> t2.id, RF002 -> t2.smallint_col
+|     row-size=14B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id + t1.tinyint_col > 10
    runtime filters: RF001 -> t1.id, RF002 -> t1.tinyint_col, RF006 -> t1.id, RF007 -> t1.tinyint_col
+   row-size=5B cardinality=730
 ====
 # basic propagation of multi-slot, single-tuple predicates with aggregates
 select straight_join 1 from
@@ -1086,30 +1273,37 @@ PLAN-ROOT SINK
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: max(smallint_col) = max(smallint_col), min(int_col) = min(int_col)
 |  runtime filters: RF000 <- max(smallint_col)
+|  row-size=33B cardinality=730
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: max(smallint_col), min(int_col)
 |  |  having: max(smallint_col) + min(int_col) > 30
+|  |  row-size=6B cardinality=0
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=6B cardinality=8
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = count(tinyint_col), t1.tinyint_col = max(smallint_col)
 |  runtime filters: RF002 <- count(tinyint_col), RF003 <- max(smallint_col)
+|  row-size=27B cardinality=730
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(tinyint_col), max(smallint_col), min(int_col)
 |  |  group by: bigint_col
 |  |  having: count(tinyint_col) + max(smallint_col) > 10, count(tinyint_col) + max(smallint_col) > 20, max(smallint_col) + min(int_col) > 30
+|  |  row-size=22B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=15B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id + t1.tinyint_col > 10, t1.id + t1.tinyint_col > 20
    runtime filters: RF000 -> t1.tinyint_col, RF002 -> t1.id, RF003 -> t1.tinyint_col
+   row-size=5B cardinality=730
 ====
 # assignment of multi-slot, single-tuple predicates with outer-joined tuple (IMPALA-824)
 select straight_join 1
@@ -1135,22 +1329,27 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t2.id = functional.alltypestiny.id, t2.int_col = functional.alltypestiny.int_col
 |  other predicates: functional.alltypestiny.tinyint_col + functional.alltypestiny.smallint_col + functional.alltypestiny.int_col > 10, ifnull(functional.alltypestiny.tinyint_col + functional.alltypestiny.bigint_col, 1) = 1
+|  row-size=46B cardinality=730
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: functional.alltypestiny.id * functional.alltypestiny.int_col < 100, functional.alltypestiny.tinyint_col + functional.alltypestiny.smallint_col + functional.alltypestiny.int_col > 10
+|     row-size=19B cardinality=1
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.id = t2.id, t1.int_col = t2.int_col
 |  other predicates: t2.tinyint_col = t2.smallint_col, ifnull(t2.tinyint_col + t2.bigint_col, 1) = 1
+|  row-size=27B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: t2.tinyint_col = t2.smallint_col, t2.id * t2.int_col < 100
+|     row-size=19B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id * t1.int_col < 100
+   row-size=8B cardinality=730
 ====
 # TODO: Fix this limitation of our getBindingPredicates() implementation:
 # We use the first multi-slot mapping and not necessarily the best,
@@ -1165,14 +1364,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.month, t1.year = t2.year
 |  runtime filters: RF000 <- t2.month, RF001 <- t2.year
+|  row-size=178B cardinality=100
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
+|     partition predicates: t2.year + t2.month > 10
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id = t1.month, t1.year + t1.id > 10
    runtime filters: RF000 -> t1.id, RF001 -> t1.year
+   row-size=89B cardinality=730
 ====
 # TODO: Fix this limitation of our getBindingPredicates() implementation:
 # We use the first multi-slot mapping and not all non-redundant mappings, i.e.,
@@ -1190,15 +1393,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id, t1.tinyint_col = t2.tinyint_col
 |  runtime filters: RF000 <- t2.id, RF001 <- t2.tinyint_col
+|  row-size=178B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: t2.id = t2.smallint_col, t2.tinyint_col = t2.int_col, t2.id + t2.tinyint_col > 10
+|     row-size=89B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id + t1.tinyint_col > 10
    runtime filters: RF000 -> t1.id, RF001 -> t1.tinyint_col
+   row-size=89B cardinality=730
 ====
 # TODO: Fix this limitation of our predicate propagation implementation:
 # Multi-slot predicates are not propagated onto an agg node if the slot mapping
@@ -1217,18 +1423,22 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.int_col = min(int_col), t1.id = bigint_col
 |  runtime filters: RF000 <- min(int_col), RF001 <- bigint_col
+|  row-size=105B cardinality=10
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(int_col), max(int_col)
 |  |  group by: bigint_col
+|  |  row-size=16B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id + t1.int_col > 10
    runtime filters: RF000 -> t1.int_col, RF001 -> t1.id
+   row-size=89B cardinality=730
 ====
 # Anti-joins have a uni-directional value transfer (IMPALA-1249).
 select * from
@@ -1243,14 +1453,17 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [LEFT ANTI JOIN]
 |  hash predicates: a.id = id
+|  row-size=89B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: functional.alltypestiny.id < 10, id > -20
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.id < 10
+   row-size=89B cardinality=730
 ====
 # Anti-joins have a uni-directional value transfer (IMPALA-1249).
 select * from
@@ -1264,14 +1477,17 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: id = b.id
+|  row-size=89B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
 |     predicates: b.id < 10
+|     row-size=89B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 10, id > -20
+   row-size=4B cardinality=730
 ====
 # Test proper predicate assignment with predicate propagation when the
 # generated predicate is bound by an outer joined tuple (IMPALA-2018)
@@ -1289,18 +1505,22 @@ PLAN-ROOT SINK
 |  output: sum(a.tinyint_col)
 |  group by: b.int_col
 |  having: j.int_col = 10
+|  row-size=12B cardinality=1
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: b.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=13B cardinality=9
 |
 |--00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
+|     row-size=5B cardinality=8
 |
 01:SCAN HDFS [functional.alltypesagg b]
    partitions=11/11 files=11 size=814.73KB
    predicates: b.int_col = 10
    runtime filters: RF000 -> b.id
+   row-size=8B cardinality=11
 ====
 # Test proper predicate assignment with predicate propagation when the
 # generated predicate is bound by an outer joined tuple (IMPALA-2018)
@@ -1317,14 +1537,17 @@ PLAN-ROOT SINK
 |  hash predicates: b.id = a.id
 |  other predicates: b.int_col = 10
 |  runtime filters: RF000 <- a.id
+|  row-size=13B cardinality=9
 |
 |--00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
+|     row-size=5B cardinality=8
 |
 01:SCAN HDFS [functional.alltypesagg b]
    partitions=11/11 files=11 size=814.73KB
    predicates: b.int_col = 10
    runtime filters: RF000 -> b.id
+   row-size=8B cardinality=11
 ====
 # Tests propagation of cardinality estimation of SCAN HDFS node with small
 # initial cardinality and low selectivity (IMPALA-2165). If any of the
@@ -1339,21 +1562,27 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: n.n_regionkey = r_regionkey
+|  row-size=31B cardinality=15.00K
 |
 |--03:SCAN HDFS [tpch_parquet.region r]
-|     partitions=1/1 files=1 size=1.01KB
+|     partitions=1/1 files=1 size=1.34KB
 |     predicates: r.r_regionkey = 1
+|     row-size=2B cardinality=1
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=29B cardinality=15.00K
 |
 |--01:SCAN HDFS [tpch_parquet.nation n]
-|     partitions=1/1 files=1 size=2.38KB
+|     partitions=1/1 files=1 size=2.75KB
 |     predicates: n_regionkey = 1, n_name = 'BRAZIL'
+|     row-size=21B cardinality=1
 |
 00:SCAN HDFS [tpch_parquet.customer c]
-   partitions=1/1 files=1 size=12.27MB
+   partitions=1/1 files=1 size=12.31MB
    predicates: c_custkey % 2 = 0
+   row-size=8B cardinality=15.00K
 ====


[18/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test
index 0ee1aa9..d4c3beb 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test
@@ -6,17 +6,22 @@ where year=2009 and month=05
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 5
    partitions=1/24 files=1 size=20.36KB
+   row-size=25B cardinality=310
 ---- DISTRIBUTEDPLAN
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 02:PARTIAL SORT
 |  order by: KuduPartition(bigint_col) ASC NULLS LAST, bigint_col ASC NULLS LAST
+|  row-size=29B cardinality=310
 |
 01:EXCHANGE [KUDU(KuduPartition(bigint_col))]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 5
    partitions=1/24 files=1 size=20.36KB
+   row-size=25B cardinality=310
 ====
 # simple upsert with values clause
 upsert into table functional_kudu.testtbl
@@ -26,6 +31,7 @@ UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 00:UNION
    constant-operands=2
+   row-size=14B cardinality=2
 ====
 # upsert with 'with' clause and limit
 with x as (select string_col, count(*) from functional.alltypes group by string_col)
@@ -38,44 +44,54 @@ UPSERT INTO KUDU [functional_kudu.testtbl]
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.string_col = string_col
 |  runtime filters: RF000 <- string_col
+|  row-size=38B cardinality=7.30K
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  group by: string_col
+|  |  row-size=13B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=13B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.string_col
+   row-size=25B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 08:PARTIAL SORT
 |  order by: KuduPartition(a.bigint_col) ASC NULLS LAST, bigint_col ASC NULLS LAST
+|  row-size=29B cardinality=7.30K
 |
 07:EXCHANGE [KUDU(KuduPartition(a.bigint_col))]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.string_col = string_col
 |  runtime filters: RF000 <- string_col
+|  row-size=38B cardinality=7.30K
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  05:AGGREGATE [FINALIZE]
 |  |  group by: string_col
+|  |  row-size=13B cardinality=10
 |  |
 |  04:EXCHANGE [HASH(string_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  group by: string_col
+|  |  row-size=13B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=13B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.string_col
+   row-size=25B cardinality=7.30K
 ====
 # upsert with inline view
 upsert into functional_kudu.testtbl
@@ -91,14 +107,17 @@ UPSERT INTO KUDU [functional_kudu.testtbl]
 |  output: count(*)
 |  group by: id, string_col
 |  having: CAST(count(*) AS INT) < 10
+|  row-size=25B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 05:PARTIAL SORT
 |  order by: KuduPartition(id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=29B cardinality=7.30K
 |
 04:EXCHANGE [KUDU(KuduPartition(id))]
 |
@@ -106,15 +125,18 @@ UPSERT INTO KUDU [functional_kudu.testtbl]
 |  output: count:merge(*)
 |  group by: id, string_col
 |  having: CAST(count(*) AS INT) < 10
+|  row-size=25B cardinality=7.30K
 |
 02:EXCHANGE [HASH(id,string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: id, string_col
+|  row-size=25B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 upsert into functional_kudu.testtbl
 select * from functional_kudu.testtbl
@@ -122,15 +144,18 @@ select * from functional_kudu.testtbl
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
+   row-size=28B cardinality=0
 ---- DISTRIBUTEDPLAN
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 02:PARTIAL SORT
 |  order by: KuduPartition(functional_kudu.testtbl.id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=28B cardinality=0
 |
 01:EXCHANGE [KUDU(KuduPartition(functional_kudu.testtbl.id))]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
+   row-size=28B cardinality=0
 ====
 # upsert with a union
 upsert into functional_kudu.testtbl select * from functional_kudu.testtbl where id % 3 = 0
@@ -139,27 +164,34 @@ union all select * from functional_kudu.testtbl where id % 3 = 1
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 00:UNION
+|  row-size=24B cardinality=0
 |
 |--02:SCAN KUDU [functional_kudu.testtbl]
 |     predicates: id % 3 = 1
+|     row-size=28B cardinality=0
 |
 01:SCAN KUDU [functional_kudu.testtbl]
    predicates: id % 3 = 0
+   row-size=28B cardinality=0
 ---- DISTRIBUTEDPLAN
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 04:PARTIAL SORT
 |  order by: KuduPartition(id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=28B cardinality=0
 |
 03:EXCHANGE [KUDU(KuduPartition(id))]
 |
 00:UNION
+|  row-size=24B cardinality=0
 |
 |--02:SCAN KUDU [functional_kudu.testtbl]
 |     predicates: id % 3 = 1
+|     row-size=28B cardinality=0
 |
 01:SCAN KUDU [functional_kudu.testtbl]
    predicates: id % 3 = 0
+   row-size=28B cardinality=0
 ====
 # upsert with agg on col that is already partitioned in the input and target table
 # TODO: we shouldn't need to do any repartioning here (IMPALA-5254).
@@ -170,25 +202,31 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 01:AGGREGATE [FINALIZE]
 |  group by: id
+|  row-size=4B cardinality=7.30K
 |
 00:SCAN KUDU [functional_kudu.alltypes]
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 05:PARTIAL SORT
 |  order by: KuduPartition(id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=8B cardinality=7.30K
 |
 04:EXCHANGE [KUDU(KuduPartition(id))]
 |
 03:AGGREGATE [FINALIZE]
 |  group by: id
+|  row-size=4B cardinality=7.30K
 |
 02:EXCHANGE [HASH(id)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: id
+|  row-size=4B cardinality=7.30K
 |
 00:SCAN KUDU [functional_kudu.alltypes]
+   row-size=4B cardinality=7.30K
 ====
 # Hint - noshuffle should remove the exchange node.
 upsert into functional_kudu.alltypes /* +noshuffle */ select * from functional.alltypes;
@@ -197,9 +235,11 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 01:PARTIAL SORT
 |  order by: KuduPartition(functional.alltypes.id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=93B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # same as above but the hint is at Oracle hint location.
 upsert /* +noshuffle */ into functional_kudu.alltypes select * from functional.alltypes;
@@ -208,9 +248,11 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 01:PARTIAL SORT
 |  order by: KuduPartition(functional.alltypes.id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=93B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Hint - noclustered should remove the sort node.
 upsert into functional_kudu.alltypes /* +noclustered */ select * from functional.alltypes;
@@ -221,6 +263,7 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # same as above but the hint is at Oracle hint location.
 upsert /* +noclustered */ into functional_kudu.alltypes select * from functional.alltypes;
@@ -231,6 +274,7 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Hint - noshuffle should remove the exchange node.
 upsert into functional_kudu.alltypes /* +noshuffle */ select * from functional.alltypes;
@@ -239,9 +283,11 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 01:PARTIAL SORT
 |  order by: KuduPartition(functional.alltypes.id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=93B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # same as above but the hint is at Oracle hint location.
 upsert /* +noshuffle */ into functional_kudu.alltypes select * from functional.alltypes;
@@ -250,9 +296,11 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 01:PARTIAL SORT
 |  order by: KuduPartition(functional.alltypes.id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=93B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 upsert into functional_kudu.alltypes /* +noclustered,noshuffle */
 select * from functional.alltypes;
@@ -261,6 +309,7 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # same as above but the hint is at Oracle hint location.
 upsert into functional_kudu.alltypes /* +noclustered,noshuffle */
@@ -270,4 +319,5 @@ UPSERT INTO KUDU [functional_kudu.alltypes]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/kudu.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/kudu.test b/testdata/workloads/functional-planner/queries/PlannerTest/kudu.test
index dfea970..d8b50dc 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/kudu.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/kudu.test
@@ -3,6 +3,7 @@ select * from functional_kudu.testtbl
 PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.testtbl]
+   row-size=28B cardinality=0
 ---- SCANRANGELOCATIONS
 NODE 0:
   ScanToken{table=impala::functional_kudu.testtbl, range-partition: [(int64 id=1004), (int64 id=1008))}
@@ -14,6 +15,7 @@ PLAN-ROOT SINK
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
+   row-size=28B cardinality=0
 ====
 select * from functional_kudu.testtbl where name = '10'
 ---- PLAN
@@ -21,6 +23,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: name = '10'
+   row-size=28B cardinality=0
 ---- SCANRANGELOCATIONS
 NODE 0:
   ScanToken{table=impala::functional_kudu.testtbl, range-partition: [(int64 id=1004), (int64 id=1008))}
@@ -33,6 +36,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: name = '10'
+   row-size=28B cardinality=0
 ====
 select * from functional_kudu.testtbl where name = NULL
 ---- PLAN
@@ -40,6 +44,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    predicates: name = NULL
+   row-size=28B cardinality=0
 ====
 insert into functional_kudu.testtbl(id) values (10)
 ---- PLAN
@@ -47,31 +52,37 @@ INSERT INTO KUDU [functional_kudu.testtbl]
 |
 00:UNION
    constant-operands=1
+   row-size=1B cardinality=1
 ---- DISTRIBUTEDPLAN
 INSERT INTO KUDU [functional_kudu.testtbl]
 |
 02:PARTIAL SORT
 |  order by: KuduPartition(10) ASC NULLS LAST, 10 ASC NULLS LAST
+|  row-size=5B cardinality=1
 |
 01:EXCHANGE [KUDU(KuduPartition(10))]
 |
 00:UNION
    constant-operands=1
+   row-size=1B cardinality=1
 ====
 insert into functional_kudu.testtbl(id) select int_col from functional_kudu.tinyinttable
 ---- PLAN
 INSERT INTO KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.tinyinttable]
+   row-size=4B cardinality=10
 ---- DISTRIBUTEDPLAN
 INSERT INTO KUDU [functional_kudu.testtbl]
 |
 02:PARTIAL SORT
 |  order by: KuduPartition(int_col) ASC NULLS LAST, int_col ASC NULLS LAST
+|  row-size=8B cardinality=10
 |
 01:EXCHANGE [KUDU(KuduPartition(int_col))]
 |
 00:SCAN KUDU [functional_kudu.tinyinttable]
+   row-size=4B cardinality=10
 ====
 insert into functional_kudu.testtbl(id, name)
 select count(distinct id), name from functional_kudu.dimtbl
@@ -82,38 +93,47 @@ INSERT INTO KUDU [functional_kudu.testtbl]
 02:AGGREGATE [FINALIZE]
 |  output: count(id)
 |  group by: name
+|  row-size=25B cardinality=10
 |
 01:AGGREGATE
 |  group by: name, id
+|  row-size=25B cardinality=10
 |
 00:SCAN KUDU [functional_kudu.dimtbl]
+   row-size=29B cardinality=10
 ---- DISTRIBUTEDPLAN
 INSERT INTO KUDU [functional_kudu.testtbl]
 |
 08:PARTIAL SORT
 |  order by: KuduPartition(count(id)) ASC NULLS LAST, count(id) ASC NULLS LAST
+|  row-size=29B cardinality=10
 |
 07:EXCHANGE [KUDU(KuduPartition(count(id)))]
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(id)
 |  group by: name
+|  row-size=25B cardinality=10
 |
 05:EXCHANGE [HASH(name)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(id)
 |  group by: name
+|  row-size=25B cardinality=10
 |
 04:AGGREGATE
 |  group by: name, id
+|  row-size=25B cardinality=10
 |
 03:EXCHANGE [HASH(name,id)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: name, id
+|  row-size=25B cardinality=10
 |
 00:SCAN KUDU [functional_kudu.dimtbl]
+   row-size=29B cardinality=10
 ====
 # All predicates can be pushed down.
 select * from functional_kudu.testtbl
@@ -124,6 +144,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: id <= 20, id >= 10, zip < 50, zip <= 30, zip <= 5, zip > 1, zip >= 0, name = 'foo'
+   row-size=28B cardinality=0
 ---- SCANRANGELOCATIONS
 NODE 0:
   ScanToken{table=impala::functional_kudu.testtbl, range-partition: [<start>, (int64 id=1004))}
@@ -134,6 +155,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: id <= 20, id >= 10, zip < 50, zip <= 30, zip <= 5, zip > 1, zip >= 0, name = 'foo'
+   row-size=28B cardinality=0
 ====
 # Constant propagation works for Kudu
 select * from functional_kudu.alltypes t
@@ -145,6 +167,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.alltypes t]
    predicates: CAST(t.tinyint_col AS BIGINT) = 1000
    kudu predicates: t.bigint_col = 1000, t.int_col = 10
+   row-size=97B cardinality=107
 ====
 # Test constant folding.
 select * from functional_kudu.testtbl
@@ -155,6 +178,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.testtbl]
    predicates: CAST(sin(id) AS BOOLEAN) = TRUE
    kudu predicates: id < 103, id < 40, id <= 60
+   row-size=28B cardinality=0
 ---- SCANRANGELOCATIONS
 NODE 0:
   ScanToken{table=impala::functional_kudu.testtbl, range-partition: [<start>, (int64 id=1004))}
@@ -166,6 +190,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.testtbl]
    predicates: CAST(sin(id) AS BOOLEAN) = TRUE
    kudu predicates: id < 103, id < 40, id <= 60
+   row-size=28B cardinality=0
 ====
 # Some predicates can be pushed down but others can't (predicate on an non-const value).
 select * from functional_kudu.testtbl
@@ -176,6 +201,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.testtbl]
    predicates: CAST(sin(id) AS BOOLEAN) = TRUE
    kudu predicates: name = 'a'
+   row-size=28B cardinality=0
 ---- SCANRANGELOCATIONS
 NODE 0:
   ScanToken{table=impala::functional_kudu.testtbl, range-partition: [(int64 id=1004), (int64 id=1008))}
@@ -189,6 +215,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.testtbl]
    predicates: CAST(sin(id) AS BOOLEAN) = TRUE
    kudu predicates: name = 'a'
+   row-size=28B cardinality=0
 ====
 # No predicates can be pushed down (predicate on a non-const value and
 # non-binary predicate). There is a single tablet so no partition pruning.
@@ -200,6 +227,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.testtbl]
    predicates: CAST(sin(id) AS BOOLEAN) = TRUE
    kudu predicates: name IS NULL
+   row-size=28B cardinality=0
 ---- SCANRANGELOCATIONS
 NODE 0:
   ScanToken{table=impala::functional_kudu.testtbl, range-partition: [(int64 id=1004), (int64 id=1008))}
@@ -213,6 +241,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.testtbl]
    predicates: CAST(sin(id) AS BOOLEAN) = TRUE
    kudu predicates: name IS NULL
+   row-size=28B cardinality=0
 ====
 # Derived EmptySets for Kudu
 select * from functional_kudu.alltypes t
@@ -230,9 +259,11 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: o_orderkey ASC
+|  row-size=8B cardinality=150.00K
 |
 00:SCAN KUDU [tpch_kudu.orders]
    predicates: o_orderkey < 10.0
+   row-size=8B cardinality=150.00K
 ====
 # IMPALA-3871: Casting literals to TIMESTAMP throw when pushed to KuduScanNode
 select t.c from
@@ -243,10 +274,12 @@ PLAN-ROOT SINK
 |
 01:SORT
 |  order by: c ASC
+|  row-size=22B cardinality=150.00K
 |
 00:SCAN KUDU [tpch_kudu.orders]
    predicates: CAST(o_orderdate AS TIMESTAMP) <= TIMESTAMP '1995-01-01 00:00:00'
    kudu predicates: o_orderkey < 10
+   row-size=26B cardinality=150.00K
 ====
 # IMPALA-4213: Planner not pushing some predicates with constant exprs to Kudu
 select count(*) from functional_kudu.alltypes
@@ -257,9 +290,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN KUDU [functional_kudu.alltypes]
    kudu predicates: id < 1475059775, id > 1475059665
+   row-size=0B cardinality=730
 ====
 insert into table functional_kudu.alltypes
 select * from functional_kudu.alltypes
@@ -267,15 +302,18 @@ select * from functional_kudu.alltypes
 INSERT INTO KUDU [functional_kudu.alltypes]
 |
 00:SCAN KUDU [functional_kudu.alltypes]
+   row-size=97B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 INSERT INTO KUDU [functional_kudu.alltypes]
 |
 02:PARTIAL SORT
 |  order by: KuduPartition(functional_kudu.alltypes.id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=93B cardinality=7.30K
 |
 01:EXCHANGE [KUDU(KuduPartition(functional_kudu.alltypes.id))]
 |
 00:SCAN KUDU [functional_kudu.alltypes]
+   row-size=97B cardinality=7.30K
 ====
 insert into table functional_kudu.testtbl
 select id, name, maxzip as zip
@@ -289,27 +327,33 @@ INSERT INTO KUDU [functional_kudu.testtbl]
 01:AGGREGATE [FINALIZE]
 |  output: max(zip)
 |  group by: id, name
+|  row-size=24B cardinality=0
 |
 00:SCAN KUDU [functional_kudu.testtbl]
+   row-size=28B cardinality=0
 ---- DISTRIBUTEDPLAN
 INSERT INTO KUDU [functional_kudu.testtbl]
 |
 05:PARTIAL SORT
 |  order by: KuduPartition(id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=28B cardinality=0
 |
 04:EXCHANGE [KUDU(KuduPartition(id))]
 |
 03:AGGREGATE [FINALIZE]
 |  output: max:merge(zip)
 |  group by: id, name
+|  row-size=24B cardinality=0
 |
 02:EXCHANGE [HASH(id,name)]
 |
 01:AGGREGATE [STREAMING]
 |  output: max(zip)
 |  group by: id, name
+|  row-size=24B cardinality=0
 |
 00:SCAN KUDU [functional_kudu.testtbl]
+   row-size=28B cardinality=0
 ====
 # IMPALA-4479: Test proper folding of constant boolean exprs.
 select * from functional_kudu.alltypes
@@ -319,6 +363,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.alltypes]
    kudu predicates: bool_col = FALSE
+   row-size=97B cardinality=3.65K
 ====
 # IMPALA-4578: Test predicate propagation for Kudu scans.
 select /* +straight_join */ count(*)
@@ -334,26 +379,32 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: b.int_col = c.int_col
 |  other predicates: c.int_col > 20
+|  row-size=16B cardinality=1
 |
 |--02:SCAN KUDU [functional_kudu.alltypestiny c]
 |     kudu predicates: c.int_col > 20
+|     row-size=4B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF001 <- b.id
+|  row-size=12B cardinality=1
 |
 |--01:SCAN KUDU [functional_kudu.alltypessmall b]
 |     predicates: CAST(b.id AS STRING) > '123'
 |     kudu predicates: b.id > 10
+|     row-size=8B cardinality=10
 |
 00:SCAN KUDU [functional_kudu.alltypes a]
    predicates: CAST(a.id AS STRING) > '123'
    kudu predicates: a.id > 10
    runtime filters: RF001 -> a.id
+   row-size=4B cardinality=730
 ====
 # IMPALA-4662: Kudu analysis failure for NULL literal in IN list
 # NULL literal in values list results in applying predicate at scan node
@@ -366,6 +417,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.alltypestiny]
    predicates: id IN (1, NULL), bigint_col IN (NULL), bool_col IN (NULL), double_col IN (NULL), float_col IN (NULL), smallint_col IN (NULL), string_col IN (NULL), tinyint_col IN (NULL)
+   row-size=45B cardinality=1
 ====
 # IMPALA-3586: The operand with the Kudu scan cannot be passed through because id is
 # not-nullable (primary key).
@@ -377,11 +429,14 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: 02
+|  row-size=4B cardinality=14.60K
 |
 |--01:SCAN KUDU [functional_kudu.alltypes]
+|     row-size=4B cardinality=7.30K
 |
 02:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-3586: When both operands are Kudu scans, they should both be passed through.
 select id from functional_kudu.alltypes
@@ -392,10 +447,13 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=4B cardinality=14.60K
 |
 |--02:SCAN KUDU [functional_kudu.alltypes]
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN KUDU [functional_kudu.alltypes]
+   row-size=4B cardinality=7.30K
 ====
 # Hint - noshuffle should remove the exchange node.
 insert into functional_kudu.alltypes /* +noshuffle */ select * from functional.alltypes;
@@ -404,9 +462,11 @@ INSERT INTO KUDU [functional_kudu.alltypes]
 |
 01:PARTIAL SORT
 |  order by: KuduPartition(functional.alltypes.id) ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=93B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Hint - noclustered should remove the sort node.
 insert into functional_kudu.alltypes /* +noclustered */ select * from functional.alltypes;
@@ -417,6 +477,7 @@ INSERT INTO KUDU [functional_kudu.alltypes]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 insert into functional_kudu.alltypes /* +noclustered,noshuffle */
 select * from functional.alltypes;
@@ -425,6 +486,7 @@ INSERT INTO KUDU [functional_kudu.alltypes]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Hint - noclustered should remove the sort node from CTAS.
 create /* +noclustered */ table t
@@ -437,6 +499,7 @@ INSERT INTO KUDU [default.t]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 create /* +noclustered,noshuffle */ table t
 primary key(id) partition by hash(id) partitions 3 stored as kudu as
@@ -446,6 +509,7 @@ INSERT INTO KUDU [default.t]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # IMPALA-5602: If a query contains predicates that are all pushed to kudu and there is a
 # limit, then the query should not incorrectly run with 'small query' optimization.
@@ -459,6 +523,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.alltypesagg_idx]
    kudu predicates: functional_kudu.alltypesagg_idx.tinyint_col = 9
    limit: 10
+   row-size=103B cardinality=10
 ====
 # IMPALA-5602: If a query contains predicates that are all pushed to kudu, there is a
 # limit, and no table stats, then the query should not incorrectly run with 'small query'
@@ -473,6 +538,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [kudu_planner_test.no_stats]
    kudu predicates: tinyint_col = 9
    limit: 10
+   row-size=88B cardinality=10
 ====
 # Insert into an unpartitioned table, shouldn't partition/sort
 insert into tpch_kudu.nation
@@ -481,7 +547,8 @@ select * from tpch_parquet.nation
 INSERT INTO KUDU [tpch_kudu.nation]
 |
 00:SCAN HDFS [tpch_parquet.nation]
-   partitions=1/1 files=1 size=2.74KB
+   partitions=1/1 files=1 size=2.75KB
+   row-size=109B cardinality=25
 ====
 # Unpartitioned table, still has an exchange due to shuffle hint.
 insert into tpch_kudu.nation /* +shuffle */
@@ -492,7 +559,8 @@ INSERT INTO KUDU [tpch_kudu.nation]
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [tpch_parquet.nation]
-   partitions=1/1 files=1 size=2.74KB
+   partitions=1/1 files=1 size=2.75KB
+   row-size=109B cardinality=25
 ====
 # Unpartitioned table, clustered hint forces sort node.
 insert into tpch_kudu.nation /* +clustered */
@@ -502,9 +570,11 @@ INSERT INTO KUDU [tpch_kudu.nation]
 |
 01:PARTIAL SORT
 |  order by: n_nationkey ASC NULLS LAST
+|  row-size=109B cardinality=25
 |
 00:SCAN HDFS [tpch_parquet.nation]
    partitions=1/1 files=1 size=2.75KB
+   row-size=109B cardinality=25
 ====
 # Unpartitioned table, no sort node without clustered hint.
 insert into tpch_kudu.nation
@@ -514,6 +584,7 @@ INSERT INTO KUDU [tpch_kudu.nation]
 |
 00:SCAN HDFS [tpch_parquet.nation]
    partitions=1/1 files=1 size=2.75KB
+   row-size=109B cardinality=25
 ====
 # Partition and primary key exprs are all constant, so don't partition/sort.
 insert into functional_kudu.alltypes (id)
@@ -522,6 +593,7 @@ select 1 from functional_kudu.alltypes
 INSERT INTO KUDU [functional_kudu.alltypes]
 |
 00:SCAN KUDU [functional_kudu.alltypes]
+   row-size=0B cardinality=7.30K
 ====
 # Partition exprs are constant but primary key exprs aren't, so sort but don't partition.
 insert into functional_kudu.jointbl (test_id, test_name, test_zip, alltypes_id)
@@ -531,9 +603,11 @@ INSERT INTO KUDU [functional_kudu.jointbl]
 |
 01:PARTIAL SORT
 |  order by: int_col ASC NULLS LAST, id ASC NULLS LAST
+|  row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====
 # Decimal predicate.
 select * from functional_kudu.decimal_tbl where d4 = 0.123456789;
@@ -542,6 +616,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.decimal_tbl]
    kudu predicates: d4 = 0.123456789
+   row-size=56B cardinality=5
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -549,4 +624,5 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.decimal_tbl]
    kudu predicates: d4 = 0.123456789
-====
\ No newline at end of file
+   row-size=56B cardinality=5
+====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test b/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
index 02ac733..9c4c49b 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
@@ -9,8 +9,8 @@ select * from (
     "queryText":"select * from (\n  select tinyint_col + int_col x from functional.alltypes\n  union all\n  select sum(bigint_col) y from (select bigint_col from functional.alltypes) v1) v2",
     "queryId":"0:0",
     "hash":"25456c60a2e874a20732f42c7af27553",
-    "user":"dev",
-    "timestamp":1446159271,
+    "user":"progers",
+    "timestamp":1546590134,
     "edges":[
         {
             "sources":[
@@ -63,8 +63,8 @@ order by b.bigint_col limit 10
     "queryText":"select sum(a.tinyint_col) over (partition by a.smallint_col order by a.id),\n  count(b.string_col), b.timestamp_col\nfrom functional.alltypes a join functional.alltypessmall b on (a.id = b.id)\nwhere a.year = 2010 and b.float_col > 0\ngroup by a.tinyint_col, a.smallint_col, a.id, b.string_col, b.timestamp_col, b.bigint_col\nhaving count(a.int_col) > 10\norder by b.bigint_col limit 10",
     "queryId":"0:0",
     "hash":"e0309eeff9811f53c82657d62c1e04eb",
-    "user":"dev",
-    "timestamp":1446159271,
+    "user":"progers",
+    "timestamp":1546590134,
     "edges":[
         {
             "sources":[
@@ -199,9 +199,9 @@ create table lineage_test_tbl as select int_col, tinyint_col from functional.all
 {
     "queryText":"create table lineage_test_tbl as select int_col, tinyint_col from functional.alltypes",
     "queryId":"0:0",
-    "hash":"f7666959b65ce1aa2a695ae90adb7c85",
-    "user":"dev",
-    "timestamp":1446159271,
+    "hash":"407f23b24758ffcb2ac445b9703f5c44",
+    "user":"progers",
+    "timestamp":1546590134,
     "edges":[
         {
             "sources":[
@@ -254,9 +254,9 @@ where a.year = 2009 and b.month = 2
 {
     "queryText":"create table lineage_test_tbl as\nselect distinct a.int_col, a.string_col from functional.alltypes a\ninner join functional.alltypessmall b on (a.id = b.id)\nwhere a.year = 2009 and b.month = 2",
     "queryId":"0:0",
-    "hash":"6d83126f8e34eec31ed4e111e1c32e78",
-    "user":"dev",
-    "timestamp":1446159271,
+    "hash":"f3101dcb046a7d34d7ee14892a6cc94e",
+    "user":"progers",
+    "timestamp":1546590134,
     "edges":[
         {
             "sources":[
@@ -342,9 +342,9 @@ select * from
 {
     "queryText":"create table lineage_test_tbl as\nselect * from\n  (select * from\n     (select int_col from functional.alltypestiny limit 1) v1 ) v2",
     "queryId":"0:0",
-    "hash":"f719f8eba46eda75e9cc560310885558",
-    "user":"dev",
-    "timestamp":1446159271,
+    "hash":"9c04c1e9feee35ffacf14bfcd3b363a7",
+    "user":"progers",
+    "timestamp":1546590134,
     "edges":[
         {
             "sources":[
@@ -376,9 +376,9 @@ create table lineage_test_tblm as select * from functional_hbase.alltypes limit
 {
     "queryText":"create table lineage_test_tblm as select * from functional_hbase.alltypes limit 5",
     "queryId":"0:0",
-    "hash":"bedebc5bc72bbc6aec385c514944daae",
-    "user":"dev",
-    "timestamp":1446159271,
+    "hash":"a294f36bddf2adb329eac3055a76b2b5",
+    "user":"progers",
+    "timestamp":1546590134,
     "edges":[
         {
             "sources":[
@@ -642,8 +642,8 @@ functional_hbase.alltypes
     "queryText":"insert into\nfunctional_hbase.alltypes\n  values (1, 1, true, \"1999-12-01\", 2.0, 1.0, 1, 12, 2, \"abs\",\n  cast(now() as timestamp), 1, 1999)",
     "queryId":"0:0",
     "hash":"b923425ce9cc2d53d36523ec83971e67",
-    "user":"dev",
-    "timestamp":1446159271,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -827,8 +827,8 @@ from functional.alltypes
     "queryText":"insert into table functional.alltypesnopart (id, bool_col, timestamp_col)\nselect id, bool_col, timestamp_col\nfrom functional.alltypes",
     "queryId":"0:0",
     "hash":"b7b9474fc6b97f104bd031209438ee0e",
-    "user":"dev",
-    "timestamp":1446159271,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -1006,8 +1006,8 @@ where year=2009 and month=05
     "queryText":"insert into table functional.alltypessmall (smallint_col, int_col)\npartition (year=2009, month=04)\nselect smallint_col, int_col\nfrom functional.alltypes\nwhere year=2009 and month=05",
     "queryId":"0:0",
     "hash":"2ed3a6c784e1c0c7fcef226d71375180",
-    "user":"dev",
-    "timestamp":1446159271,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -1237,8 +1237,8 @@ where year=2009 and month>10
     "queryText":"insert into table functional.alltypessmall (id, string_col, int_col)\npartition (year, month)\nselect id, string_col, int_col, year, month\nfrom functional_seq_snap.alltypes\nwhere year=2009 and month>10",
     "queryId":"0:0",
     "hash":"39ac95ce0632ef1ee8b474be644971f3",
-    "user":"dev",
-    "timestamp":1446159271,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -1480,8 +1480,8 @@ having min(id) > 10
     "queryText":"insert into table functional.alltypessmall\npartition (year=2009, month)\nselect min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col),\nmin(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col),\nmin(timestamp_col), month\nfrom functional.alltypes\nwhere year=2009 and month>10\ngroup by month\nhaving min(id) > 10",
     "queryId":"0:0",
     "hash":"e6969c2cc67e9d6f3f985ddc6431f915",
-    "user":"dev",
-    "timestamp":1446159271,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -1767,8 +1767,8 @@ group by int_col, tinyint_col
     "queryText":"select\nmax(tinyint_col) over(partition by int_col)\nfrom functional.alltypes\ngroup by int_col, tinyint_col",
     "queryId":"0:0",
     "hash":"83c78528e6f5325c56a3f3521b08a78d",
-    "user":"dev",
-    "timestamp":1446159271,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -1814,8 +1814,8 @@ select int_col, rank() over(order by int_col) from functional.alltypesagg
     "queryText":"select int_col, rank() over(order by int_col) from functional.alltypesagg",
     "queryId":"0:0",
     "hash":"4f1ecaaed571d2ed9f09f091f399c311",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -1872,8 +1872,8 @@ order by a.tinyint_col, a.int_col
     "queryText":"select a.tinyint_col, a.int_col, count(a.double_col)\n  over(partition by a.tinyint_col order by a.int_col desc rows between 1 preceding and 1 following)\nfrom functional.alltypes a inner join functional.alltypessmall b on a.id = b.id\norder by a.tinyint_col, a.int_col",
     "queryId":"0:0",
     "hash":"b6e26c00b2ef17f0592ebadb0ecc21f6",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -1983,8 +1983,8 @@ order by 2, 3, 4
     "queryText":"with v2 as\n  (select\n   double_col,\n   count(int_col) over() a,\n   sum(int_col + bigint_col) over(partition by bool_col) b\n   from\n     (select * from functional.alltypes) v1)\nselect double_col, a, b, a + b, double_col + a from v2\norder by 2, 3, 4",
     "queryId":"0:0",
     "hash":"6bf993cea0d1ab9e613674ef178916c9",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2115,8 +2115,8 @@ order by 2, 3, 4
     "queryText":"select double_col, a, b, a + b, double_col + a from\n  (select\n   double_col,\n   count(int_col) over() a,\n   sum(int_col + bigint_col) over(partition by bool_col) b\n   from\n     (select * from functional.alltypes) v1) v2\norder by 2, 3, 4",
     "queryId":"0:0",
     "hash":"811403c86e86fe630dea7bd0a6c89273",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2249,8 +2249,8 @@ where b.month = 1
     "queryText":"select a.month, a.year, b.int_col, b.month\nfrom\n  (select year, month from functional.alltypes\n   union all\n   select year, month from functional.alltypes) a\n  inner join\n  functional.alltypessmall b\n  on (a.month = b.month)\nwhere b.month = 1",
     "queryId":"0:0",
     "hash":"e3000cd5edf2a02e1f5407810f3cc09a",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2357,8 +2357,8 @@ where month = 1
     "queryText":"select t1.int_col, t2.month, t2.int_col + 1\nfrom (\n  select int_col, count(*)\n  from functional.alltypessmall\n  where month = 1\n  group by int_col\n  having count(*) > 1\n  order by count(*) desc limit 5\n  ) t1\njoin functional.alltypes t2 on (t1.int_col = t2.int_col)\nwhere month = 1",
     "queryId":"0:0",
     "hash":"3f1ecf7239e205342aee4979e7cb4877",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2459,8 +2459,8 @@ and x.int_col + x.float_col + cast(c.string_col as float) < 1000
     "queryText":"select x.smallint_col, x.id, x.tinyint_col, c.id, x.int_col, x.float_col, c.string_col\nfrom functional.alltypessmall c\njoin (\n   select a.smallint_col smallint_col, a.tinyint_col tinyint_col, a.day day,\n           a.int_col int_col, a.month month, b.float_col float_col, b.id id\n   from ( select * from functional.alltypesagg a where month=1 ) a\n   join functional.alltypessmall b on (a.smallint_col = b.id)\n ) x on (x.tinyint_col = c.id)\nwhere x.day=1\nand x.int_col > 899\nand x.float_col > 4.5\nand c.string_col < '7'\nand x.int_col + x.float_col + cast(c.string_col as float) < 1000",
     "queryId":"0:0",
     "hash":"4edf165aed5982ede63f7c91074f4b44",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2636,8 +2636,8 @@ from
     "queryText":"select c1, c2, c3\nfrom\n  (select c1, c2, c3\n   from\n     (select int_col c1, sum(float_col) c2, min(float_col) c3\n      from functional_hbase.alltypessmall\n      group by 1) x\n    order by 2,3 desc\n    limit 5\n) y",
     "queryId":"0:0",
     "hash":"8b4d1ab11721d9ebdf26666d4195eb18",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2730,8 +2730,8 @@ limit 0
     "queryText":"select c1, x2\nfrom (\n  select c1, min(c2) x2\n  from (\n    select c1, c2, c3\n    from (\n      select int_col c1, tinyint_col c2, min(float_col) c3\n      from functional_hbase.alltypessmall\n      group by 1, 2\n      order by 1,2\n      limit 1\n    ) x\n  ) x2\n  group by c1\n) y\norder by 2,1 desc\nlimit 0",
     "queryId":"0:0",
     "hash":"50d3b4f249f038b0711ea75c17640fc9",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2794,8 +2794,8 @@ select int_col, string_col from functional.view_view
     "queryText":"select int_col, string_col from functional.view_view",
     "queryId":"0:0",
     "hash":"9073496459077de1332e5017977dedf5",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2847,8 +2847,8 @@ where t.id < 10
     "queryText":"select t.id from (select id from functional.alltypes_view) t\nwhere t.id < 10",
     "queryId":"0:0",
     "hash":"8ba7998033f90e1e358f4fdc7ea4251b",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2893,8 +2893,8 @@ where id in
     "queryText":"select string_col, float_col, bool_col\nfrom functional.alltypes\nwhere id in\n  (select id from functional.alltypesagg)",
     "queryId":"0:0",
     "hash":"e8ad1371d2a13e1ee9ec45689b62cdc9",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -2993,8 +2993,8 @@ and tinyint_col < 10
     "queryText":"select 1\nfrom functional.alltypesagg a\nwhere exists\n  (select id, count(int_col) over (partition by bool_col)\n   from functional.alltypestiny b\n   where a.tinyint_col = b.tinyint_col\n   group by id, int_col, bool_col)\nand tinyint_col < 10",
     "queryId":"0:0",
     "hash":"a7500c022d29c583c31b287868a848bf",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3044,8 +3044,8 @@ and a.bigint_col > 10
     "queryText":"select int_col + 1, tinyint_col - 1\nfrom functional.alltypes a\nwhere a.int_col <\n  (select max(int_col) from functional.alltypesagg g where g.bool_col = true)\nand a.bigint_col > 10",
     "queryId":"0:0",
     "hash":"5e6227f323793ea4441e2a3119af2f09",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3125,8 +3125,8 @@ with t as (select int_col x, bigint_col y from functional.alltypes) select x, y
     "queryText":"with t as (select int_col x, bigint_col y from functional.alltypes) select x, y from t",
     "queryId":"0:0",
     "hash":"a7ab58d90540f28a8dfd69703632ad7a",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3179,8 +3179,8 @@ select id, int_col, string_col, year, month from t1
     "queryText":"with t1 as (select * from functional.alltypestiny)\ninsert into functional.alltypesinsert (id, int_col, string_col) partition(year, month)\nselect id, int_col, string_col, year, month from t1",
     "queryId":"0:0",
     "hash":"0bc5b3e66cc72387f74893b1f1934946",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3398,8 +3398,8 @@ from
     "queryText":"select lead(a) over (partition by b order by c)\nfrom\n  (select lead(id) over (partition by int_col order by bigint_col) as a,\n   max(id) over (partition by tinyint_col order by int_col) as b,\n   min(int_col) over (partition by string_col order by bool_col) as c\n   from functional.alltypes) v",
     "queryId":"0:0",
     "hash":"aa95e5e6f39fc80bb3c318a2515dc77d",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3471,8 +3471,8 @@ create view test_view_lineage as select id from functional.alltypestiny
     "queryText":"create view test_view_lineage as select id from functional.alltypestiny",
     "queryId":"0:0",
     "hash":"ff6b1ecb265afe4f03355a07238cfe37",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3521,8 +3521,8 @@ limit 0
     "queryText":"create view test_view_lineage (a, b) as select c1, x2\nfrom (\n  select c1, min(c2) x2\n  from (\n    select c1, c2, c3\n    from (\n      select int_col c1, tinyint_col c2, min(float_col) c3\n      from functional_hbase.alltypessmall\n      group by 1, 2\n      order by 1,2\n      limit 1\n    ) x\n  ) x2\n  group by c1\n) y\norder by 2,1 desc\nlimit 0",
     "queryId":"0:0",
     "hash":"b96adf892b897da1e562c5be98724fb5",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3598,8 +3598,8 @@ create view test_view_lineage (a1, a2, a3, a4, a5, a6, a7) as
     "queryText":"create view test_view_lineage (a1, a2, a3, a4, a5, a6, a7) as\n  select x.smallint_col, x.id, x.tinyint_col, c.id, x.int_col, x.float_col, c.string_col\n  from functional.alltypessmall c\n  join (\n     select a.smallint_col smallint_col, a.tinyint_col tinyint_col, a.day day,\n           a.int_col int_col, a.month month, b.float_col float_col, b.id id\n     from ( select * from functional.alltypesagg a where month=1 ) a\n     join functional.alltypessmall b on (a.smallint_col = b.id)\n   ) x on (x.tinyint_col = c.id)\n  where x.day=1\n  and x.int_col > 899\n  and x.float_col > 4.5\n  and c.string_col < '7'\n  and x.int_col + x.float_col + cast(c.string_col as float) < 1000",
     "queryId":"0:0",
     "hash":"ffbe643df8f26e92907fb45de1aeda36",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3781,8 +3781,8 @@ create view test_view_lineage as
     "queryText":"create view test_view_lineage as\n  select * from (\n    select sum(a.tinyint_col) over (partition by a.smallint_col order by a.id),\n      count(b.string_col), b.timestamp_col\n    from functional.alltypes a join functional.alltypessmall b on (a.id = b.id)\n    where a.year = 2010 and b.float_col > 0\n    group by a.tinyint_col, a.smallint_col, a.id, b.string_col, b.timestamp_col, b.bigint_col\n    having count(a.int_col) > 10\n    order by b.bigint_col limit 10) t",
     "queryId":"0:0",
     "hash":"d4b9e2d63548088f911816b2ae29d7c2",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3918,8 +3918,8 @@ alter view functional.alltypes_view as select id from functional.alltypestiny
     "queryText":"alter view functional.alltypes_view as select id from functional.alltypestiny",
     "queryId":"0:0",
     "hash":"8c9367afc562a4c04d2d40e1276646c2",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -3959,8 +3959,8 @@ select * from (
     "queryText":"select * from (\n  select int_struct_col.f1 + int_struct_col.f2 x from functional.allcomplextypes\n  where year = 2000\n  order by nested_struct_col.f2.f12.f21 limit 10\n  union all\n  select sum(f1) y from\n    (select complex_struct_col.f1 f1 from functional.allcomplextypes\n     group by 1) v1) v2",
     "queryId":"0:0",
     "hash":"4fb3ceddbf596097335af607d528f5a7",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4027,8 +4027,8 @@ select * from functional.allcomplextypes.int_array_col a inner join
     "queryText":"select * from functional.allcomplextypes.int_array_col a inner join\n  functional.allcomplextypes.struct_map_col m on (a.item = m.f1)",
     "queryId":"0:0",
     "hash":"8c0c64f8a4c08b82ad343ab439101957",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4133,8 +4133,8 @@ select * from functional.allcomplextypes t, t.int_array_col a, t.struct_map_col
     "queryText":"select * from functional.allcomplextypes t, t.int_array_col a, t.struct_map_col m\n  where a.item = m.f1",
     "queryId":"0:0",
     "hash":"1b0db371b32e90d33629ed7779332cf7",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4318,8 +4318,8 @@ select a + b as ab, c, d, e from functional.allcomplextypes t,
     "queryText":"select a + b as ab, c, d, e from functional.allcomplextypes t,\n  (select sum(item) a from t.int_array_col\n   where item < 10) v1,\n  (select count(f1) b from t.struct_map_col\n   group by key) v2,\n  (select avg(value) over(partition by key) c from t.map_map_col.value) v3,\n  (select item d from t.int_array_col\n   union all\n   select value from t.int_map_col) v4,\n  (select f21 e from t.complex_nested_struct_col.f2.f12 order by key limit 10) v5",
     "queryId":"0:0",
     "hash":"4affc0d1e384475d1ff2fc2e19643064",
-    "user":"dev",
-    "timestamp":1446159272,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4456,8 +4456,8 @@ where not exists (select 1 from functional.alltypes a where v.id = a.id)
     "queryText":"create view test_view_lineage as\nselect id from functional.alltypes_view v\nwhere not exists (select 1 from functional.alltypes a where v.id = a.id)",
     "queryId":"0:0",
     "hash":"e79b8abc8a682d9e0f6b2c30a6c885f3",
-    "user":"dev",
-    "timestamp":1475094005,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4500,8 +4500,8 @@ where k.int_col < 10
     "queryText":"select count(*) from functional_kudu.alltypes k join functional.alltypes h on k.id = h.id\nwhere k.int_col < 10",
     "queryId":"0:0",
     "hash":"7b7c92d488186d869bb6b78c97666f41",
-    "user":"dev",
-    "timestamp":1479538352,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4555,8 +4555,8 @@ functional.alltypes a where a.id < 100
     "queryText":"insert into functional_kudu.testtbl select id, string_col as name, int_col as zip from\nfunctional.alltypes a where a.id < 100",
     "queryId":"0:0",
     "hash":"87a59bac56c6ad27f7af6e71af46d552",
-    "user":"dev",
-    "timestamp":1479539012,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4639,8 +4639,8 @@ functional.alltypes where id < 10
     "queryText":"insert into functional_kudu.testtbl (name, id) select string_col as name, id from\nfunctional.alltypes where id < 10",
     "queryId":"0:0",
     "hash":"0bccfdbf4118e6d5a3d94062ecb5130a",
-    "user":"dev",
-    "timestamp":1479933751,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4703,8 +4703,8 @@ functional.alltypes where id < 10
     "queryText":"upsert into functional_kudu.testtbl (name, id) select string_col as name, id from\nfunctional.alltypes where id < 10",
     "queryId":"0:0",
     "hash":"f4c1e7b016e75012f7268f2f42ae5630",
-    "user":"dev",
-    "timestamp":1479933751,
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4768,9 +4768,9 @@ from functional.alltypestiny
 {
     "queryText":"create table kudu_ctas primary key (id) partition by hash (id) partitions 3\nstored as kudu as select id, bool_col, tinyint_col, smallint_col, int_col,\nbigint_col, float_col, double_col, date_string_col, string_col\nfrom functional.alltypestiny",
     "queryId":"0:0",
-    "hash":"6e3e192c7fb8bb6b22674a9b7b488b55",
-    "user":"dev",
-    "timestamp":1479933751,
+    "hash":"de98b09af6b6ab0f0678c5fc0c4369b4",
+    "user":"progers",
+    "timestamp":1546590135,
     "edges":[
         {
             "sources":[
@@ -4967,9 +4967,3 @@ from functional.alltypestiny
     ]
 }
 ====
-# No lineage should be generated for UPDATE
-update functional_kudu.alltypes set int_col = 1 where id = 1
-====
-# No lineage should be generated from DELETE
-delete from functional_kudu.alltypes where id = 1
-====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/max-row-size.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/max-row-size.test b/testdata/workloads/functional-planner/queries/PlannerTest/max-row-size.test
index 0ffe8af..34c4938 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/max-row-size.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/max-row-size.test
@@ -18,7 +18,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.33MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=327B cardinality=150000
+|  tuple-ids=0,1 row-size=327B cardinality=150.00K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
@@ -28,7 +28,7 @@ Per-Host Resources: mem-estimate=41.95MB mem-reservation=33.94MB thread-reservat
 |  fk/pk conjuncts: c_nationkey = n_nationkey
 |  runtime filters: RF000[bloom] <- n_nationkey
 |  mem-estimate=16.94MB mem-reservation=16.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=327B cardinality=150000
+|  tuple-ids=0,1 row-size=327B cardinality=150.00K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--03:EXCHANGE [BROADCAST]
@@ -56,7 +56,7 @@ Per-Host Resources: mem-estimate=41.95MB mem-reservation=33.94MB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=150000
    mem-estimate=24.00MB mem-reservation=16.00MB thread-reservation=1
-   tuple-ids=0 row-size=218B cardinality=150000
+   tuple-ids=0 row-size=218B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Join with large build side.
@@ -79,7 +79,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.20MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1N row-size=402B cardinality=6001215
+|  tuple-ids=0,1N row-size=402B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -88,12 +88,12 @@ Per-Host Resources: mem-estimate=359.29MB mem-reservation=86.00MB thread-reserva
 |  hash predicates: l_orderkey = o_orderkey
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  mem-estimate=268.94MB mem-reservation=46.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1N row-size=402B cardinality=6001215
+|  tuple-ids=0,1N row-size=402B cardinality=6.00M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.34MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -103,19 +103,19 @@ Per-Host Resources: mem-estimate=359.29MB mem-reservation=86.00MB thread-reserva
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=1
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Null-aware anti-join with medium build side.
@@ -136,7 +136,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=231B cardinality=6001215
+|  tuple-ids=0 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -144,12 +144,12 @@ Per-Host Resources: mem-estimate=124.02MB mem-reservation=74.00MB thread-reserva
 02:HASH JOIN [NULL AWARE LEFT ANTI JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  mem-estimate=34.00MB mem-reservation=34.00MB spill-buffer=1.00MB thread-reservation=0
-|  tuple-ids=0 row-size=231B cardinality=6001215
+|  tuple-ids=0 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.02MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=8B cardinality=1500000
+|  |  tuple-ids=1 row-size=8B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -159,19 +159,19 @@ Per-Host Resources: mem-estimate=124.02MB mem-reservation=74.00MB thread-reserva
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=4.00MB thread-reservation=1
-|     tuple-ids=1 row-size=8B cardinality=1500000
+|     tuple-ids=1 row-size=8B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Mid NDV aggregation.
@@ -197,7 +197,7 @@ PLAN-ROOT SINK
 |
 08:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.10MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 07(GETNEXT)
 |
 F03:PLAN FRAGMENT [HASH(l_orderkey,o_orderstatus)] hosts=3 instances=3
@@ -207,12 +207,12 @@ Per-Host Resources: mem-estimate=41.10MB mem-reservation=31.00MB thread-reservat
 |  group by: l_orderkey, o_orderstatus
 |  having: count(*) = CAST(1 AS BIGINT)
 |  mem-estimate=31.00MB mem-reservation=31.00MB spill-buffer=1.00MB thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 07(GETNEXT), 00(OPEN)
 |
 06:EXCHANGE [HASH(l_orderkey,o_orderstatus)]
 |  mem-estimate=10.10MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 00(GETNEXT)
 |
 F02:PLAN FRAGMENT [HASH(l_orderkey)] hosts=3 instances=3
@@ -221,7 +221,7 @@ Per-Host Resources: mem-estimate=99.65MB mem-reservation=66.00MB thread-reservat
 |  output: count(*)
 |  group by: l_orderkey, o_orderstatus
 |  mem-estimate=47.56MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 00(GETNEXT)
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
@@ -229,12 +229,12 @@ Per-Host Resources: mem-estimate=99.65MB mem-reservation=66.00MB thread-reservat
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=31.00MB mem-reservation=31.00MB spill-buffer=1.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=29B cardinality=5757710
+|  tuple-ids=0,1 row-size=29B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--05:EXCHANGE [HASH(o_orderkey)]
 |  |  mem-estimate=10.05MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=21B cardinality=1500000
+|  |  tuple-ids=1 row-size=21B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -244,27 +244,27 @@ Per-Host Resources: mem-estimate=99.65MB mem-reservation=66.00MB thread-reservat
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=21B cardinality=1500000
+|     tuple-ids=1 row-size=21B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 04:EXCHANGE [HASH(l_orderkey)]
 |  mem-estimate=10.04MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=8B cardinality=6001215
+|  tuple-ids=0 row-size=8B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=81.00MB mem-reservation=5.00MB thread-reservation=2 runtime-filters-memory=1.00MB
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    runtime filters: RF000[bloom] -> l_orderkey
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # High NDV aggregation.
@@ -283,7 +283,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 03(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)] hosts=3 instances=3
@@ -291,12 +291,12 @@ Per-Host Resources: mem-estimate=737.12MB mem-reservation=46.00MB thread-reserva
 03:AGGREGATE [FINALIZE]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
 |  mem-estimate=726.43MB mem-reservation=46.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 03(GETNEXT), 00(OPEN)
 |
 02:EXCHANGE [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -304,17 +304,17 @@ Per-Host Resources: mem-estimate=806.43MB mem-reservation=74.00MB thread-reserva
 01:AGGREGATE [STREAMING]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
 |  mem-estimate=726.43MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # High NDV aggregation with string aggregation function.
@@ -335,7 +335,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.09MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=28B cardinality=6001215
+|  tuple-ids=1 row-size=28B cardinality=6.00M
 |  in pipelines: 03(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(l_orderkey,l_partkey)] hosts=3 instances=3
@@ -344,12 +344,12 @@ Per-Host Resources: mem-estimate=98.23MB mem-reservation=48.00MB thread-reservat
 |  output: group_concat:merge(l_linestatus, ',')
 |  group by: l_orderkey, l_partkey
 |  mem-estimate=88.14MB mem-reservation=48.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=28B cardinality=6001215
+|  tuple-ids=1 row-size=28B cardinality=6.00M
 |  in pipelines: 03(GETNEXT), 00(OPEN)
 |
 02:EXCHANGE [HASH(l_orderkey,l_partkey)]
 |  mem-estimate=10.09MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=28B cardinality=6001215
+|  tuple-ids=1 row-size=28B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -358,17 +358,17 @@ Per-Host Resources: mem-estimate=168.14MB mem-reservation=50.00MB thread-reserva
 |  output: group_concat(l_linestatus, ',')
 |  group by: l_orderkey, l_partkey
 |  mem-estimate=88.14MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=28B cardinality=6001215
+|  tuple-ids=1 row-size=28B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=16.00MB thread-reservation=1
-   tuple-ids=0 row-size=29B cardinality=6001215
+   tuple-ids=0 row-size=29B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Sort + Analytic.
@@ -389,7 +389,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=56.26KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=3,2 row-size=6B cardinality=7300
+|  tuple-ids=3,2 row-size=6B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(int_col)] hosts=3 instances=3
@@ -398,18 +398,18 @@ Per-Host Resources: mem-estimate=40.04MB mem-reservation=40.00MB thread-reservat
 |  functions: max(tinyint_col)
 |  partition by: int_col
 |  mem-estimate=16.00MB mem-reservation=16.00MB spill-buffer=8.00MB thread-reservation=0
-|  tuple-ids=3,2 row-size=6B cardinality=7300
+|  tuple-ids=3,2 row-size=6B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 01:SORT
 |  order by: int_col ASC NULLS FIRST
 |  mem-estimate=24.00MB mem-reservation=24.00MB spill-buffer=8.00MB thread-reservation=0
-|  tuple-ids=3 row-size=5B cardinality=7300
+|  tuple-ids=3 row-size=5B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 03:EXCHANGE [HASH(int_col)]
 |  mem-estimate=38.88KB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=5B cardinality=7300
+|  tuple-ids=0 row-size=5B cardinality=7.30K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -422,6 +422,6 @@ Per-Host Resources: mem-estimate=16.00MB mem-reservation=32.00KB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=16.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=5B cardinality=7300
+   tuple-ids=0 row-size=5B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/mem-limit-broadcast-join.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/mem-limit-broadcast-join.test b/testdata/workloads/functional-planner/queries/PlannerTest/mem-limit-broadcast-join.test
index 689e78a..e6f0d7d 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/mem-limit-broadcast-join.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/mem-limit-broadcast-join.test
@@ -9,13 +9,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n1.n_regionkey = n2.n_regionkey
 |  runtime filters: RF000 <- n2.n_regionkey
+|  row-size=219B cardinality=125
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpch.nation n2]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=109B cardinality=25
 |
 00:SCAN HDFS [tpch.nation n1]
    partitions=1/1 files=1 size=2.15KB
    runtime filters: RF000 -> n1.n_regionkey
+   row-size=109B cardinality=25
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/min-max-runtime-filters.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/min-max-runtime-filters.test b/testdata/workloads/functional-planner/queries/PlannerTest/min-max-runtime-filters.test
index 0ddccdc..0a63ef6 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/min-max-runtime-filters.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/min-max-runtime-filters.test
@@ -18,7 +18,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: none
 |  runtime filters: RF002[min_max] <- b.string_col, RF003[min_max] <- b.tinyint_col + 1
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=39B cardinality=5840
+|  tuple-ids=0,1 row-size=39B cardinality=5.84K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN KUDU [functional_kudu.alltypestiny b]
@@ -29,7 +29,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.alltypes a]
    runtime filters: RF002[min_max] -> a.string_col, RF003[min_max] -> a.int_col
    mem-estimate=1.50MB mem-reservation=0B thread-reservation=1
-   tuple-ids=0 row-size=21B cardinality=7300
+   tuple-ids=0 row-size=21B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Filters are not created if the target isn't a bare Kudu column or if 'is (not) distinct'
@@ -55,7 +55,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: assumed fk/pk
 |  other predicates: a.string_col IS DISTINCT FROM b.string_col
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=44B cardinality=7300
+|  tuple-ids=0,1 row-size=44B cardinality=7.30K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN KUDU [functional_kudu.alltypestiny b]
@@ -65,7 +65,7 @@ PLAN-ROOT SINK
 |
 00:SCAN KUDU [functional_kudu.alltypes a]
    mem-estimate=2.25MB mem-reservation=0B thread-reservation=1
-   tuple-ids=0 row-size=22B cardinality=7300
+   tuple-ids=0 row-size=22B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Filters are only assigned when the target expr is cast if its an implicit integer cast.
@@ -91,7 +91,7 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: a.string_col = b.timestamp_col, a.tinyint_col = b.bigint_col
 |  runtime filters: RF007[min_max] <- b.bigint_col
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=60B cardinality=1460
+|  tuple-ids=0,1 row-size=60B cardinality=1.46K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN KUDU [functional_kudu.alltypestiny b]
@@ -102,7 +102,7 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.alltypes a]
    runtime filters: RF007[min_max] -> a.tinyint_col
    mem-estimate=3.00MB mem-reservation=0B thread-reservation=1
-   tuple-ids=0 row-size=26B cardinality=7300
+   tuple-ids=0 row-size=26B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Query with both Kudu and HDFS filter targets.
@@ -126,12 +126,12 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: none
 |  runtime filters: RF000[bloom] <- c.int_col, RF001[min_max] <- c.int_col
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,1,2 row-size=12B cardinality=5329000
+|  tuple-ids=0,1,2 row-size=12B cardinality=5.33M
 |  in pipelines: 00(GETNEXT), 02(OPEN)
 |
 |--02:SCAN KUDU [functional_kudu.alltypes c]
 |     mem-estimate=768.00KB mem-reservation=0B thread-reservation=1
-|     tuple-ids=2 row-size=4B cardinality=7300
+|     tuple-ids=2 row-size=4B cardinality=7.30K
 |     in pipelines: 02(GETNEXT)
 |
 03:HASH JOIN [INNER JOIN]
@@ -139,11 +139,11 @@ PLAN-ROOT SINK
 |  fk/pk conjuncts: assumed fk/pk
 |  runtime filters: RF003[min_max] <- b.int_col
 |  mem-estimate=2.00GB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=8B cardinality=7300
+|  tuple-ids=0,1 row-size=8B cardinality=7.30K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--01:SCAN HDFS [functional_parquet.alltypes b]
-|     partitions=24/24 files=24 size=199.69KB
+|     partitions=24/24 files=24 size=189.28KB
 |     runtime filters: RF000[bloom] -> b.int_col
 |     stored statistics:
 |       table: rows=unavailable size=unavailable
@@ -157,6 +157,6 @@ PLAN-ROOT SINK
 00:SCAN KUDU [functional_kudu.alltypes a]
    runtime filters: RF001[min_max] -> a.int_col, RF003[min_max] -> a.int_col
    mem-estimate=768.00KB mem-reservation=0B thread-reservation=1
-   tuple-ids=0 row-size=4B cardinality=7300
+   tuple-ids=0 row-size=4B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test b/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test
index a1571b5..5d340f3 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test
@@ -58,7 +58,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=188.92KB
+   partitions=24/24 files=24 size=189.28KB
    predicates: id < CAST(10 AS INT)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -113,7 +113,7 @@ Per-Host Resources: mem-estimate=432.00MB mem-reservation=102.07MB thread-reserv
 |  in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=188.92KB
+   partitions=24/24 files=24 size=189.28KB
    predicates: id < CAST(10 AS INT)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -152,7 +152,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=24/24 files=24 size=188.92KB
+   partitions=24/24 files=24 size=189.28KB
    predicates: id < CAST(10 AS INT)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -200,7 +200,7 @@ Per-Host Resources: mem-estimate=30.32MB mem-reservation=30.00MB thread-reservat
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=9
 Per-Host Resources: mem-estimate=48.00MB mem-reservation=48.00KB thread-reservation=3
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=188.92KB
+   partitions=24/24 files=24 size=189.28KB
    predicates: id < CAST(10 AS INT)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -225,7 +225,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=482B cardinality=1500000
+|  tuple-ids=2,1,0 row-size=482B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
@@ -268,14 +268,14 @@ PLAN-ROOT SINK
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=288.98MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_custkey < CAST(10 AS BIGINT), !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderkey < CAST(5 AS BIGINT)
    predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
    stored statistics:
      table: rows=150000 size=288.99MB
      columns missing stats: c_orders
-   extrapolated-rows=disabled max-scan-range-rows=44227
+   extrapolated-rows=disabled max-scan-range-rows=44225
    parquet statistics predicates: c_custkey < CAST(10 AS BIGINT)
    parquet statistics predicates on o: o_orderkey < CAST(5 AS BIGINT)
    parquet statistics predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
@@ -283,7 +283,7 @@ PLAN-ROOT SINK
    parquet dictionary predicates on o: o_orderkey < CAST(5 AS BIGINT)
    parquet dictionary predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
    mem-estimate=88.00MB mem-reservation=88.00MB thread-reservation=0
-   tuple-ids=0 row-size=230B cardinality=15000
+   tuple-ids=0 row-size=230B cardinality=15.00K
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -293,14 +293,14 @@ PLAN-ROOT SINK
 |
 09:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=14.34MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=482B cardinality=1500000
+|  tuple-ids=2,1,0 row-size=482B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=9
 Per-Host Resources: mem-estimate=264.00MB mem-reservation=264.00MB thread-reservation=3
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=2,1,0 row-size=482B cardinality=1500000
+|  tuple-ids=2,1,0 row-size=482B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
@@ -343,14 +343,14 @@ Per-Host Resources: mem-estimate=264.00MB mem-reservation=264.00MB thread-reserv
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c, RANDOM]
-   partitions=1/1 files=4 size=288.98MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_custkey < CAST(10 AS BIGINT), !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderkey < CAST(5 AS BIGINT)
    predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
    stored statistics:
      table: rows=150000 size=288.99MB
      columns missing stats: c_orders
-   extrapolated-rows=disabled max-scan-range-rows=44229
+   extrapolated-rows=disabled max-scan-range-rows=44225
    parquet statistics predicates: c_custkey < CAST(10 AS BIGINT)
    parquet statistics predicates on o: o_orderkey < CAST(5 AS BIGINT)
    parquet statistics predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
@@ -358,7 +358,7 @@ Per-Host Resources: mem-estimate=264.00MB mem-reservation=264.00MB thread-reserv
    parquet dictionary predicates on o: o_orderkey < CAST(5 AS BIGINT)
    parquet dictionary predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
    mem-estimate=88.00MB mem-reservation=88.00MB thread-reservation=0
-   tuple-ids=0 row-size=230B cardinality=15000
+   tuple-ids=0 row-size=230B cardinality=15.00K
    in pipelines: 00(GETNEXT)
 ====
 # Hash-join in a subplan should work.
@@ -373,7 +373,7 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=1,0,2 row-size=258B cardinality=1500000
+|  tuple-ids=1,0,2 row-size=258B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--06:HASH JOIN [INNER JOIN]
@@ -413,11 +413,11 @@ PLAN-ROOT SINK
    stored statistics:
      table: rows=150000 size=288.99MB
      columns missing stats: c_orders, c_orders
-   extrapolated-rows=disabled max-scan-range-rows=44229
+   extrapolated-rows=disabled max-scan-range-rows=44225
    parquet statistics predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    parquet dictionary predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    mem-estimate=88.00MB mem-reservation=16.00MB thread-reservation=0
-   tuple-ids=0 row-size=242B cardinality=150000
+   tuple-ids=0 row-size=242B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -427,14 +427,14 @@ PLAN-ROOT SINK
 |
 07:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=12.37MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1,0,2 row-size=258B cardinality=1500000
+|  tuple-ids=1,0,2 row-size=258B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=9
 Per-Host Resources: mem-estimate=269.81MB mem-reservation=53.81MB thread-reservation=3
 01:SUBPLAN
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
-|  tuple-ids=1,0,2 row-size=258B cardinality=1500000
+|  tuple-ids=1,0,2 row-size=258B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 |--06:HASH JOIN [INNER JOIN]
@@ -474,10 +474,10 @@ Per-Host Resources: mem-estimate=269.81MB mem-reservation=53.81MB thread-reserva
    stored statistics:
      table: rows=150000 size=288.99MB
      columns missing stats: c_orders, c_orders
-   extrapolated-rows=disabled max-scan-range-rows=44229
+   extrapolated-rows=disabled max-scan-range-rows=44225
    parquet statistics predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    parquet dictionary predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    mem-estimate=88.00MB mem-reservation=16.00MB thread-reservation=0
-   tuple-ids=0 row-size=242B cardinality=150000
+   tuple-ids=0 row-size=242B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-limit.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-limit.test b/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-limit.test
index d3c5553..bb9ee16 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-limit.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/multiple-distinct-limit.test
@@ -9,6 +9,7 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, min(timestamp_col)), aggif(valid_tid() = 5, max(timestamp_col))
 |  limit: 10
+|  row-size=48B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  Class 0
@@ -17,6 +18,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(timestamp_col)
+|  row-size=48B cardinality=3
 |
 01:AGGREGATE
 |  Class 0
@@ -25,15 +27,18 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: min(timestamp_col), max(timestamp_col)
+|  row-size=35B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=19B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: aggif(valid_tid() = 2, count(tinyint_col)), aggif(valid_tid() = 4, count(smallint_col)), aggif(valid_tid() = 5, min(timestamp_col)), aggif(valid_tid() = 5, max(timestamp_col))
 |  limit: 10
+|  row-size=48B cardinality=1
 |
 07:AGGREGATE [FINALIZE]
 |  Class 0
@@ -42,6 +47,7 @@ PLAN-ROOT SINK
 |    output: count:merge(smallint_col)
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(timestamp_col)
+|  row-size=48B cardinality=3
 |
 06:EXCHANGE [UNPARTITIONED]
 |
@@ -52,6 +58,7 @@ PLAN-ROOT SINK
 |    output: count(smallint_col)
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(timestamp_col)
+|  row-size=48B cardinality=3
 |
 05:AGGREGATE
 |  Class 0
@@ -60,6 +67,7 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: min:merge(timestamp_col), max:merge(timestamp_col)
+|  row-size=35B cardinality=31
 |
 04:EXCHANGE [HASH(CASE valid_tid() WHEN 1 THEN murmur_hash(tinyint_col) WHEN 3 THEN murmur_hash(smallint_col) WHEN 5 THEN 0 END)]
 |
@@ -70,9 +78,11 @@ PLAN-ROOT SINK
 |    group by: smallint_col
 |  Class 2
 |    output: min(timestamp_col), max(timestamp_col)
+|  row-size=35B cardinality=31
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=19B cardinality=7.30K
 ====
 # Test correct placement of limit. Simplifies to a single class with one distinct agg.
 select b from (
@@ -86,23 +96,28 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: min(timestamp_col)
 |  limit: 10
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  output: min:merge(timestamp_col)
 |  limit: 10
+|  row-size=16B cardinality=1
 |
 03:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: min(timestamp_col)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ====
 # Test correct placement of limit. Simplifies to a single class with a non-distinct agg.
 select d from (
@@ -116,23 +131,28 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: max(timestamp_col)
 |  limit: 10
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  output: max:merge(timestamp_col)
 |  limit: 10
+|  row-size=16B cardinality=1
 |
 03:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: max(timestamp_col)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=16B cardinality=7.30K
 ====
 # Test correct placement of limit. Simplifies to a single class with distinct
 # and non-distinct aggss.
@@ -147,35 +167,43 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(smallint_col), max:merge(timestamp_col)
 |  limit: 10
+|  row-size=24B cardinality=1
 |
 01:AGGREGATE
 |  output: max(timestamp_col)
 |  group by: smallint_col
+|  row-size=18B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=18B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(smallint_col), max:merge(timestamp_col)
 |  limit: 10
+|  row-size=24B cardinality=1
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(smallint_col), max:merge(timestamp_col)
+|  row-size=24B cardinality=1
 |
 05:AGGREGATE
 |  output: max:merge(timestamp_col)
 |  group by: smallint_col
+|  row-size=18B cardinality=10
 |
 04:EXCHANGE [HASH(smallint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: max(timestamp_col)
 |  group by: smallint_col
+|  row-size=18B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=18B cardinality=7.30K
 ====


[03/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/union.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/union.test b/testdata/workloads/functional-planner/queries/PlannerTest/union.test
index 72eb346..fa557b6 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/union.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/union.test
@@ -13,20 +13,28 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: month = b.month
 |  runtime filters: RF000 <- b.month
+|  row-size=97B cardinality=1.24K
 |
 |--03:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month = 1
 |     partitions=1/4 files=1 size=1.57KB
+|     row-size=89B cardinality=25
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=1.24K
 |
 |--02:SCAN HDFS [functional.alltypes]
+|     partition predicates: functional.alltypes.month = 1
 |     partitions=2/24 files=2 size=40.32KB
 |     runtime filters: RF000 -> functional.alltypes.month
+|     row-size=8B cardinality=620
 |
 01:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.month = 1
    partitions=2/24 files=2 size=40.32KB
    runtime filters: RF000 -> functional.alltypes.month
+   row-size=8B cardinality=620
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -35,22 +43,30 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: month = b.month
 |  runtime filters: RF000 <- b.month
+|  row-size=97B cardinality=1.24K
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month = 1
 |     partitions=1/4 files=1 size=1.57KB
+|     row-size=89B cardinality=25
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=1.24K
 |
 |--02:SCAN HDFS [functional.alltypes]
+|     partition predicates: functional.alltypes.month = 1
 |     partitions=2/24 files=2 size=40.32KB
 |     runtime filters: RF000 -> functional.alltypes.month
+|     row-size=8B cardinality=620
 |
 01:SCAN HDFS [functional.alltypes]
+   partition predicates: functional.alltypes.month = 1
    partitions=2/24 files=2 size=40.32KB
    runtime filters: RF000 -> functional.alltypes.month
+   row-size=8B cardinality=620
 ====
 // Only UNION ALL, no nested unions
 select * from functional.alltypestiny where year=2009 and month=1
@@ -63,15 +79,22 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -86,15 +109,22 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Only UNION ALL with limit inside operands. One of the operands also has an order by.
 select * from functional.alltypestiny where year=2009 and month=1 limit 1
@@ -107,20 +137,28 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=3
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
 |     limit: 1
+|     row-size=89B cardinality=1
 |
 |--03:TOP-N [LIMIT=1]
 |  |  order by: int_col ASC
+|  |  row-size=89B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
    limit: 1
+   row-size=89B cardinality=1
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -133,13 +171,16 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=3
 |
 |--07:EXCHANGE [UNPARTITIONED]
 |  |  limit: 1
 |  |
 |  04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
 |     limit: 1
+|     row-size=89B cardinality=1
 |
 |--06:MERGING-EXCHANGE [UNPARTITIONED]
 |  |  order by: int_col ASC
@@ -147,16 +188,21 @@ PLAN-ROOT SINK
 |  |
 |  03:TOP-N [LIMIT=1]
 |  |  order by: int_col ASC
+|  |  row-size=89B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 05:EXCHANGE [UNPARTITIONED]
 |  limit: 1
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
    limit: 1
+   row-size=89B cardinality=1
 ====
 // Only UNION DISTINCT, no nested unions
 select * from functional.alltypestiny where year=2009 and month=1
@@ -169,18 +215,26 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -195,23 +249,32 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 05:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 04:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Only UNION ALL, mixed selects with and without from clauses, no nested unions
 select * from functional.alltypestiny where year=2009 and month=1
@@ -227,12 +290,17 @@ PLAN-ROOT SINK
 00:UNION
 |  constant-operands=2
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -246,12 +314,17 @@ PLAN-ROOT SINK
 00:UNION
 |  constant-operands=2
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Only UNION DISTINCT, mixed selects with and without from clauses, no nested unions
 select * from functional.alltypestiny where year=2009 and month=1
@@ -266,16 +339,22 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  constant-operands=2
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -288,21 +367,28 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 04:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  constant-operands=2
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Mixed UNION ALL/DISTINCT but effectively only UNION DISTINCT, no nested unions,
 // with order by and limit
@@ -319,24 +405,35 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=3
 |
 05:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -355,29 +452,41 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=3
 |
 08:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 07:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 05:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Mixed UNION ALL/DISTINCT, no nested unions, with order by and limit
 select * from functional.alltypestiny where year=2009 and month=1
@@ -393,27 +502,39 @@ PLAN-ROOT SINK
 |
 07:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC, smallint_col ASC
+|  row-size=89B cardinality=3
 |
 04:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--06:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -432,32 +553,45 @@ PLAN-ROOT SINK
 |
 07:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC, smallint_col ASC
+|  row-size=89B cardinality=3
 |
 04:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--09:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  08:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  03:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--06:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Mixed UNION ALL/DISTINCT, no nested unions, with order by and limit
 select * from functional.alltypestiny where year=2009 and month=1
@@ -473,27 +607,39 @@ PLAN-ROOT SINK
 |
 07:TOP-N [LIMIT=4]
 |  order by: tinyint_col ASC, smallint_col ASC
+|  row-size=89B cardinality=4
 |
 05:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=6
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=6
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 06:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -512,32 +658,45 @@ PLAN-ROOT SINK
 |
 07:TOP-N [LIMIT=4]
 |  order by: tinyint_col ASC, smallint_col ASC
+|  row-size=89B cardinality=4
 |
 05:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--09:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=6
 |  |
 |  08:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=6
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=6
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 06:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: Only UNION ALL, first operand is nested
 (select * from functional.alltypestiny where year=2009 and month=1
@@ -550,15 +709,22 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -573,15 +739,22 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: Only UNION ALL, second operand is nested
 select * from functional.alltypestiny where year=2009 and month=1
@@ -594,15 +767,22 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -617,15 +797,22 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: Only UNION DISTINCT, first operand is nested
 (select * from functional.alltypestiny where year=2009 and month=1
@@ -638,18 +825,26 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -664,23 +859,32 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 05:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 04:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: Only UNION DISTINCT, second operand is nested
 select * from functional.alltypestiny where year=2009 and month=1
@@ -693,18 +897,26 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -719,23 +931,32 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 05:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 04:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION ALL doesn't absorb nested union with DISTINCT,
 // first operand is nested
@@ -749,21 +970,30 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 04:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=4
 |
 01:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 2:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -778,26 +1008,36 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 07:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=4
 |
 06:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 04:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=4
 |
 01:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION ALL doesn't absorb nested union with DISTINCT,
 // second operand is nested
@@ -811,21 +1051,30 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -840,26 +1089,36 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--07:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  06:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  05:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION ALL absorbs the children but not directly the operands
 // of a nested union with mixed ALL/DISTINCT, first operand is nested
@@ -875,24 +1134,35 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--06:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=2/090201.txt 0:115
@@ -909,29 +1179,41 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--06:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--08:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  07:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  05:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION ALL absorbs the children but not directly the operands
 // of a nested union with mixed ALL/DISTINCT, second operand is nested
@@ -947,24 +1229,35 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--06:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  03:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--05:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -981,29 +1274,41 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--08:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  07:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  06:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  03:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--05:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION ALL doesn't absorb the children of a nested union
 // with mixed ALL/DISTINCT and limit, second operand is nested
@@ -1020,28 +1325,40 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--06:UNION
 |  |  pass-through-operands: all
 |  |  limit: 10
+|  |  row-size=89B cardinality=6
 |  |
 |  |--05:AGGREGATE [FINALIZE]
 |  |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  02:UNION
 |  |  |  pass-through-operands: all
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |  |     partition predicates: year = 2009, month = 2
 |  |  |     partitions=1/4 files=1 size=115B
+|  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  07:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1058,6 +1375,7 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--11:EXCHANGE [RANDOM]
 |  |
@@ -1067,29 +1385,41 @@ PLAN-ROOT SINK
 |  06:UNION
 |  |  pass-through-operands: all
 |  |  limit: 10
+|  |  row-size=89B cardinality=6
 |  |
 |  |--09:AGGREGATE [FINALIZE]
 |  |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  08:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |  |
 |  |  05:AGGREGATE [STREAMING]
 |  |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  02:UNION
 |  |  |  pass-through-operands: all
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |  |     partition predicates: year = 2009, month = 2
 |  |  |     partitions=1/4 files=1 size=115B
+|  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  07:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION ALL doesn't absorb nested union with order by and limit,
 // first operand is nested
@@ -1104,21 +1434,30 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=5
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 04:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=3
 |
 01:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 2:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1133,9 +1472,12 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=5
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 07:EXCHANGE [RANDOM]
 |
@@ -1145,15 +1487,21 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=3
 |
 01:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION ALL doesn't absorb nested union with order by and limit,
 // second operand is nested
@@ -1168,21 +1516,30 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=5
 |
 |--05:TOP-N [LIMIT=3]
 |  |  order by: tinyint_col ASC
+|  |  row-size=89B cardinality=3
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1197,6 +1554,7 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=5
 |
 |--07:EXCHANGE [RANDOM]
 |  |
@@ -1206,18 +1564,26 @@ PLAN-ROOT SINK
 |  |
 |  05:TOP-N [LIMIT=3]
 |  |  order by: tinyint_col ASC
+|  |  row-size=89B cardinality=3
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION DISTINCT absorbs nested union with ALL
 // first operand is nested
@@ -1231,18 +1597,26 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1257,23 +1631,32 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 05:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 04:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION DISTINCT absorbs nested union with ALL,
 // second operand is nested
@@ -1287,18 +1670,26 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1313,23 +1704,32 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 05:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 04:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION DISTINCT absorbs nested union with mixed ALL/DISTINCT,
 // first operand is nested
@@ -1345,21 +1745,31 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1376,26 +1786,37 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 06:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 05:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION DISTINCT absorbs nested union with mixed ALL/DISTINCT,
 // second operand is nested
@@ -1411,21 +1832,31 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1442,26 +1873,37 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 06:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 05:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=8
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION DISTINCT doesn't absorb nested union with order by and limit,
 // first operand is nested
@@ -1476,24 +1918,34 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=5
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=5
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 04:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=3
 |
 01:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 2:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1508,17 +1960,22 @@ PLAN-ROOT SINK
 |
 10:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=5
 |
 09:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 06:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=5
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=5
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 08:EXCHANGE [RANDOM]
 |
@@ -1528,15 +1985,21 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=3
 |
 01:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Union unnesting: UNION DISTINCT doesn't absorb nested union with order by and limit
 // second operand is nested
@@ -1551,24 +2014,34 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=5
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=5
 |
 |--05:TOP-N [LIMIT=3]
 |  |  order by: tinyint_col ASC
+|  |  row-size=89B cardinality=3
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1583,14 +2056,17 @@ PLAN-ROOT SINK
 |
 10:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=5
 |
 09:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 06:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=5
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=5
 |
 |--08:EXCHANGE [RANDOM]
 |  |
@@ -1600,18 +2076,26 @@ PLAN-ROOT SINK
 |  |
 |  05:TOP-N [LIMIT=3]
 |  |  order by: tinyint_col ASC
+|  |  row-size=89B cardinality=3
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Complex union unnesting: Multiple levels of UNION ALL, fully unnestable
 select * from functional.alltypestiny where year=2009 and month=1
@@ -1628,21 +2112,32 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=10
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 3
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1661,21 +2156,32 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=10
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 3
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Complex union unnesting: Multiple levels of UNION DISTINCT, fully unnestable
 select * from functional.alltypestiny where year=2009 and month=1
@@ -1692,24 +2198,36 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=10
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 3
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1728,29 +2246,42 @@ PLAN-ROOT SINK
 |
 08:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=10
 |
 07:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 06:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=10
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 3
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Complex union unnesting: Partially unnestable up to 2nd level
 select * from functional.alltypestiny where year=2009 and month=1
@@ -1768,36 +2299,52 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=9
 |
 |--10:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=7
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=7
 |  |
 |  |--09:TOP-N [LIMIT=3]
 |  |  |  order by: tinyint_col ASC
+|  |  |  row-size=89B cardinality=3
 |  |  |
 |  |  08:AGGREGATE [FINALIZE]
 |  |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  05:UNION
 |  |  |  pass-through-operands: all
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--07:SCAN HDFS [functional.alltypestiny]
+|  |  |     partition predicates: year = 2009, month = 3
 |  |  |     partitions=1/4 files=1 size=115B
+|  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  06:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1816,17 +2363,21 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=9
 |
 |--16:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=7
 |  |
 |  15:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  10:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=7
 |  |
 |  02:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=7
 |  |
 |  |--14:EXCHANGE [RANDOM]
 |  |  |
@@ -1836,32 +2387,46 @@ PLAN-ROOT SINK
 |  |  |
 |  |  09:TOP-N [LIMIT=3]
 |  |  |  order by: tinyint_col ASC
+|  |  |  row-size=89B cardinality=3
 |  |  |
 |  |  12:AGGREGATE [FINALIZE]
 |  |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  11:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |  |
 |  |  08:AGGREGATE [STREAMING]
 |  |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  05:UNION
 |  |  |  pass-through-operands: all
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--07:SCAN HDFS [functional.alltypestiny]
+|  |  |     partition predicates: year = 2009, month = 3
 |  |  |     partitions=1/4 files=1 size=115B
+|  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  06:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Complex union unnesting: Partially unnestable up to 1st level
 select * from functional.alltypestiny where year=2009 and month=1
@@ -1879,33 +2444,48 @@ PLAN-ROOT SINK
 |
 09:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=9
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=9
 |
 |--08:TOP-N [LIMIT=3]
 |  |  order by: tinyint_col ASC
+|  |  row-size=89B cardinality=3
 |  |
 |  07:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  04:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--06:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 3
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -1924,14 +2504,17 @@ PLAN-ROOT SINK
 |
 15:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=9
 |
 14:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 09:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=9
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=9
 |
 |--13:EXCHANGE [RANDOM]
 |  |
@@ -1941,32 +2524,46 @@ PLAN-ROOT SINK
 |  |
 |  08:TOP-N [LIMIT=3]
 |  |  order by: tinyint_col ASC
+|  |  row-size=89B cardinality=3
 |  |
 |  11:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  10:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  07:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  04:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--06:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 3
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // Complex union unnesting: Multiple nested unions to test all rules in a single query
 select * from functional.alltypestiny where year=2009 and month=1
@@ -1993,51 +2590,76 @@ PLAN-ROOT SINK
 |
 09:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=15
 |
 |--08:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=9
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=9
 |  |
 |  |--07:TOP-N [LIMIT=3]
 |  |  |  order by: tinyint_col ASC
+|  |  |  row-size=89B cardinality=3
 |  |  |
 |  |  04:UNION
 |  |  |  pass-through-operands: all
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--06:SCAN HDFS [functional.alltypestiny]
+|  |  |     partition predicates: year = 2009, month = 3
 |  |  |     partitions=1/4 files=1 size=115B
+|  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  05:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--15:TOP-N [LIMIT=3]
 |  |  order by: tinyint_col ASC
+|  |  row-size=89B cardinality=2
 |  |
 |  12:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=2
 |  |
 |  |--14:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 5
 |  |     partitions=0/4 files=0 size=0B
+|  |     row-size=89B cardinality=0
 |  |
 |  13:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 4
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--11:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 4
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 10:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 3
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -2063,17 +2685,21 @@ PLAN-ROOT SINK
 |
 09:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=15
 |
 |--20:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=9
 |  |
 |  19:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  08:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=9
 |  |
 |  00:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=9
 |  |
 |  |--18:EXCHANGE [RANDOM]
 |  |  |
@@ -2083,24 +2709,36 @@ PLAN-ROOT SINK
 |  |  |
 |  |  07:TOP-N [LIMIT=3]
 |  |  |  order by: tinyint_col ASC
+|  |  |  row-size=89B cardinality=3
 |  |  |
 |  |  04:UNION
 |  |  |  pass-through-operands: all
+|  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--06:SCAN HDFS [functional.alltypestiny]
+|  |  |     partition predicates: year = 2009, month = 3
 |  |  |     partitions=1/4 files=1 size=115B
+|  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  05:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--21:EXCHANGE [RANDOM]
 |  |
@@ -2110,21 +2748,31 @@ PLAN-ROOT SINK
 |  |
 |  15:TOP-N [LIMIT=3]
 |  |  order by: tinyint_col ASC
+|  |  row-size=89B cardinality=2
 |  |
 |  12:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=2
 |  |
 |  |--14:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 5
 |  |     partitions=0/4 files=0 size=0B
+|  |     row-size=89B cardinality=0
 |  |
 |  13:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 4
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--11:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 4
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 10:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 3
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // UNION ALL in subquery
 select x.* from
@@ -2139,21 +2787,30 @@ PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=5]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=5
 |
 00:UNION
 |  pass-through-operands: 04
+|  row-size=89B cardinality=6
 |
 |--01:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 04:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 2:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -2170,21 +2827,30 @@ PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=5]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=5
 |
 00:UNION
 |  pass-through-operands: 04
+|  row-size=89B cardinality=6
 |
 |--01:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 04:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // UNION DISTINCT in subquery
 select x.* from
@@ -2199,27 +2865,38 @@ PLAN-ROOT SINK
 |
 07:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=3
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: 05
+|  row-size=89B cardinality=6
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  01:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
 NODE 2:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -2236,37 +2913,50 @@ PLAN-ROOT SINK
 |
 07:TOP-N [LIMIT=3]
 |  order by: tinyint_col ASC
+|  row-size=89B cardinality=3
 |
 11:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 10:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 06:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=6
 |
 00:UNION
 |  pass-through-operands: 05
+|  row-size=89B cardinality=6
 |
 |--09:AGGREGATE [FINALIZE]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  08:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  |  row-size=89B cardinality=4
 |  |
 |  01:UNION
 |  |  pass-through-operands: all
+|  |  row-size=89B cardinality=4
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
+|  |     partition predicates: year = 2009, month = 1
 |  |     partitions=1/4 files=1 size=115B
+|  |     row-size=89B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 2
    partitions=1/4 files=1 size=115B
+   row-size=89B cardinality=2
 ====
 // UNION ALL in subquery with a WHERE condition in the outer select.
 select x.* from
@@ -2281,22 +2971,29 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=13B cardinality=2
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=1
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
 |     predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
+|     row-size=5B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: int_col, bool_col
+|  row-size=13B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
    predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
+   row-size=5B cardinality=1
 ---- SCANRANGELOCATIONS
 NODE 1:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypestiny/year=2009/month=1/090101.txt 0:115
@@ -2309,34 +3006,43 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=13B cardinality=2
 |
 |--08:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=1
 |  |
 |  07:EXCHANGE [HASH(int_col,bool_col)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=1
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
 |     predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
+|     row-size=5B cardinality=1
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: int_col, bool_col
+|  row-size=13B cardinality=1
 |
 05:EXCHANGE [HASH(int_col,bool_col)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: int_col, bool_col
+|  row-size=13B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=115B
    predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
+   row-size=5B cardinality=1
 ====
 // UNION ALL with only constant selects
 select 1, 'a', NULL, 10.f
@@ -2349,11 +3055,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=3
+   row-size=15B cardinality=3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=3
+   row-size=15B cardinality=3
 ====
 // UNION DISTINCT with only constant selects
 select 1, 'a', NULL, 10.0f
@@ -2366,17 +3074,21 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  group by: 1, 'a', null, f
+|  row-size=18B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  group by: 1, 'a', null, f
+|  row-size=18B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ====
 // UNION ALL/DISTINCT with mixed constant selects and non-constant selects
 select 1, 'a', NULL, 10.f
@@ -2391,19

<TRUNCATED>

[11/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/sort-expr-materialization.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/sort-expr-materialization.test b/testdata/workloads/functional-planner/queries/PlannerTest/sort-expr-materialization.test
index 3088a58..433ea07 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/sort-expr-materialization.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/sort-expr-materialization.test
@@ -10,7 +10,7 @@ PLAN-ROOT SINK
 |  order by: random() ASC
 |  materialized: random()
 |  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=97B cardinality=7300
+|  tuple-ids=1 row-size=97B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -21,7 +21,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # sort on a deterministic expr that exceeds the cost threshold
@@ -36,7 +36,7 @@ PLAN-ROOT SINK
 |  order by: abs(id) + abs(id) ASC
 |  materialized: abs(id) + abs(id)
 |  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=97B cardinality=7300
+|  tuple-ids=1 row-size=97B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -47,7 +47,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # sort on a deterministic expr that doesn't exceed the cost threshold
@@ -61,7 +61,7 @@ PLAN-ROOT SINK
 01:SORT
 |  order by: tinyint_col + 1 ASC
 |  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=89B cardinality=7300
+|  tuple-ids=1 row-size=89B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -72,7 +72,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # sort on multiple exprs, subset is materialized
@@ -88,7 +88,7 @@ PLAN-ROOT SINK
 |  order by: dayofweek(timestamp_col) ASC, TRUE ASC, id + 1 ASC, string_col = date_string_col ASC, id = tinyint_col ASC
 |  materialized: dayofweek(timestamp_col), string_col = date_string_col
 |  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=94B cardinality=7300
+|  tuple-ids=1 row-size=94B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -99,7 +99,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # expensive analytic order by expr gets materialized
@@ -116,14 +116,14 @@ PLAN-ROOT SINK
 |  order by: to_date(timestamp_col) ASC, bool_col IS NULL ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3,2 row-size=37B cardinality=7300
+|  tuple-ids=3,2 row-size=37B cardinality=7.30K
 |  in pipelines: 01(GETNEXT)
 |
 01:SORT
 |  order by: to_date(timestamp_col) ASC, bool_col IS NULL ASC
 |  materialized: to_date(timestamp_col)
 |  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=3 row-size=33B cardinality=7300
+|  tuple-ids=3 row-size=33B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -134,7 +134,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=21B cardinality=7300
+   tuple-ids=0 row-size=21B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # expensive order by expr in top-n gets materialized
@@ -161,7 +161,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=25B cardinality=7300
+   tuple-ids=0 row-size=25B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # sort on udf, gets materialized
@@ -176,7 +176,7 @@ PLAN-ROOT SINK
 |  order by: default.testfn(double_col) ASC
 |  materialized: default.testfn(double_col)
 |  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=93B cardinality=7300
+|  tuple-ids=1 row-size=93B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -187,7 +187,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # sort expr contains SlotRefs that don't need to be materialized separately
@@ -202,7 +202,7 @@ PLAN-ROOT SINK
 |  order by: concat(date_string_col, string_col) ASC
 |  materialized: concat(date_string_col, string_col)
 |  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=12B cardinality=7300
+|  tuple-ids=1 row-size=12B cardinality=7.30K
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
@@ -213,7 +213,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=33B cardinality=7300
+   tuple-ids=0 row-size=33B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # IMPALA-5270: Rand() and udf inside inline view referenced by analytic function.

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test b/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test
index 1ddc976..8d35928 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/spillable-buffer-sizing.test
@@ -17,7 +17,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.33MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=327B cardinality=150000
+|  tuple-ids=0,1 row-size=327B cardinality=150.00K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
@@ -27,7 +27,7 @@ Per-Host Resources: mem-estimate=26.95MB mem-reservation=18.94MB thread-reservat
 |  fk/pk conjuncts: c_nationkey = n_nationkey
 |  runtime filters: RF000[bloom] <- n_nationkey
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=327B cardinality=150000
+|  tuple-ids=0,1 row-size=327B cardinality=150.00K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--03:EXCHANGE [BROADCAST]
@@ -55,7 +55,7 @@ Per-Host Resources: mem-estimate=26.95MB mem-reservation=18.94MB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=150000
    mem-estimate=24.00MB mem-reservation=16.00MB thread-reservation=1
-   tuple-ids=0 row-size=218B cardinality=150000
+   tuple-ids=0 row-size=218B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=37.94MB Threads=5
@@ -72,7 +72,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.65MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=327B cardinality=150000
+|  tuple-ids=0,1 row-size=327B cardinality=150.00K
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
@@ -83,7 +83,7 @@ Per-Host Resources: mem-estimate=53.88MB mem-reservation=37.88MB thread-reservat
 |  fk/pk conjuncts: c_nationkey = n_nationkey
 |  runtime filters: RF000[bloom] <- n_nationkey
 |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=327B cardinality=150000
+|  tuple-ids=0,1 row-size=327B cardinality=150.00K
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
@@ -118,7 +118,7 @@ Per-Host Resources: mem-estimate=53.88MB mem-reservation=37.88MB thread-reservat
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=150000
    mem-estimate=24.00MB mem-reservation=16.00MB thread-reservation=0
-   tuple-ids=0 row-size=218B cardinality=150000
+   tuple-ids=0 row-size=218B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Join with large build side - should use default-sized buffers.
@@ -140,7 +140,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.20MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1N row-size=402B cardinality=6001215
+|  tuple-ids=0,1N row-size=402B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -149,12 +149,12 @@ Per-Host Resources: mem-estimate=359.29MB mem-reservation=74.00MB thread-reserva
 |  hash predicates: l_orderkey = o_orderkey
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  mem-estimate=268.94MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1N row-size=402B cardinality=6001215
+|  tuple-ids=0,1N row-size=402B cardinality=6.00M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.34MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -164,19 +164,19 @@ Per-Host Resources: mem-estimate=359.29MB mem-reservation=74.00MB thread-reserva
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=1
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=196.00MB Threads=5
@@ -193,7 +193,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=12.40MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1N row-size=402B cardinality=6001215
+|  tuple-ids=0,1N row-size=402B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -203,7 +203,7 @@ Per-Host Resources: mem-estimate=697.89MB mem-reservation=148.00MB thread-reserv
 |  hash predicates: l_orderkey = o_orderkey
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  mem-estimate=268.94MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1N row-size=402B cardinality=6001215
+|  tuple-ids=0,1N row-size=402B cardinality=6.00M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -215,7 +215,7 @@ Per-Host Resources: mem-estimate=697.89MB mem-reservation=148.00MB thread-reserv
 |  |
 |  03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.68MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=171B cardinality=1500000
+|  |  tuple-ids=1 row-size=171B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -225,19 +225,19 @@ Per-Host Resources: mem-estimate=697.89MB mem-reservation=148.00MB thread-reserv
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=0
-|     tuple-ids=1 row-size=171B cardinality=1500000
+|     tuple-ids=1 row-size=171B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Shuffle join with mid-sized input.
@@ -260,7 +260,7 @@ PLAN-ROOT SINK
 |
 05:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.77MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=388B cardinality=1500000
+|  tuple-ids=0,1 row-size=388B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 F02:PLAN FRAGMENT [HASH(o_custkey)] hosts=2 instances=2
@@ -270,12 +270,12 @@ Per-Host Resources: mem-estimate=38.68MB mem-reservation=18.00MB thread-reservat
 |  fk/pk conjuncts: o_custkey = c_custkey
 |  runtime filters: RF000[bloom] <- c_custkey
 |  mem-estimate=17.12MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=388B cardinality=1500000
+|  tuple-ids=0,1 row-size=388B cardinality=1.50M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--04:EXCHANGE [HASH(c_custkey)]
 |  |  mem-estimate=10.22MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=218B cardinality=150000
+|  |  tuple-ids=1 row-size=218B cardinality=150.00K
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
@@ -287,12 +287,12 @@ Per-Host Resources: mem-estimate=38.68MB mem-reservation=18.00MB thread-reservat
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=150000
 |     mem-estimate=24.00MB mem-reservation=16.00MB thread-reservation=1
-|     tuple-ids=1 row-size=218B cardinality=150000
+|     tuple-ids=1 row-size=218B cardinality=150.00K
 |     in pipelines: 01(GETNEXT)
 |
 03:EXCHANGE [HASH(o_custkey)]
 |  mem-estimate=10.34MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=171B cardinality=1500000
+|  tuple-ids=0 row-size=171B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -303,9 +303,9 @@ Per-Host Resources: mem-estimate=41.00MB mem-reservation=25.00MB thread-reservat
    stored statistics:
      table: rows=1500000 size=54.07MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=1177135
+   extrapolated-rows=disabled max-scan-range-rows=1177136
    mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=1
-   tuple-ids=0 row-size=171B cardinality=1500000
+   tuple-ids=0 row-size=171B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=101.00MB Threads=7
@@ -323,7 +323,7 @@ PLAN-ROOT SINK
 |
 05:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.55MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=388B cardinality=1500000
+|  tuple-ids=0,1 row-size=388B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 F02:PLAN FRAGMENT [HASH(o_custkey)] hosts=2 instances=4
@@ -334,7 +334,7 @@ Per-Host Resources: mem-estimate=40.48MB mem-reservation=19.00MB thread-reservat
 |  fk/pk conjuncts: o_custkey = c_custkey
 |  runtime filters: RF000[bloom] <- c_custkey
 |  mem-estimate=8.56MB mem-reservation=8.50MB spill-buffer=512.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=388B cardinality=1500000
+|  tuple-ids=0,1 row-size=388B cardinality=1.50M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--F04:PLAN FRAGMENT [HASH(o_custkey)] hosts=1 instances=2
@@ -346,7 +346,7 @@ Per-Host Resources: mem-estimate=40.48MB mem-reservation=19.00MB thread-reservat
 |  |
 |  04:EXCHANGE [HASH(c_custkey)]
 |  |  mem-estimate=10.43MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=218B cardinality=150000
+|  |  tuple-ids=1 row-size=218B cardinality=150.00K
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
@@ -358,12 +358,12 @@ Per-Host Resources: mem-estimate=40.48MB mem-reservation=19.00MB thread-reservat
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=150000
 |     mem-estimate=24.00MB mem-reservation=16.00MB thread-reservation=0
-|     tuple-ids=1 row-size=218B cardinality=150000
+|     tuple-ids=1 row-size=218B cardinality=150.00K
 |     in pipelines: 01(GETNEXT)
 |
 03:EXCHANGE [HASH(o_custkey)]
 |  mem-estimate=10.68MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=171B cardinality=1500000
+|  tuple-ids=0 row-size=171B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -374,9 +374,9 @@ Per-Host Resources: mem-estimate=82.00MB mem-reservation=50.00MB thread-reservat
    stored statistics:
      table: rows=1500000 size=54.07MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=1177135
+   extrapolated-rows=disabled max-scan-range-rows=1177136
    mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=0
-   tuple-ids=0 row-size=171B cardinality=1500000
+   tuple-ids=0 row-size=171B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ====
 # Broadcast join with mid-sized input - should use larger buffers than shuffle join.
@@ -399,7 +399,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.77MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=388B cardinality=1500000
+|  tuple-ids=0,1 row-size=388B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -409,12 +409,12 @@ Per-Host Resources: mem-estimate=85.45MB mem-reservation=59.00MB thread-reservat
 |  fk/pk conjuncts: o_custkey = c_custkey
 |  runtime filters: RF000[bloom] <- c_custkey
 |  mem-estimate=34.23MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=388B cardinality=1500000
+|  tuple-ids=0,1 row-size=388B cardinality=1.50M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.22MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=218B cardinality=150000
+|  |  tuple-ids=1 row-size=218B cardinality=150.00K
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
@@ -426,7 +426,7 @@ Per-Host Resources: mem-estimate=85.45MB mem-reservation=59.00MB thread-reservat
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=150000
 |     mem-estimate=24.00MB mem-reservation=16.00MB thread-reservation=1
-|     tuple-ids=1 row-size=218B cardinality=150000
+|     tuple-ids=1 row-size=218B cardinality=150.00K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.orders, RANDOM]
@@ -435,9 +435,9 @@ Per-Host Resources: mem-estimate=85.45MB mem-reservation=59.00MB thread-reservat
    stored statistics:
      table: rows=1500000 size=54.07MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=1177135
+   extrapolated-rows=disabled max-scan-range-rows=1177136
    mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=1
-   tuple-ids=0 row-size=171B cardinality=1500000
+   tuple-ids=0 row-size=171B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=150.00MB Threads=5
@@ -455,7 +455,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.55MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0,1 row-size=388B cardinality=1500000
+|  tuple-ids=0,1 row-size=388B cardinality=1.50M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -466,7 +466,7 @@ Per-Host Resources: mem-estimate=150.47MB mem-reservation=118.00MB thread-reserv
 |  fk/pk conjuncts: o_custkey = c_custkey
 |  runtime filters: RF000[bloom] <- c_custkey
 |  mem-estimate=34.23MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=388B cardinality=1500000
+|  tuple-ids=0,1 row-size=388B cardinality=1.50M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--F03:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
@@ -478,7 +478,7 @@ Per-Host Resources: mem-estimate=150.47MB mem-reservation=118.00MB thread-reserv
 |  |
 |  03:EXCHANGE [BROADCAST]
 |  |  mem-estimate=10.43MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=218B cardinality=150000
+|  |  tuple-ids=1 row-size=218B cardinality=150.00K
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
@@ -490,7 +490,7 @@ Per-Host Resources: mem-estimate=150.47MB mem-reservation=118.00MB thread-reserv
 |       columns: all
 |     extrapolated-rows=disabled max-scan-range-rows=150000
 |     mem-estimate=24.00MB mem-reservation=16.00MB thread-reservation=0
-|     tuple-ids=1 row-size=218B cardinality=150000
+|     tuple-ids=1 row-size=218B cardinality=150.00K
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.orders, RANDOM]
@@ -499,9 +499,9 @@ Per-Host Resources: mem-estimate=150.47MB mem-reservation=118.00MB thread-reserv
    stored statistics:
      table: rows=1500000 size=54.07MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=1177135
+   extrapolated-rows=disabled max-scan-range-rows=1177136
    mem-estimate=40.00MB mem-reservation=24.00MB thread-reservation=0
-   tuple-ids=0 row-size=171B cardinality=1500000
+   tuple-ids=0 row-size=171B cardinality=1.50M
    in pipelines: 00(GETNEXT)
 ====
 # Join with no stats for right input - should use default buffers.
@@ -556,7 +556,7 @@ Per-Host Resources: mem-estimate=2.02GB mem-reservation=34.09MB thread-reservati
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -621,7 +621,7 @@ Per-Host Resources: mem-estimate=4.03GB mem-reservation=68.17MB thread-reservati
 |     in pipelines: 01(GETNEXT)
 |
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   partitions=24/24 files=24 size=189.12KB
+   partitions=24/24 files=24 size=189.28KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -681,7 +681,7 @@ Per-Host Resources: mem-estimate=34.00MB mem-reservation=4.00MB thread-reservati
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=150000
    mem-estimate=24.00MB mem-reservation=2.00MB thread-reservation=1
-   tuple-ids=0 row-size=10B cardinality=150000
+   tuple-ids=0 row-size=10B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=11.88MB Threads=5
@@ -729,7 +729,7 @@ Per-Host Resources: mem-estimate=68.00MB mem-reservation=8.00MB thread-reservati
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=150000
    mem-estimate=24.00MB mem-reservation=2.00MB thread-reservation=0
-   tuple-ids=0 row-size=10B cardinality=150000
+   tuple-ids=0 row-size=10B cardinality=150.00K
    in pipelines: 00(GETNEXT)
 ====
 # Mid NDV aggregation - should scale down buffers to intermediate size.
@@ -754,7 +754,7 @@ PLAN-ROOT SINK
 |
 08:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.10MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 07(GETNEXT)
 |
 F03:PLAN FRAGMENT [HASH(l_orderkey,o_orderstatus)] hosts=3 instances=3
@@ -764,12 +764,12 @@ Per-Host Resources: mem-estimate=27.10MB mem-reservation=17.00MB thread-reservat
 |  group by: l_orderkey, o_orderstatus
 |  having: count(*) = CAST(1 AS BIGINT)
 |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 07(GETNEXT), 00(OPEN)
 |
 06:EXCHANGE [HASH(l_orderkey,o_orderstatus)]
 |  mem-estimate=10.10MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 00(GETNEXT)
 |
 F02:PLAN FRAGMENT [HASH(l_orderkey)] hosts=3 instances=3
@@ -778,7 +778,7 @@ Per-Host Resources: mem-estimate=85.65MB mem-reservation=52.00MB thread-reservat
 |  output: count(*)
 |  group by: l_orderkey, o_orderstatus
 |  mem-estimate=47.56MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 00(GETNEXT)
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
@@ -786,12 +786,12 @@ Per-Host Resources: mem-estimate=85.65MB mem-reservation=52.00MB thread-reservat
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=17.00MB mem-reservation=17.00MB spill-buffer=1.00MB thread-reservation=0
-|  tuple-ids=0,1 row-size=29B cardinality=5757710
+|  tuple-ids=0,1 row-size=29B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--05:EXCHANGE [HASH(o_orderkey)]
 |  |  mem-estimate=10.05MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=21B cardinality=1500000
+|  |  tuple-ids=1 row-size=21B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2
@@ -801,27 +801,27 @@ Per-Host Resources: mem-estimate=85.65MB mem-reservation=52.00MB thread-reservat
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=8.00MB thread-reservation=1
-|     tuple-ids=1 row-size=21B cardinality=1500000
+|     tuple-ids=1 row-size=21B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 04:EXCHANGE [HASH(l_orderkey)]
 |  mem-estimate=10.04MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=8B cardinality=6001215
+|  tuple-ids=0 row-size=8B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
 Per-Host Resources: mem-estimate=81.00MB mem-reservation=5.00MB thread-reservation=2 runtime-filters-memory=1.00MB
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    runtime filters: RF000[bloom] -> l_orderkey
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=1
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=130.00MB Threads=9
@@ -839,7 +839,7 @@ PLAN-ROOT SINK
 |
 08:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.19MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 07(GETNEXT)
 |
 F03:PLAN FRAGMENT [HASH(l_orderkey,o_orderstatus)] hosts=3 instances=6
@@ -849,12 +849,12 @@ Per-Host Resources: mem-estimate=40.39MB mem-reservation=17.00MB thread-reservat
 |  group by: l_orderkey, o_orderstatus
 |  having: count(*) = CAST(1 AS BIGINT)
 |  mem-estimate=10.00MB mem-reservation=8.50MB spill-buffer=512.00KB thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 07(GETNEXT), 00(OPEN)
 |
 06:EXCHANGE [HASH(l_orderkey,o_orderstatus)]
 |  mem-estimate=10.19MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 00(GETNEXT)
 |
 F02:PLAN FRAGMENT [HASH(l_orderkey)] hosts=3 instances=6
@@ -863,7 +863,7 @@ Per-Host Resources: mem-estimate=107.20MB mem-reservation=87.00MB thread-reserva
 |  output: count(*)
 |  group by: l_orderkey, o_orderstatus
 |  mem-estimate=34.00MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=2 row-size=29B cardinality=4690314
+|  tuple-ids=2 row-size=29B cardinality=4.69M
 |  in pipelines: 00(GETNEXT)
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
@@ -872,7 +872,7 @@ Per-Host Resources: mem-estimate=107.20MB mem-reservation=87.00MB thread-reserva
 |  fk/pk conjuncts: l_orderkey = o_orderkey
 |  runtime filters: RF000[bloom] <- o_orderkey
 |  mem-estimate=8.50MB mem-reservation=8.50MB spill-buffer=512.00KB thread-reservation=0
-|  tuple-ids=0,1 row-size=29B cardinality=5757710
+|  tuple-ids=0,1 row-size=29B cardinality=5.76M
 |  in pipelines: 00(GETNEXT), 01(OPEN)
 |
 |--F05:PLAN FRAGMENT [HASH(l_orderkey)] hosts=2 instances=4
@@ -884,7 +884,7 @@ Per-Host Resources: mem-estimate=107.20MB mem-reservation=87.00MB thread-reserva
 |  |
 |  05:EXCHANGE [HASH(o_orderkey)]
 |  |  mem-estimate=10.10MB mem-reservation=0B thread-reservation=0
-|  |  tuple-ids=1 row-size=21B cardinality=1500000
+|  |  tuple-ids=1 row-size=21B cardinality=1.50M
 |  |  in pipelines: 01(GETNEXT)
 |  |
 |  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=4
@@ -894,27 +894,27 @@ Per-Host Resources: mem-estimate=107.20MB mem-reservation=87.00MB thread-reserva
 |     stored statistics:
 |       table: rows=1500000 size=54.07MB
 |       columns: all
-|     extrapolated-rows=disabled max-scan-range-rows=1177135
+|     extrapolated-rows=disabled max-scan-range-rows=1177136
 |     mem-estimate=40.00MB mem-reservation=8.00MB thread-reservation=0
-|     tuple-ids=1 row-size=21B cardinality=1500000
+|     tuple-ids=1 row-size=21B cardinality=1.50M
 |     in pipelines: 01(GETNEXT)
 |
 04:EXCHANGE [HASH(l_orderkey)]
 |  mem-estimate=10.07MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=0 row-size=8B cardinality=6001215
+|  tuple-ids=0 row-size=8B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
 Per-Host Resources: mem-estimate=162.00MB mem-reservation=10.00MB thread-reservation=2 runtime-filters-memory=1.00MB
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    runtime filters: RF000[bloom] -> l_orderkey
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=4.00MB thread-reservation=0
-   tuple-ids=0 row-size=8B cardinality=6001215
+   tuple-ids=0 row-size=8B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # High NDV aggregation - should use default buffer size.
@@ -932,7 +932,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 03(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)] hosts=3 instances=3
@@ -940,12 +940,12 @@ Per-Host Resources: mem-estimate=737.12MB mem-reservation=34.00MB thread-reserva
 03:AGGREGATE [FINALIZE]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
 |  mem-estimate=726.43MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 03(GETNEXT), 00(OPEN)
 |
 02:EXCHANGE [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)]
 |  mem-estimate=10.69MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -953,17 +953,17 @@ Per-Host Resources: mem-estimate=806.43MB mem-reservation=74.00MB thread-reserva
 01:AGGREGATE [STREAMING]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
 |  mem-estimate=726.43MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=1
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=216.00MB Threads=5
@@ -977,7 +977,7 @@ PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |  mem-estimate=11.38MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 03(GETNEXT)
 |
 F01:PLAN FRAGMENT [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)] hosts=3 instances=6
@@ -985,12 +985,12 @@ Per-Host Resources: mem-estimate=749.19MB mem-reservation=68.00MB thread-reserva
 03:AGGREGATE [FINALIZE]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
 |  mem-estimate=363.22MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 03(GETNEXT), 00(OPEN)
 |
 02:EXCHANGE [HASH(tpch_parquet.lineitem.l_orderkey,tpch_parquet.lineitem.l_partkey,tpch_parquet.lineitem.l_suppkey,tpch_parquet.lineitem.l_linenumber,tpch_parquet.lineitem.l_quantity,tpch_parquet.lineitem.l_extendedprice,tpch_parquet.lineitem.l_discount,tpch_parquet.lineitem.l_tax,tpch_parquet.lineitem.l_returnflag,tpch_parquet.lineitem.l_linestatus,tpch_parquet.lineitem.l_shipdate,tpch_parquet.lineitem.l_commitdate,tpch_parquet.lineitem.l_receiptdate,tpch_parquet.lineitem.l_shipinstruct,tpch_parquet.lineitem.l_shipmode,tpch_parquet.lineitem.l_comment)]
 |  mem-estimate=11.38MB mem-reservation=0B thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
@@ -998,17 +998,17 @@ Per-Host Resources: mem-estimate=886.43MB mem-reservation=148.00MB thread-reserv
 01:AGGREGATE [STREAMING]
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_partkey, tpch_parquet.lineitem.l_suppkey, tpch_parquet.lineitem.l_linenumber, tpch_parquet.lineitem.l_quantity, tpch_parquet.lineitem.l_extendedprice, tpch_parquet.lineitem.l_discount, tpch_parquet.lineitem.l_tax, tpch_parquet.lineitem.l_returnflag, tpch_parquet.lineitem.l_linestatus, tpch_parquet.lineitem.l_shipdate, tpch_parquet.lineitem.l_commitdate, tpch_parquet.lineitem.l_receiptdate, tpch_parquet.lineitem.l_shipinstruct, tpch_parquet.lineitem.l_shipmode, tpch_parquet.lineitem.l_comment
 |  mem-estimate=363.22MB mem-reservation=34.00MB spill-buffer=2.00MB thread-reservation=0
-|  tuple-ids=1 row-size=231B cardinality=6001215
+|  tuple-ids=1 row-size=231B cardinality=6.00M
 |  in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_parquet.lineitem, RANDOM]
-   partitions=1/1 files=3 size=193.72MB
+   partitions=1/1 files=3 size=193.60MB
    stored statistics:
-     table: rows=6001215 size=193.72MB
+     table: rows=6001215 size=193.60MB
      columns: all
-   extrapolated-rows=disabled max-scan-range-rows=2141609
+   extrapolated-rows=disabled max-scan-range-rows=2141702
    mem-estimate=80.00MB mem-reservation=40.00MB thread-reservation=0
-   tuple-ids=0 row-size=231B cardinality=6001215
+   tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
 # Aggregation with unknown input - should use default buffer size.


[10/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test b/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test
index 1e59828..5176491 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test
@@ -9,13 +9,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = id
 |  runtime filters: RF000 <- id
+|  row-size=89B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=4B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=89B cardinality=7.30K
 ====
 # NOT IN predicate rewritten into a null-aware anti join
 select *
@@ -27,12 +30,15 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  hash predicates: id = id
+|  row-size=89B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=4B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Correlated NOT IN rewritten into a null-aware anti join
 select *
@@ -48,13 +54,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  hash predicates: a.int_col = int_col
 |  other join predicates: a.id = g.id, g.bigint_col < a.bigint_col
+|  row-size=89B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypesagg g]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=16B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col < 100
+   row-size=89B cardinality=730
 ====
 # Correlated NOT IN subquery resulting in the same eq conjunct
 # being used in both the hash and the other join predicate
@@ -67,12 +76,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  hash predicates: a.id = id
 |  other join predicates: a.id = b.id
+|  row-size=89B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Subquery with predicate in the WHERE clause
 select count(*)
@@ -85,19 +97,23 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.id = g.id, int_col = int_col
 |  runtime filters: RF000 <- g.id, RF001 <- int_col
+|  row-size=9B cardinality=1.10K
 |
 |--01:SCAN HDFS [functional.alltypesagg g]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: g.bigint_col < 10
+|     row-size=16B cardinality=1.10K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: bool_col = FALSE
    runtime filters: RF000 -> a.id, RF001 -> int_col
+   row-size=9B cardinality=3.65K
 ====
 # IMPALA-4325: Preserve parenthesis of expressions when rewriting subqueries
 select *
@@ -113,14 +129,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t1.int_col = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=89B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: (t2.int_col IS NOT NULL AND (t2.int_col < 0 OR t2.int_col > 10) OR t2.bigint_col IS NOT NULL AND (t2.bigint_col < 0 OR t2.bigint_col > 10))
+|     row-size=12B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.int_col
+   row-size=89B cardinality=7.30K
 ====
 # Complex expression in the IN predicate
 select *
@@ -133,13 +152,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.int_col + 1 = int_col + bigint_col
 |  runtime filters: RF000 <- int_col + bigint_col
+|  row-size=89B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=12B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t.int_col + 1
+   row-size=89B cardinality=7.30K
 ====
 # Multiple subqueries in the WHERE clause
 select *
@@ -153,22 +175,27 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  hash predicates: t.tinyint_col = tinyint_col
+|  row-size=89B cardinality=730
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=1B cardinality=8
 |
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.id = id
 |  runtime filters: RF000 <- id
+|  row-size=89B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: bool_col = FALSE
+|     row-size=5B cardinality=5.50K
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    predicates: t.bigint_col < 1000
    runtime filters: RF000 -> t.id
+   row-size=89B cardinality=730
 ====
 # Multiple tables in the FROM clause of the outer query block
 select count(*)
@@ -180,25 +207,31 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=12B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: bool_col = FALSE
+|     row-size=5B cardinality=4
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = t.id
 |  runtime filters: RF002 <- t.id
+|  row-size=12B cardinality=7.81K
 |
 |--01:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.int_col, RF002 -> a.id
+   row-size=8B cardinality=11.00K
 ====
 # Multiple tables in the subquery
 select count(*)
@@ -213,26 +246,32 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.bool_col = s.bool_col, a.id = s.id
 |  runtime filters: RF000 <- s.bool_col, RF001 <- s.id
+|  row-size=9B cardinality=80
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s.int_col = t.int_col
 |  |  runtime filters: RF004 <- t.int_col
+|  |  row-size=13B cardinality=80
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny t]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF004 -> s.int_col
+|     row-size=9B cardinality=100
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: a.int_col < 10
    runtime filters: RF000 -> a.bool_col, RF001 -> a.id
+   row-size=9B cardinality=1.10K
 ====
 # Outer join between the tables in the outer query block
 select count(*)
@@ -246,25 +285,31 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.id = id
 |  runtime filters: RF000 <- id
+|  row-size=13B cardinality=33
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.int_col = t.int_col
 |  other predicates: t.bool_col = FALSE
+|  row-size=13B cardinality=41.95K
 |
 |--01:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t.bool_col = FALSE
+|     row-size=5B cardinality=3.65K
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.id
+   row-size=8B cardinality=11.00K
 ====
 # Subquery in the outer-joined table
 select count(*)
@@ -278,26 +323,32 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.int_col = g.int_col
 |  other predicates: g.bigint_col < 100
+|  row-size=21B cardinality=5.84K
 |
 |--03:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: id = id
 |  |  runtime filters: RF000 <- id
+|  |  row-size=16B cardinality=8
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypesagg g]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: g.bigint_col < 100
 |     runtime filters: RF000 -> id
+|     row-size=16B cardinality=1.10K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.bool_col = FALSE
+   row-size=5B cardinality=3.65K
 ====
 # Multiple tables in the FROM clause of the subquery
 select count(distinct id)
@@ -311,37 +362,46 @@ PLAN-ROOT SINK
 |
 08:AGGREGATE [FINALIZE]
 |  output: count(id)
+|  row-size=8B cardinality=1
 |
 07:AGGREGATE
 |  group by: id
+|  row-size=4B cardinality=115
 |
 06:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.int_col = t.int_col
 |  runtime filters: RF000 <- t.int_col
+|  row-size=8B cardinality=115
 |
 |--05:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s.bigint_col = n.bigint_col
 |  |  runtime filters: RF002 <- n.bigint_col
+|  |  row-size=29B cardinality=40
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny n]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: n.bool_col = FALSE
+|  |     row-size=9B cardinality=4
 |  |
 |  04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t.id = s.id
 |  |  runtime filters: RF004 <- s.id
+|  |  row-size=20B cardinality=99
 |  |
 |  |--02:SCAN HDFS [functional.alltypessmall s]
 |  |     partitions=4/4 files=4 size=6.32KB
 |  |     runtime filters: RF002 -> s.bigint_col
+|  |     row-size=12B cardinality=100
 |  |
 |  01:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF004 -> t.id
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.int_col
+   row-size=8B cardinality=11.00K
 ====
 # Subqueries with inline views
 select *
@@ -356,26 +416,32 @@ PLAN-ROOT SINK
 05:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=89B cardinality=8
 |
 |--04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: id = a.id
 |  |  runtime filters: RF002 <- a.id
+|  |  row-size=16B cardinality=8
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  03:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: id
 |  |  having: count(*) = 10
+|  |  row-size=12B cardinality=99
 |  |
 |  02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF002 -> functional.alltypessmall.id
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t.id
+   row-size=89B cardinality=7.30K
 ====
 with t as (select a.* from functional.alltypes a where id in
   (select id from functional.alltypestiny))
@@ -386,14 +452,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = id
 |  runtime filters: RF000 <- id
+|  row-size=89B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.int_col = 10, a.bool_col = FALSE
    runtime filters: RF000 -> id
+   row-size=89B cardinality=516
 ====
 # Subqueries in WITH, FROM and WHERE clauses
 with t as (select a.* from functional.alltypes a
@@ -410,36 +479,45 @@ PLAN-ROOT SINK
 08:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=109B cardinality=91
 |
 |--06:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: g.string_col = a.string_col
 |  runtime filters: RF002 <- a.string_col
+|  row-size=109B cardinality=91
 |
 |--02:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: id = id
 |  |  runtime filters: RF006 <- id
+|  |  row-size=89B cardinality=8
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> a.int_col, RF006 -> id
+|     row-size=89B cardinality=7.30K
 |
 05:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: g.id = id
 |  runtime filters: RF004 <- id
+|  row-size=20B cardinality=5.50K
 |
 |--04:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 03:SCAN HDFS [functional.alltypesagg g]
    partitions=11/11 files=11 size=814.73KB
    predicates: g.bool_col = FALSE
    runtime filters: RF002 -> g.string_col, RF004 -> g.id
+   row-size=20B cardinality=5.50K
 ====
 # Correlated subqueries
 select *
@@ -453,14 +531,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = id, t.int_col = a.int_col
 |  runtime filters: RF000 <- id, RF001 <- a.int_col
+|  row-size=89B cardinality=3.65K
 |
 |--01:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=8B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    predicates: t.bool_col = FALSE
    runtime filters: RF000 -> id, RF001 -> t.int_col
+   row-size=89B cardinality=3.65K
 ====
 # Multiple nesting levels (uncorrelated queries)
 select *
@@ -476,23 +557,28 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = id
 |  runtime filters: RF000 <- id
+|  row-size=89B cardinality=11
 |
 |--03:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: int_col = int_col
 |  |  runtime filters: RF002 <- int_col
+|  |  row-size=9B cardinality=11
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: bool_col = FALSE
 |     runtime filters: RF002 -> int_col
+|     row-size=9B cardinality=5.50K
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    predicates: bigint_col < 1000
    runtime filters: RF000 -> id
+   row-size=89B cardinality=730
 ====
 # Multiple nesting levels (correlated queries)
 select *
@@ -508,21 +594,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = id, t.int_col = a.int_col
 |  runtime filters: RF000 <- id, RF001 <- a.int_col
+|  row-size=89B cardinality=22
 |
 |--03:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: a.bigint_col = s.bigint_col, a.tinyint_col = tinyint_col
 |  |  runtime filters: RF004 <- s.bigint_col, RF005 <- tinyint_col
+|  |  row-size=17B cardinality=22
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny s]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=9B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF004 -> a.bigint_col, RF005 -> a.tinyint_col
+|     row-size=17B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id, RF001 -> t.int_col
+   row-size=89B cardinality=7.30K
 ====
 # Multiple nesting levels (correlated and uncorrelated queries)
 select *
@@ -536,21 +627,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = id
 |  runtime filters: RF000 <- id
+|  row-size=89B cardinality=22
 |
 |--03:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: a.bigint_col = s.bigint_col, a.int_col = int_col
 |  |  runtime filters: RF002 <- s.bigint_col, RF003 <- int_col
+|  |  row-size=16B cardinality=22
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny s]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=12B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> a.bigint_col, RF003 -> a.int_col
+|     row-size=16B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=89B cardinality=7.30K
 ====
 # Predicate propagation with uncorrelated subqueries
 select *
@@ -563,15 +659,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = id
 |  runtime filters: RF000 <- id
+|  row-size=89B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: id < 10
+|     row-size=4B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 10
    runtime filters: RF000 -> id
+   row-size=89B cardinality=730
 ====
 # Predicate propagation with correlated subqueries
 select *
@@ -584,23 +683,28 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.id = s.id, t.int_col = int_col
 |  runtime filters: RF000 <- s.id, RF001 <- int_col
+|  row-size=184B cardinality=10
 |
 |--02:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: s.int_col < 10
+|     row-size=8B cardinality=10
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = t.id
 |  runtime filters: RF004 <- t.id
+|  row-size=184B cardinality=782
 |
 |--01:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t.int_col < 10
 |     runtime filters: RF000 -> t.id, RF001 -> t.int_col
+|     row-size=89B cardinality=730
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.id, RF004 -> a.id
+   row-size=95B cardinality=11.00K
 ====
 # Correlated EXISTS
 select count(*)
@@ -612,17 +716,21 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: a.id = t.id
 |  runtime filters: RF000 <- t.id
+|  row-size=4B cardinality=7.30K
 |
 |--00:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> a.id
+   row-size=4B cardinality=11.00K
 ====
 # Correlated EXISTS with an analytic function and a group by clause
 select 1
@@ -639,17 +747,21 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.tinyint_col = b.tinyint_col
 |  runtime filters: RF000 <- b.tinyint_col
+|  row-size=1B cardinality=244
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  group by: id, int_col, bool_col, b.tinyint_col
+|  |  row-size=10B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=10B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: tinyint_col < 10
    runtime filters: RF000 -> a.tinyint_col
+   row-size=1B cardinality=1.10K
 ====
 # Correlated NOT EXISTS
 select count(*)
@@ -661,15 +773,19 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: a.int_col = t.int_col
+|  row-size=4B cardinality=7.30K
 |
 |--00:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====
 # Correlated NOT EXISTS with an analytic function and a group by clause
 select count(*)
@@ -685,28 +801,35 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 05:HASH JOIN [LEFT ANTI JOIN]
 |  hash predicates: a.int_col = b.int_col
+|  row-size=5B cardinality=5.50K
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  group by: b.id, b.int_col, b.bigint_col
+|  |  row-size=16B cardinality=50
 |  |
 |  03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: c.id = b.id
 |  |  runtime filters: RF000 <- b.id
+|  |  row-size=21B cardinality=50
 |  |
 |  |--01:SCAN HDFS [functional.alltypessmall b]
 |  |     partitions=4/4 files=4 size=6.32KB
+|  |     row-size=16B cardinality=100
 |  |
 |  02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: c.bool_col = FALSE
 |     runtime filters: RF000 -> c.id
+|     row-size=5B cardinality=3.65K
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: bool_col = FALSE
+   row-size=5B cardinality=5.50K
 ====
 # Uncorrelated EXISTS
 select *
@@ -716,14 +839,17 @@ where exists (select * from functional.alltypessmall s where s.id < 5)
 PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: s.id < 5
 |     limit: 1
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Uncorrelated EXISTS with an analytic function and a group by clause
 select 1
@@ -736,17 +862,21 @@ where exists
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [RIGHT SEMI JOIN]
+|  row-size=0B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t]
 |     partitions=4/4 files=4 size=460B
+|     row-size=0B cardinality=8
 |
 02:AGGREGATE [FINALIZE]
 |  group by: id, int_col, bigint_col
 |  limit: 1
+|  row-size=16B cardinality=1
 |
 01:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
    predicates: tinyint_col = 10
+   row-size=17B cardinality=1.22K
 ====
 # Uncorrelated EXISTS with a LIMIT 0 clause
 select 1
@@ -765,14 +895,17 @@ where not exists (select * from functional.alltypessmall s where s.id < 5)
 PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [LEFT ANTI JOIN]
+|  row-size=89B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: s.id < 5
 |     limit: 1
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Uncorrelated NOT exists referencing a WITH clause
 with
@@ -785,14 +918,17 @@ where not exists (select 1 from w2)
 PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [LEFT ANTI JOIN]
+|  row-size=89B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: s.id < 0
 |     limit: 1
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Uncorrelated NOT EXISTS with an analytic function and a group by clause
 select 1
@@ -805,17 +941,21 @@ where not exists
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [RIGHT ANTI JOIN]
+|  row-size=0B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t]
 |     partitions=4/4 files=4 size=460B
+|     row-size=0B cardinality=8
 |
 02:AGGREGATE [FINALIZE]
 |  group by: id, int_col, bigint_col
 |  limit: 1
+|  row-size=16B cardinality=1
 |
 01:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
    predicates: tinyint_col = 10
+   row-size=17B cardinality=1.22K
 ====
 # Uncorrelated NOT EXISTS with a LIMIT 0 clause
 select 1
@@ -826,6 +966,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
+   row-size=0B cardinality=8
 ====
 # Multiple nesting levels
 select count(*)
@@ -839,26 +980,32 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.id = t.id
 |  runtime filters: RF000 <- t.id
+|  row-size=4B cardinality=8
 |
 |--03:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: g.int_col = t.int_col
 |  |  runtime filters: RF002 <- t.int_col
+|  |  row-size=8B cardinality=8
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny t]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=8B cardinality=8
 |  |
 |  02:SCAN HDFS [functional.alltypesagg g]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: g.bool_col = FALSE
 |     runtime filters: RF002 -> g.int_col
+|     row-size=5B cardinality=5.50K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id
+   row-size=4B cardinality=7.30K
 ====
 # Multiple subquery predicates
 select g.int_col, count(*)
@@ -885,50 +1032,61 @@ PLAN-ROOT SINK
 |  output: count(*)
 |  group by: g.int_col
 |  having: count(*) < 100
+|  row-size=12B cardinality=4
 |
 09:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: t.id = g.id
 |  other predicates: g.tinyint_col < zeroifnull(count(*))
 |  runtime filters: RF000 <- g.id
+|  row-size=33B cardinality=4
 |
 |--08:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: bigint_col = g.bigint_col, s.id = g.id
 |  |  runtime filters: RF002 <- g.bigint_col, RF003 <- g.id
+|  |  row-size=21B cardinality=4
 |  |
 |  |--07:HASH JOIN [LEFT SEMI JOIN]
 |  |  |  hash predicates: g.id = t.id
 |  |  |  runtime filters: RF006 <- t.id
+|  |  |  row-size=21B cardinality=4
 |  |  |
 |  |  |--02:SCAN HDFS [functional.alltypestiny t]
 |  |  |     partitions=4/4 files=4 size=460B
 |  |  |     predicates: t.bool_col = FALSE
+|  |  |     row-size=5B cardinality=4
 |  |  |
 |  |  06:HASH JOIN [RIGHT OUTER JOIN]
 |  |  |  hash predicates: a.id = g.id
 |  |  |  runtime filters: RF008 <- g.id
+|  |  |  row-size=21B cardinality=1.10K
 |  |  |
 |  |  |--00:SCAN HDFS [functional.alltypesagg g]
 |  |  |     partitions=11/11 files=11 size=814.73KB
 |  |  |     predicates: g.int_col < 100
 |  |  |     runtime filters: RF006 -> g.id
+|  |  |     row-size=17B cardinality=1.10K
 |  |  |
 |  |  01:SCAN HDFS [functional.alltypes a]
 |  |     partitions=24/24 files=24 size=478.45KB
 |  |     runtime filters: RF006 -> a.id, RF008 -> a.id
+|  |     row-size=4B cardinality=7.30K
 |  |
 |  03:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: s.int_col > 10
 |     runtime filters: RF002 -> bigint_col, RF003 -> s.id
+|     row-size=16B cardinality=10
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: t.id
+|  row-size=12B cardinality=3.65K
 |
 04:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    predicates: t.bool_col = TRUE
    runtime filters: RF000 -> t.id
+   row-size=5B cardinality=3.65K
 ====
 # Subqueries with aggregation
 select *
@@ -943,19 +1101,23 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.int_col = count(int_col)
 |  runtime filters: RF000 <- count(int_col)
+|  row-size=89B cardinality=730
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(int_col)
 |  |  group by: int_col
+|  |  row-size=12B cardinality=957
 |  |
 |  01:SCAN HDFS [functional.alltypesagg g]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: g.bool_col
+|     row-size=5B cardinality=1.10K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.bigint_col < 10
    runtime filters: RF000 -> a.int_col
+   row-size=89B cardinality=730
 ====
 # Uncorrelated aggregation subquery
 select *
@@ -968,17 +1130,21 @@ PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: a.int_col < max(int_col)
+|  row-size=93B cardinality=730
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: max(int_col)
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypesagg g]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: g.bool_col = TRUE
+|     row-size=5B cardinality=5.50K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.bigint_col > 10
+   row-size=89B cardinality=730
 ====
 # Aggregation subquery with constant comparison expr
 select *
@@ -989,18 +1155,22 @@ and a.int_col < 10
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=99B cardinality=0
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: max(id)
 |  |  having: max(id) > 10
+|  |  row-size=4B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t.bool_col = FALSE
+|     row-size=5B cardinality=3.65K
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: a.int_col < 10
+   row-size=95B cardinality=1.10K
 ====
 # Correlated aggregation subquery
 select a.int_col, count(*)
@@ -1017,23 +1187,28 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: a.int_col
+|  row-size=12B cardinality=10
 |
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.id = min(id), a.int_col = t.int_col
 |  runtime filters: RF000 <- min(id), RF001 <- t.int_col
+|  row-size=9B cardinality=10
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t.tinyint_col < 10
+|     row-size=9B cardinality=730
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: a.bool_col = FALSE
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=9B cardinality=5.50K
 ====
 # Aggregation subquery with multiple tables
 select t.tinyint_col, count(*)
@@ -1051,37 +1226,46 @@ PLAN-ROOT SINK
 08:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: t.tinyint_col
+|  row-size=9B cardinality=10
 |
 07:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.bigint_col = s.bigint_col
 |  other join predicates: t.int_col < min(s.int_col)
 |  runtime filters: RF000 <- s.bigint_col
+|  row-size=22B cardinality=7.30K
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: min(s.int_col)
 |  |  group by: s.bigint_col
+|  |  row-size=12B cardinality=10
 |  |
 |  04:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: s.id = p.id
+|  |  row-size=21B cardinality=50
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny p]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=4B cardinality=8
 |  |
 |  02:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: s.bool_col = FALSE
+|     row-size=17B cardinality=50
 |
 06:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t.id = a.id
 |  other predicates: a.bool_col = FALSE
+|  row-size=22B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: a.bool_col = FALSE
+|     row-size=5B cardinality=5.50K
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t.bigint_col
+   row-size=17B cardinality=7.30K
 ====
 # Multiple aggregation subqueries
 select *
@@ -1098,39 +1282,48 @@ PLAN-ROOT SINK
 |
 08:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: a.tinyint_col > max(tinyint_col)
+|  row-size=185B cardinality=781
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: max(tinyint_col)
+|  |  row-size=1B cardinality=1
 |  |
 |  04:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: s.id < 10
+|     row-size=5B cardinality=10
 |
 07:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.bigint_col = g.bigint_col
 |  other join predicates: a.int_col < min(int_col)
 |  runtime filters: RF000 <- g.bigint_col
+|  row-size=184B cardinality=781
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  output: min(int_col)
 |  |  group by: g.bigint_col
+|  |  row-size=12B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny g]
 |     partitions=4/4 files=4 size=460B
 |     predicates: g.bool_col = FALSE
+|     row-size=13B cardinality=4
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = t.id
 |  runtime filters: RF002 <- t.id
+|  row-size=184B cardinality=3.91K
 |
 |--01:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t.bool_col = FALSE
 |     runtime filters: RF000 -> t.bigint_col
+|     row-size=89B cardinality=3.65K
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF002 -> a.id
+   row-size=95B cardinality=11.00K
 ====
 # Multiple nesting levels with aggregation subqueries
 select *
@@ -1150,29 +1343,36 @@ PLAN-ROOT SINK
 |  hash predicates: t.id = g.id
 |  other join predicates: t.int_col < avg(g.int_col) * 2
 |  runtime filters: RF000 <- g.id
+|  row-size=89B cardinality=7.30K
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: avg(g.int_col)
 |  |  group by: g.id
+|  |  row-size=12B cardinality=10.28K
 |  |
 |  04:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: g.id = a.id
 |  |  other predicates: g.bigint_col < zeroifnull(count(*))
+|  |  row-size=28B cardinality=11.00K
 |  |
 |  |--03:AGGREGATE [FINALIZE]
 |  |  |  output: count(*)
 |  |  |  group by: a.id
+|  |  |  row-size=12B cardinality=4
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: a.bool_col = FALSE
+|  |     row-size=5B cardinality=4
 |  |
 |  01:SCAN HDFS [functional.alltypesagg g]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=16B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t.id
+   row-size=89B cardinality=7.30K
 ====
 # Multiple nesting of aggregate subquery predicates with count
 select *
@@ -1189,32 +1389,41 @@ PLAN-ROOT SINK
 08:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = s.id
 |  other predicates: a.int_col < zeroifnull(count(*))
+|  row-size=196B cardinality=11.00K
 |
 |--06:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: s.id
+|  |  row-size=12B cardinality=99
 |  |
 |  05:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: s.tinyint_col > count(*)
+|  |  row-size=13B cardinality=100
 |  |
 |  |--04:AGGREGATE [FINALIZE]
 |  |  |  output: count(*)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  03:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: bool_col = FALSE
+|  |     row-size=1B cardinality=4
 |  |
 |  02:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=5B cardinality=100
 |
 07:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.id = t.id
+|  row-size=184B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
+   row-size=95B cardinality=11.00K
 ====
 # Distinct in the outer select block
 select distinct id, bool_col
@@ -1226,20 +1435,25 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  group by: id, bool_col
+|  row-size=5B cardinality=0
 |
 03:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=13B cardinality=0
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  having: count(*) > 100
+|  |  row-size=8B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: id < 5, bool_col = FALSE
+|     row-size=5B cardinality=516
 |
 00:SCAN HDFS [functional.alltypesagg g]
    partitions=11/11 files=11 size=814.73KB
    predicates: bool_col = FALSE
+   row-size=5B cardinality=5.50K
 ====
 # Distinct with an unqualified star in the outer select block
 select distinct *
@@ -1251,23 +1465,29 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  group by: g.id, g.bool_col, g.tinyint_col, g.smallint_col, g.int_col, g.bigint_col, g.float_col, g.double_col, g.date_string_col, g.string_col, g.timestamp_col, g.year, g.month, g.day
+|  row-size=95B cardinality=0
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=103B cardinality=0
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  output: count(id)
 |  |  having: count(id) < 100
+|  |  row-size=8B cardinality=0
 |  |
 |  02:AGGREGATE
 |  |  group by: id
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: int_col < 5
+|     row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg g]
    partitions=11/11 files=11 size=814.73KB
    predicates: g.bigint_col = 1
+   row-size=95B cardinality=11
 ====
 # Aggregate subquery in an IS NULL predicate
 select *
@@ -1277,17 +1497,21 @@ where (select max(int_col) from functional.alltypesagg where int_col is null) is
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=93B cardinality=0
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: max(int_col)
 |  |  having: max(int_col) IS NULL
+|  |  row-size=4B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: int_col IS NULL
+|     row-size=4B cardinality=20
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Correlated aggregate subquery with a count in an IS NULL predicate
 select int_col, count(*)
@@ -1301,24 +1525,29 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: int_col
+|  row-size=12B cardinality=2
 |
 03:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: g.id = t.id
 |  other predicates: zeroifnull(count(*)) IS NULL
 |  runtime filters: RF000 <- t.id
+|  row-size=21B cardinality=4
 |
 |--00:SCAN HDFS [functional.alltypestiny t]
 |     partitions=4/4 files=4 size=460B
 |     predicates: bool_col = FALSE
+|     row-size=9B cardinality=4
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: g.id
 |  having: zeroifnull(count(*)) IS NULL
+|  row-size=12B cardinality=1.03K
 |
 01:SCAN HDFS [functional.alltypesagg g]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> g.id
+   row-size=4B cardinality=11.00K
 ====
 # Correlated aggregate subquery in an IS NULL predicate
 select *
@@ -1334,20 +1563,24 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.id = g.id
 |  runtime filters: RF000 <- g.id
+|  row-size=89B cardinality=4
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: max(int_col)
 |  |  group by: g.id
 |  |  having: max(int_col) IS NULL
+|  |  row-size=8B cardinality=20
 |  |
 |  01:SCAN HDFS [functional.alltypesagg g]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: g.int_col IS NULL
+|     row-size=8B cardinality=20
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
    predicates: t.bool_col = FALSE
    runtime filters: RF000 -> t.id
+   row-size=89B cardinality=4
 ====
 # Complex expr with a scalar subquery
 select *
@@ -1361,18 +1594,22 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.int_col + 2 = 1 + count(*)
 |  runtime filters: RF000 <- 1 + count(*)
+|  row-size=89B cardinality=1
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypesagg]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: bool_col = FALSE
+|     row-size=1B cardinality=5.50K
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
    predicates: t.bigint_col < 100
    runtime filters: RF000 -> t.int_col + 2
+   row-size=89B cardinality=1
 ====
 # Scalar subquery in a function
 select *
@@ -1384,18 +1621,22 @@ and t.id < 10
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=93B cardinality=0
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  having: nullifzero(min(id)) IS NULL
+|  |  row-size=4B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: s.bool_col = FALSE
+|     row-size=5B cardinality=50
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
    predicates: t.id < 10
+   row-size=89B cardinality=1
 ====
 # Correlated aggregate subquery with a LIMIT clause that is removed during the rewrite
 select min(t.id)
@@ -1412,22 +1653,27 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: min(t.id)
 |  group by: t.bool_col
+|  row-size=5B cardinality=2
 |
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t.id = s.id
 |  other join predicates: t.int_col < sum(s.int_col)
 |  runtime filters: RF000 <- s.id
+|  row-size=9B cardinality=99
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: sum(s.int_col)
 |  |  group by: s.id
+|  |  row-size=12B cardinality=99
 |  |
 |  01:SCAN HDFS [functional.alltypessmall s]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=8B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t.id
+   row-size=9B cardinality=7.30K
 ====
 # Between predicate with subqueries
 select *
@@ -1440,26 +1686,33 @@ PLAN-ROOT SINK
 |
 06:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: int_col <= max(int_col)
+|  row-size=97B cardinality=8
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: max(int_col)
+|  |  row-size=4B cardinality=1
 |  |
 |  03:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: bool_col = TRUE
+|     row-size=5B cardinality=50
 |
 05:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: int_col >= min(int_col)
+|  row-size=93B cardinality=8
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(int_col)
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: bool_col = FALSE
+|     row-size=5B cardinality=50
 |
 00:SCAN HDFS [functional.alltypestiny t]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Aggregate subquery with count (subquery op slotRef)
 select t1.id
@@ -1475,17 +1728,21 @@ PLAN-ROOT SINK
 |  hash predicates: tt1.month = t1.id
 |  other predicates: t1.id > zeroifnull(count(tt1.smallint_col))
 |  runtime filters: RF000 <- t1.id
+|  row-size=16B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(tt1.smallint_col)
 |  group by: tt1.month
+|  row-size=12B cardinality=4
 |
 01:SCAN HDFS [functional.alltypestiny tt1]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> tt1.month
+   row-size=6B cardinality=8
 ====
 # Correlated aggregate subquery with count in a function participating in
 # a complex arithmetic expr
@@ -1501,22 +1758,27 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: int_col
+|  row-size=12B cardinality=2
 |
 03:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: s.id = t.id
 |  other predicates: 1 + log(abs(zeroifnull(count(int_col))), 2) < 10
 |  runtime filters: RF000 <- t.id
+|  row-size=20B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col)
 |  group by: s.id
+|  row-size=12B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes s]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> s.id
+   row-size=8B cardinality=7.30K
 ====
 # Correlated scalar subquery with an aggregate function that returns a
 # non-numeric type on empty input
@@ -1532,23 +1794,28 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: int_col
+|  row-size=12B cardinality=2
 |
 03:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: s.id = t.id
 |  other predicates: t.string_col = ifnull(sample(int_col), '')
 |  runtime filters: RF000 <- t.id
+|  row-size=38B cardinality=4
 |
 |--00:SCAN HDFS [functional.alltypestiny t]
 |     partitions=4/4 files=4 size=460B
 |     predicates: bool_col = FALSE
+|     row-size=22B cardinality=4
 |
 02:AGGREGATE [FINALIZE]
 |  output: sample(int_col)
 |  group by: s.id
+|  row-size=16B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypes s]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> s.id
+   row-size=8B cardinality=7.30K
 ====
 # Uncorrelated scalar subquery where columns from the outer appear in both sides
 # of the binary predicate
@@ -1560,15 +1827,19 @@ PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: count(*) + t1.int_col = t1.bigint_col - 1
+|  row-size=20B cardinality=8
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=0B cardinality=100
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
+   row-size=12B cardinality=8
 ====
 # Uncorrelated scalar subquery in complex binary predicate that contains columns
 # from two tables of the outer
@@ -1580,23 +1851,29 @@ PLAN-ROOT SINK
 |
 05:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t1.int_col + t2.int_col = count(*) + 1
+|  row-size=16B cardinality=9
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  output: count(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t2.id = t1.id
 |  runtime filters: RF000 <- t1.id
+|  row-size=16B cardinality=9
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 01:SCAN HDFS [functional.alltypessmall t2]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> t2.id
+   row-size=8B cardinality=100
 ====
 # Uncorrelated scalar subquery in complex binary predicate that contains columns
 # from two tables of the outer that appear in both sides of the predicate
@@ -1609,23 +1886,29 @@ PLAN-ROOT SINK
 |
 05:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: count(*) + t2.bigint_col = t1.int_col + t2.int_col
+|  row-size=32B cardinality=9
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  output: count(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=0B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t2.id = t1.id
 |  runtime filters: RF000 <- t1.id
+|  row-size=24B cardinality=9
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 01:SCAN HDFS [functional.alltypessmall t2]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> t2.id
+   row-size=16B cardinality=100
 ====
 # Correlated scalar subquery with complex correlated predicate (IMPALA-1335)
 select 1
@@ -1639,25 +1922,31 @@ PLAN-ROOT SINK
 |
 05:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: t1.id + t2.id = t.int_col
+|  row-size=4B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum(t1.id)
 |  group by: t1.id + t2.id
 |  having: sum(t1.id) = t1.id + t2.id
+|  row-size=16B cardinality=1.03K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF002 <- t2.id
+|  row-size=8B cardinality=7.81K
 |
 |--02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF002 -> t1.id
+   row-size=4B cardinality=11.00K
 ====
 # Correlated scalar subquery with complex correlated predicate (IMPALA-1335)
 select 1
@@ -1671,24 +1960,30 @@ PLAN-ROOT SINK
 |
 05:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: t1.id + t2.id = t.bigint_col, sum(t1.id) = t.int_col
+|  row-size=12B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum(t1.id)
 |  group by: t1.id + t2.id
+|  row-size=16B cardinality=7.81K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF004 <- t2.id
+|  row-size=8B cardinality=7.81K
 |
 |--02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF004 -> t1.id
+   row-size=4B cardinality=11.00K
 ====
 # Outer query block with multiple tables and a correlated scalar subquery with
 # complex correlated predicate that references multiple subquery tables and multiple
@@ -1705,32 +2000,40 @@ PLAN-ROOT SINK
 |
 07:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: sum(tt1.id) = t1.bigint_col, tt1.id + tt2.id = t1.int_col - t2.int_col
+|  row-size=24B cardinality=9
 |
 |--06:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t2.id = t1.id
 |  |  runtime filters: RF004 <- t1.id
+|  |  row-size=24B cardinality=9
 |  |
 |  |--00:SCAN HDFS [functional.alltypestiny t1]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=16B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
 |     runtime filters: RF004 -> t2.id
+|     row-size=8B cardinality=100
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(tt1.id)
 |  group by: tt1.id + tt2.id
+|  row-size=16B cardinality=10.28K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: tt1.int_col = tt2.int_col
 |  runtime filters: RF002 <- tt2.int_col
+|  row-size=16B cardinality=83.91K
 |
 |--03:SCAN HDFS [functional.alltypes tt2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 02:SCAN HDFS [functional.alltypesagg tt1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF002 -> tt1.int_col
+   row-size=8B cardinality=11.00K
 ====
 # IMPALA-1550/IMPALA-4423: Correlated EXISTS and NOT EXISTS subqueries with aggregates
 # that can be evaluated at query compile time. All predicates evaluate to FALSE.
@@ -1778,6 +2081,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
+   row-size=0B cardinality=8
 ====
 # Correlated EXISTS and NOT EXISTS subqueries with limit 0 and
 # aggregates. Some predicates evaluate to TRUE while others need to
@@ -1802,30 +2106,37 @@ PLAN-ROOT SINK
 |
 06:HASH JOIN [LEFT ANTI JOIN]
 |  hash predicates: t1.tinyint_col = t4.int_col
+|  row-size=5B cardinality=1
 |
 |--04:AGGREGATE [FINALIZE]
 |  |  output: count(id)
 |  |  group by: t4.int_col
 |  |  having: count(id) > 200
+|  |  row-size=12B cardinality=0
 |  |
 |  03:SCAN HDFS [functional.alltypestiny t4]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 05:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: t3.id = t1.id
 |  runtime filters: RF000 <- t1.id
+|  row-size=5B cardinality=1
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t1.id > 100
+|     row-size=5B cardinality=1
 |
 02:AGGREGATE [FINALIZE]
 |  group by: int_col, t3.id
+|  row-size=8B cardinality=1.10K
 |
 01:SCAN HDFS [functional.alltypesagg t3]
    partitions=11/11 files=11 size=814.73KB
    predicates: t3.id > 100
    runtime filters: RF000 -> t3.id
+   row-size=8B cardinality=1.10K
 ====
 # Tests for <=> (aka IS NOT DISTINCT FROM) and IS DISTINCT FROM
 select * from functional.alltypesagg t1
@@ -1838,17 +2149,21 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t1.id IS NOT DISTINCT FROM min(id), t1.int_col IS NOT DISTINCT FROM t2.int_col
 |  runtime filters: RF000 <- min(id), RF001 <- t2.int_col
+|  row-size=95B cardinality=11
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t2.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.id, RF001 -> t1.int_col
+   row-size=95B cardinality=11.00K
 ====
 select * from functional.alltypesagg t1
 where t1.id is distinct from
@@ -1861,17 +2176,21 @@ PLAN-ROOT SINK
 |  hash predicates: t1.int_col IS NOT DISTINCT FROM t2.int_col
 |  other join predicates: t1.id IS DISTINCT FROM min(id)
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=95B cardinality=115
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t2.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.int_col
+   row-size=95B cardinality=11.00K
 ====
 select * from functional.alltypesagg t1
 where t1.id =
@@ -1883,17 +2202,21 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t1.id = min(id), t1.int_col IS NOT DISTINCT FROM t2.int_col
 |  runtime filters: RF000 <- min(id), RF001 <- t2.int_col
+|  row-size=95B cardinality=11
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t2.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.id, RF001 -> t1.int_col
+   row-size=95B cardinality=11.00K
 ====
 select * from functional.alltypesagg t1
 where t1.id !=
@@ -1906,17 +2229,21 @@ PLAN-ROOT SINK
 |  hash predicates: t1.int_col IS NOT DISTINCT FROM t2.int_col
 |  other join predicates: t1.id != min(id)
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=95B cardinality=115
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t2.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.int_col
+   row-size=95B cardinality=11.00K
 ====
 select * from functional.alltypesagg t1
 where t1.id is not distinct from
@@ -1928,17 +2255,21 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t1.id IS NOT DISTINCT FROM min(id), t1.int_col = t2.int_col
 |  runtime filters: RF000 <- min(id), RF001 <- t2.int_col
+|  row-size=95B cardinality=11
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t2.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.id, RF001 -> t1.int_col
+   row-size=95B cardinality=11.00K
 ====
 select * from functional.alltypesagg t1
 where t1.id is distinct from
@@ -1951,17 +2282,21 @@ PLAN-ROOT SINK
 |  hash predicates: t1.int_col = t2.int_col
 |  other join predicates: t1.id IS DISTINCT FROM min(id)
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=95B cardinality=115
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t2.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.int_col
+   row-size=95B cardinality=11.00K
 ====
 select * from functional.alltypesagg t1
 where t1.id =
@@ -1973,17 +2308,21 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: t1.id = min(id), t1.int_col = t2.int_col
 |  runtime filters: RF000 <- min(id), RF001 <- t2.int_col
+|  row-size=95B cardinality=11
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t2.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.id, RF001 -> t1.int_col
+   row-size=95B cardinality=11.00K
 ====
 select * from functional.alltypesagg t1
 where t1.id !=
@@ -1996,17 +2335,21 @@ PLAN-ROOT SINK
 |  hash predicates: t1.int_col = t2.int_col
 |  other join predicates: t1.id != min(id)
 |  runtime filters: RF000 <- t2.int_col
+|  row-size=95B cardinality=115
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: min(id)
 |  |  group by: t2.int_col
+|  |  row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.int_col
+   row-size=95B cardinality=11.00K
 ====
 # IMPALA-3861: Test that IN subqueries with correlated BETWEEN predicates work.
 select 1 from functional.alltypes t where id in
@@ -2025,15 +2368,18 @@ PLAN-ROOT SINK
 |  hash predicates: id = id
 |  other join predicates: a.tinyint_col >= t.tinyint_col, t.float_col >= a.float_col, a.smallint_col <= t.int_col, a.tinyint_col <= t.smallint_col, t.float_col <= a.double_col, a.double_col <= CAST(t.string_col AS INT), t.string_col >= a.string_col, a.double_col >= round(acos(t.float_col), 2)
 |  runtime filters: RF000 <- id
+|  row-size=56B cardinality=730
 |
 |--00:SCAN HDFS [functional.alltypes t]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: t.bigint_col <= 20, t.string_col <= t.date_string_col
+|     row-size=56B cardinality=730
 |
 01:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: a.int_col >= 20, a.smallint_col >= 10
    runtime filters: RF000 -> id
+   row-size=38B cardinality=1.10K
 ====
 # IMPALA-4423: Correlated EXISTS and NOT EXISTS subqueries with aggregates. Both
 # subqueries can be evaluated at query compile time. The first one evaluates to
@@ -2065,22 +2411,30 @@ PLAN-ROOT SINK
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=12
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 03:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=8
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: int_col < 10
 |     limit: 1
+|     row-size=4B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # IMPALA-4303: Test subquery rewriting with nested unions.
 select * from functional.alltypestiny
@@ -2094,25 +2448,34 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=12
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=12
 |
 |--05:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=115B
+|     row-size=89B cardinality=2
 |
 03:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=8
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: int_col < 10
 |     limit: 1
+|     row-size=4B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Constant on LHS of IN, uncorrelated subquery
 select * from functional.alltypessmall where
@@ -2121,13 +2484,16 @@ select * from functional.alltypessmall where
 PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=100
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: 1 = functional.alltypestiny.int_col
+|     row-size=4B cardinality=4
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of NOT IN, uncorrelated subquery
 select * from functional.alltypessmall where
@@ -2136,14 +2502,17 @@ select * from functional.alltypessmall where
 PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [LEFT ANTI JOIN]
+|  row-size=89B cardinality=100
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: 1 IS NULL OR functional.alltypestiny.int_col IS NULL OR functional.alltypestiny.int_col = 1
 |     limit: 1
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of IN, correlated subquery
 select * from functional.alltypessmall a where
@@ -2154,14 +2523,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=89B cardinality=4
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
 |     predicates: 1 = b.int_col
+|     row-size=8B cardinality=4
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of IN, subquery with group by
 select * from functional.alltypessmall where
@@ -2170,16 +2542,20 @@ select * from functional.alltypessmall where
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=100
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  group by: int_col
+|  |  row-size=4B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: 1 = functional.alltypestiny.int_col
+|     row-size=4B cardinality=4
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of NOT IN, subquery with group by
 select * from functional.alltypessmall where
@@ -2188,17 +2564,21 @@ select * from functional.alltypessmall where
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [LEFT ANTI JOIN]
+|  row-size=89B cardinality=100
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  group by: int_col
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: 1 IS NULL OR functional.alltypestiny.int_col IS NULL OR functional.alltypestiny.int_col = 1
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of IN, subquery with aggregate
 select * from functional.alltypessmall where
@@ -2207,16 +2587,20 @@ select * from functional.alltypessmall where
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=100
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: max(int_col)
 |  |  having: 1 = max(int_col)
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of NOT IN, subquery with aggregate
 select * from functional.alltypessmall where
@@ -2225,16 +2609,20 @@ select * from functional.alltypessmall where
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=93B cardinality=0
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: max(int_col)
 |  |  having: 1 != max(int_col)
+|  |  row-size=4B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of IN, subquery with limit
 select * from functional.alltypessmall where
@@ -2243,16 +2631,20 @@ select * from functional.alltypessmall where
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=100
 |
 |--02:SELECT
 |  |  predicates: 1 = int_col
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     limit: 1
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of NOT IN, subquery with limit
 select * from functional.alltypessmall where
@@ -2261,16 +2653,20 @@ select * from functional.alltypessmall where
 PLAN-ROOT SINK
 |
 03:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=93B cardinality=0
 |
 |--02:SELECT
 |  |  predicates: 1 != int_col
+|  |  row-size=4B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     limit: 1
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=89B cardinality=100
 ====
 # Constant on LHS of IN for nested subqueries (no correlation)
 select * from functional.alltypes t where 1 in
@@ -2280,19 +2676,24 @@ select * from functional.alltypes t where 1 in
 PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=7.30K
 |
 |--03:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  |  row-size=4B cardinality=unavailable
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: 1 = functional.alltypestiny.int_col
+|  |     row-size=4B cardinality=4
 |  |
 |  01:SCAN HDFS [functional.tinyinttable]
 |     partitions=1/1 files=1 size=20B
 |     predicates: 1 = functional.tinyinttable.int_col
+|     row-size=4B cardinality=unavailable
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Constant on LHS of IN for nested subqueries (correlation)
 select * from functional.alltypes t where 1 in
@@ -2302,21 +2703,26 @@ select * from functional.alltypes t where 1 in
 PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  row-size=89B cardinality=7.30K
 |
 |--03:HASH JOIN [LEFT SEMI JOIN]
 |  |  hash predicates: bigint_col = bigint_col, t.id = id
 |  |  runtime filters: RF000 <- bigint_col, RF001 <- id
+|  |  row-size=16B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=12B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: 1 = t.int_col
 |     runtime filters: RF000 -> bigint_col, RF001 -> t.id
+|     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # EXISTS subquery containing ORDER BY, LIMIT, and OFFSET (IMPALA-6934)
 select count(*) from functional.alltypestiny t where exists
@@ -2326,18 +2732,23 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 03:NESTED LOOP JOIN [RIGHT SEMI JOIN]
+|  row-size=0B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t]
 |     partitions=4/4 files=4 size=460B
+|     row-size=0B cardinality=8
 |
 02:TOP-N [LIMIT=1 OFFSET=6]
 |  order by: id ASC
+|  row-size=4B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    predicates: id < 5
+   row-size=4B cardinality=1
 ====
 # Subquery in binary predicate that needs cardinality check at runtime
 select bigint_col from functional.alltypes where id =
@@ -2350,19 +2761,23 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = id
 |  runtime filters: RF000 <- id
+|  row-size=12B cardinality=1
 |
 |--02:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: id = 1
 |     limit: 2
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id = 1
    runtime filters: RF000 -> id
+   row-size=12B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -2371,11 +2786,13 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN, PARTITIONED]
 |  hash predicates: id = id
 |  runtime filters: RF000 <- id
+|  row-size=12B cardinality=1
 |
 |--06:EXCHANGE [HASH(id)]
 |  |
 |  02:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  04:EXCHANGE [UNPARTITIONED]
 |  |  limit: 2
@@ -2384,6 +2801,7 @@ PLAN-ROOT SINK
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: id = 1
 |     limit: 2
+|     row-size=4B cardinality=1
 |
 05:EXCHANGE [HASH(id)]
 |
@@ -2391,6 +2809,7 @@ PLAN-ROOT SINK
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id = 1
    runtime filters: RF000 -> id
+   row-size=12B cardinality=1
 ====
 # Subquery in arithmetic expression that needs cardinality check at runtime
 select bigint_col from functional.alltypes where id =
@@ -2403,18 +2822,22 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = 3 * id
 |  runtime filters: RF000 <- 3 * id
+|  row-size=12B cardinality=7.30K
 |
 |--02:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: id = 1
 |     limit: 2
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -2423,11 +2846,13 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: id = 3 * id
 |  runtime filters: RF000 <- 3 * id
+|  row-size=12B cardinality=7.30K
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  02:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  04:EXCHANGE [UNPARTITIONED]
 |  |  limit: 2
@@ -2436,10 +2861,12 @@ PLAN-ROOT SINK
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: id = 1
 |     limit: 2
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=12B cardinality=7.30K
 ====
 # Subquery that contains union and needs cardinality check at runtime
 select * from functional.alltypes where id =
@@ -2452,26 +2879,33 @@ PLAN-ROOT SINK
 06:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = i
 |  runtime filters: RF000 <- i
+|  row-size=89B cardinality=1
 |
 |--05:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=8B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  group by: i
 |  |  limit: 2
+|  |  row-size=8B cardinality=2
 |  |
 |  01:UNION
 |  |  pass-through-operands: 02
+|  |  row-size=8B cardinality=14.60K
 |  |
 |  |--03:SCAN HDFS [functional.alltypes]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=2B cardinality=7.30K
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -2480,11 +2914,13 @@ PLAN-ROOT SINK
 06:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: id = i
 |  runtime filters: RF000 <- i
+|  row-size=89B cardinality=1
 |
 |--10:EXCHANGE [BROADCAST]
 |  |
 |  05:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=8B cardinality=1
 |  |
 |  09:EXCHANGE [UNPARTITIONED]
 |  |  limit: 2
@@ -2492,24 +2928,30 @@ PLAN-ROOT SINK
 |  08:AGGREGATE [FINALIZE]
 |  |  group by: i
 |  |  limit: 2
+|  |  row-size=8B cardinality=2
 |  |
 |  07:EXCHANGE [HASH(i)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  group by: i
+|  |  row-size=8B cardinality=20
 |  |
 |  01:UNION
 |  |  pass-through-operands: 02
+|  |  row-size=8B cardinality=14.60K
 |  |
 |  |--03:SCAN HDFS [functional.alltypes]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=2B cardinality=7.30K
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=89B cardinality=7.30K
 ====
 # Subquery that contains join and GROUP BY and needs cardinality check at runtime
 select * from functional.alltypes where id =
@@ -2521,29 +2963,37 @@ PLAN-ROOT SINK
 06:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: id = max(allt.smallint_col)
 |  runtime filters: RF000 <- max(allt.smallint_col)
+|  row-size=89B cardinality=1
 |
 |--05:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=6B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: max(allt.smallint_col)
 |  |  group by: ata.month
 |  |  limit: 2
+|  |  row-size=6B cardinality=1
 |  |
 |  03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: ata.id = allt.id
 |  |  runtime filters: RF002 <- allt.id
+|  |  row-size=14B cardinality=7.81K
 |  |
 |  |--01:SCAN HDFS [functional.alltypes allt]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=6B cardinality=7.30K
 |  |
 |  02:SCAN HDFS [functional.alltypesagg ata]
+|     partition predicates: ata.month = 1
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> ata.id
+|     row-size=8B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -2552,11 +3002,13 @@ PLAN-ROOT SINK
 06:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: id = max(allt.smallint_col)
 |  runtime filters: RF000 <- max(allt.smallint_col)
+|  row-size=89B cardinality=1
 |
 |--12:EXCHANGE [BROADCAST]
 |  |
 |  05:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=6B cardinality=1
 |  |
 |  11:EXCHANGE [UNPARTITIONED]
 |  |  limit: 2
@@ -2565,31 +3017,38 @@ PLAN-ROOT SINK
 |  |  output: max:merge(allt.smallint_col)
 |  |  group by: ata.month
 |  |  limit: 2
+|  |  row-size=6B cardinality=1
 |  |
 |  09:EXCHANGE [HASH(ata.month)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  output: max(allt.smallint_col)
 |  |  group by: ata.month
+|  |  row-size=6B cardinality=1
 |  |
 |  03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  |  hash predicates: ata.id = allt.id
 |  |  runtime filters: RF002 <- allt.id
+|  |  row-size=14B cardinality=7.81K
 |  |
 |  |--08:EXCHANGE [HASH(allt.id)]
 |  |  |
 |  |  01:SCAN HDFS [functional.alltypes allt]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=6B cardinality=7.30K
 |  |
 |  07:EXCHANGE [HASH(ata.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypesagg ata]
+|     partition predicates: ata.month = 1
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> ata.id
+|     row-size=8B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=89B cardinality=7.30K
 ====
 # IS NULL predicate must not be pushed down to the scan node of the inline view.
 select count(1) from functional.alltypes
@@ -2599,41 +3058,52 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=4B cardinality=0
 |
 |--03:SELECT
 |  |  predicates: int_col IS NULL
+|  |  row-size=4B cardinality=0
 |  |
 |  02:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     limit: 2
+|     row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 09:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=0
 |
 08:EXCHANGE [UNPARTITIONED]
 |
 05:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 04:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=4B cardinality=0
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
 |  03:SELECT
 |  |  predicates: int_col IS NULL
+|  |  row-size=4B cardinality=0
 |  |
 |  02:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  06:EXCHANGE [UNPARTITIONED]
 |  |  limit: 2
@@ -2641,9 +3111,11 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     limit: 2
+|     row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Binary predicate with constant must not be pushed down
 # to the scan node of the inline view.
@@ -2654,41 +3126,52 @@ PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 04:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=4B cardinality=0
 |
 |--03:SELECT
 |  |  predicates: int_col > 10
+|  |  row-size=4B cardinality=0
 |  |
 |  02:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     limit: 2
+|     row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 09:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=0
 |
 08:EXCHANGE [UNPARTITIONED]
 |
 05:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 04:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=4B cardinality=0
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
 |  03:SELECT
 |  |  predicates: int_col > 10
+|  |  row-size=4B cardinality=0
 |  |
 |  02:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  06:EXCHANGE [UNPARTITIONED]
 |  |  limit: 2
@@ -2696,9 +3179,11 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     limit: 2
+|     row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # Runtime scalar subquery with offset.
 select count(*) from functional.alltypes
@@ -2709,43 +3194,55 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 05:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=4B cardinality=0
 |
 |--04:SELECT
 |  |  predicates: id = 7
+|  |  row-size=4B cardinality=0
 |  |
 |  03:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  02:TOP-N [LIMIT=2 OFFSET=7]
 |  |  order by: id ASC
+|  |  row-size=4B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 10:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=0
 |
 09:EXCHANGE [UNPARTITIONED]
 |
 06:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 05:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=4B cardinality=0
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  04:SELECT
 |  |  predicates: id = 7
+|  |  row-size=4B cardinality=0
 |  |
 |  03:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=4B cardinality=1
 |  |
 |  07:MERGING-EXCHANGE [UNPARTITIONED]
 |  |  offset: 7
@@ -2754,12 +3251,15 @@ PLAN-ROOT SINK
 |  |
 |  02:TOP-N [LIMIT=9]
 |  |  order by: id ASC
+|  |  row-size=4B cardinality=8
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # IMPALA-7108: Select from an inline view that returns a single row.
 select * from functional.alltypes
@@ -2771,16 +3271,20 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: int_col = id
 |  runtime filters: RF000 <- id
+|  row-size=89B cardinality=730
 |
 |--02:TOP-N [LIMIT=1]
 |  |  order by: id ASC
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> int_col
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -2789,6 +3293,7 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: int_col = id
 |  runtime filters: RF000 <- id
+|  row-size=89B cardinality=730
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
@@ -2798,11 +3303,14 @@ PLAN-ROOT SINK
 |  |
 |  02:TOP-N [LIMIT=1]
 |  |  order by: id ASC
+|  |  row-size=4B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> int_col
+   row-size=89B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test b/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
index 6d434e5..9569998 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
@@ -33,7 +33,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=621
    mem-estimate=80.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=3650
+   tuple-ids=0 row-size=89B cardinality=3.65K
    in pipelines: 00(GETNEXT)
 ====
 # Sampling and scan predicates. Scan predicates are evaluated after sampling and
@@ -68,6 +68,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = CAST(2009 AS INT)
    partitions=6/24 files=6 size=119.70KB
    stored statistics:
      table: rows=7300 size=478.45KB
@@ -75,7 +76,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=1241
    mem-estimate=48.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=1825
+   tuple-ids=0 row-size=89B cardinality=1.82K
    in pipelines: 00(GETNEXT)
 ====
 # Edge case: sample 0%, no files should be selected
@@ -126,6 +127,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = CAST(2010 AS INT)
    partitions=1/24 files=1 size=20.36KB
    stored statistics:
      table: rows=7300 size=478.45KB
@@ -152,7 +154,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=89B cardinality=7300
+   tuple-ids=0 row-size=89B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Table that has no stats.
@@ -164,7 +166,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partitions=3/24 files=3 size=24.18KB
+   partitions=3/24 files=3 size=23.96KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -211,7 +213,7 @@ PLAN-ROOT SINK
      columns: all
    extrapolated-rows=disabled max-scan-range-rows=310
    mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
-   tuple-ids=0 row-size=4B cardinality=7300
+   tuple-ids=0 row-size=4B cardinality=7.30K
    in pipelines: 00(GETNEXT)
 ====
 # Sampling in WITH-clause view.

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit-small.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit-small.test b/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit-small.test
index db3e9a5..e02e669 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit-small.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit-small.test
@@ -6,9 +6,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=1]
 |  order by: int_col ASC
+|  row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -18,9 +20,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=1]
 |  order by: int_col ASC
+|  row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # returns 2 ints, with a total size of 8 bytes, which exceeds the limit of 6 and thus triggers a SORT
 select int_col from functional.alltypes order by 1 limit 2
@@ -29,9 +33,11 @@ PLAN-ROOT SINK
 |
 01:SORT [LIMIT=2]
 |  order by: int_col ASC
+|  row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -41,9 +47,11 @@ PLAN-ROOT SINK
 |
 01:SORT [LIMIT=2]
 |  order by: int_col ASC
+|  row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # test that offset is taken into account; the query only returns a single row but needs to sort two rows
 # sorting two ints requires 8 bytes of memory, which exceeds the threshold of 6
@@ -53,9 +61,11 @@ PLAN-ROOT SINK
 |
 01:SORT [LIMIT=1 OFFSET=1]
 |  order by: int_col ASC
+|  row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -66,7 +76,9 @@ PLAN-ROOT SINK
 |
 01:SORT [LIMIT=2]
 |  order by: int_col ASC
+|  row-size=4B cardinality=2
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit.test b/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit.test
index 9ad000e..f28ee9a 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/topn-bytes-limit.test
@@ -5,9 +5,11 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=7]
 |  order by: id ASC
+|  row-size=4B cardinality=7
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -17,7 +19,9 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=7]
 |  order by: id ASC
+|  row-size=4B cardinality=7
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
-====
\ No newline at end of file
+   row-size=4B cardinality=8
+====


[07/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/tpch-all.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/tpch-all.test b/testdata/workloads/functional-planner/queries/PlannerTest/tpch-all.test
index 4bab956..2497fd7 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/tpch-all.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/tpch-all.test
@@ -28,14 +28,17 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: l_returnflag ASC, l_linestatus ASC
+|  row-size=122B cardinality=6
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(l_quantity), sum(l_extendedprice), sum(l_extendedprice * (1 - l_discount)), sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg(l_quantity), avg(l_extendedprice), avg(l_discount), count(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=122B cardinality=6
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate <= '1998-09-02'
+   row-size=80B cardinality=600.12K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=23.94MB Threads=4
 Per-Host Resource Estimates: Memory=296MB
@@ -46,20 +49,24 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: l_returnflag ASC, l_linestatus ASC
+|  row-size=122B cardinality=6
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_quantity), sum:merge(l_extendedprice), sum:merge(l_extendedprice * (1 - l_discount)), sum:merge(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg:merge(l_quantity), avg:merge(l_extendedprice), avg:merge(l_discount), count:merge(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=122B cardinality=6
 |
 03:EXCHANGE [HASH(l_returnflag,l_linestatus)]
 |
 01:AGGREGATE [STREAMING]
 |  output: sum(l_quantity), sum(l_extendedprice), sum(l_extendedprice * (1 - l_discount)), sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg(l_quantity), avg(l_extendedprice), avg(l_discount), count(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=122B cardinality=6
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate <= '1998-09-02'
+   row-size=80B cardinality=600.12K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=47.88MB Threads=5
 Per-Host Resource Estimates: Memory=240MB
@@ -70,20 +77,24 @@ PLAN-ROOT SINK
 |
 02:SORT
 |  order by: l_returnflag ASC, l_linestatus ASC
+|  row-size=122B cardinality=6
 |
 04:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_quantity), sum:merge(l_extendedprice), sum:merge(l_extendedprice * (1 - l_discount)), sum:merge(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg:merge(l_quantity), avg:merge(l_extendedprice), avg:merge(l_discount), count:merge(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=122B cardinality=6
 |
 03:EXCHANGE [HASH(l_returnflag,l_linestatus)]
 |
 01:AGGREGATE [STREAMING]
 |  output: sum(l_quantity), sum(l_extendedprice), sum(l_extendedprice * (1 - l_discount)), sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)), avg(l_quantity), avg(l_extendedprice), avg(l_discount), count(*)
 |  group by: l_returnflag, l_linestatus
+|  row-size=122B cardinality=6
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate <= '1998-09-02'
+   row-size=80B cardinality=600.12K
 ====
 # TPCH-Q2
 # Q2 - Minimum Cost Supplier Query
@@ -138,78 +149,97 @@ PLAN-ROOT SINK
 |
 18:TOP-N [LIMIT=100]
 |  order by: s_acctbal DESC, n_name ASC, s_name ASC, p_partkey ASC
+|  row-size=230B cardinality=100
 |
 17:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: ps_partkey = p_partkey, min(ps_supplycost) = ps_supplycost
 |  runtime filters: RF000 <- p_partkey
+|  row-size=325B cardinality=1.01K
 |
 |--16:HASH JOIN [INNER JOIN]
 |  |  hash predicates: n_regionkey = r_regionkey
 |  |  runtime filters: RF010 <- r_regionkey
+|  |  row-size=325B cardinality=1.01K
 |  |
 |  |--04:SCAN HDFS [tpch.region]
 |  |     partitions=1/1 files=1 size=384B
 |  |     predicates: r_name = 'EUROPE'
+|  |     row-size=21B cardinality=1
 |  |
 |  15:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF012 <- n_nationkey
+|  |  row-size=304B cardinality=5.05K
 |  |
 |  |--03:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     runtime filters: RF010 -> n_regionkey
+|  |     row-size=23B cardinality=25
 |  |
 |  14:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_suppkey = ps_suppkey
 |  |  runtime filters: RF014 <- ps_suppkey
+|  |  row-size=281B cardinality=5.05K
 |  |
 |  |--13:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: ps_partkey = p_partkey
 |  |  |  runtime filters: RF016 <- p_partkey
+|  |  |  row-size=95B cardinality=5.05K
 |  |  |
 |  |  |--00:SCAN HDFS [tpch.part]
 |  |  |     partitions=1/1 files=1 size=22.83MB
 |  |  |     predicates: p_size = 15, p_type LIKE '%BRASS'
+|  |  |     row-size=71B cardinality=1.26K
 |  |  |
 |  |  02:SCAN HDFS [tpch.partsupp]
 |  |     partitions=1/1 files=1 size=112.71MB
 |  |     runtime filters: RF016 -> ps_partkey
+|  |     row-size=24B cardinality=800.00K
 |  |
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF012 -> s_nationkey, RF014 -> s_suppkey
+|     row-size=187B cardinality=10.00K
 |
 12:AGGREGATE [FINALIZE]
 |  output: min(ps_supplycost)
 |  group by: ps_partkey
+|  row-size=16B cardinality=160.00K
 |
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF004 <- r_regionkey
+|  row-size=59B cardinality=160.00K
 |
 |--08:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'EUROPE'
+|     row-size=21B cardinality=1
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF006 <- n_nationkey
+|  row-size=38B cardinality=800.00K
 |
 |--07:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF004 -> n_regionkey
+|     row-size=4B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF008 <- s_suppkey
+|  row-size=34B cardinality=800.00K
 |
 |--06:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF006 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 05:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF000 -> tpch.partsupp.ps_partkey, RF008 -> ps_suppkey
+   row-size=24B cardinality=800.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=65.28MB Threads=21
 Per-Host Resource Estimates: Memory=662MB
@@ -221,102 +251,122 @@ PLAN-ROOT SINK
 |
 18:TOP-N [LIMIT=100]
 |  order by: s_acctbal DESC, n_name ASC, s_name ASC, p_partkey ASC
+|  row-size=230B cardinality=100
 |
 17:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  hash predicates: ps_partkey = p_partkey, min(ps_supplycost) = ps_supplycost
 |  runtime filters: RF000 <- p_partkey
+|  row-size=325B cardinality=1.01K
 |
 |--29:EXCHANGE [HASH(p_partkey,ps_supplycost)]
 |  |
 |  16:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: n_regionkey = r_regionkey
 |  |  runtime filters: RF010 <- r_regionkey
+|  |  row-size=325B cardinality=1.01K
 |  |
 |  |--27:EXCHANGE [BROADCAST]
 |  |  |
 |  |  04:SCAN HDFS [tpch.region]
 |  |     partitions=1/1 files=1 size=384B
 |  |     predicates: r_name = 'EUROPE'
+|  |     row-size=21B cardinality=1
 |  |
 |  15:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF012 <- n_nationkey
+|  |  row-size=304B cardinality=5.05K
 |  |
 |  |--26:EXCHANGE [BROADCAST]
 |  |  |
 |  |  03:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     runtime filters: RF010 -> n_regionkey
+|  |     row-size=23B cardinality=25
 |  |
 |  14:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_suppkey = ps_suppkey
 |  |  runtime filters: RF014 <- ps_suppkey
+|  |  row-size=281B cardinality=5.05K
 |  |
 |  |--25:EXCHANGE [BROADCAST]
 |  |  |
 |  |  13:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  |  hash predicates: ps_partkey = p_partkey
 |  |  |  runtime filters: RF016 <- p_partkey
+|  |  |  row-size=95B cardinality=5.05K
 |  |  |
 |  |  |--24:EXCHANGE [BROADCAST]
 |  |  |  |
 |  |  |  00:SCAN HDFS [tpch.part]
 |  |  |     partitions=1/1 files=1 size=22.83MB
 |  |  |     predicates: p_size = 15, p_type LIKE '%BRASS'
+|  |  |     row-size=71B cardinality=1.26K
 |  |  |
 |  |  02:SCAN HDFS [tpch.partsupp]
 |  |     partitions=1/1 files=1 size=112.71MB
 |  |     runtime filters: RF016 -> ps_partkey
+|  |     row-size=24B cardinality=800.00K
 |  |
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF012 -> s_nationkey, RF014 -> s_suppkey
+|     row-size=187B cardinality=10.00K
 |
 28:EXCHANGE [HASH(ps_partkey,min(ps_supplycost))]
 |
 23:AGGREGATE [FINALIZE]
 |  output: min:merge(ps_supplycost)
 |  group by: ps_partkey
+|  row-size=16B cardinality=160.00K
 |
 22:EXCHANGE [HASH(ps_partkey)]
 |
 12:AGGREGATE [STREAMING]
 |  output: min(ps_supplycost)
 |  group by: ps_partkey
+|  row-size=16B cardinality=160.00K
 |
 11:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF004 <- r_regionkey
+|  row-size=59B cardinality=160.00K
 |
 |--21:EXCHANGE [BROADCAST]
 |  |
 |  08:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'EUROPE'
+|     row-size=21B cardinality=1
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF006 <- n_nationkey
+|  row-size=38B cardinality=800.00K
 |
 |--20:EXCHANGE [BROADCAST]
 |  |
 |  07:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF004 -> n_regionkey
+|     row-size=4B cardinality=25
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF008 <- s_suppkey
+|  row-size=34B cardinality=800.00K
 |
 |--19:EXCHANGE [BROADCAST]
 |  |
 |  06:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF006 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 05:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF000 -> tpch.partsupp.ps_partkey, RF008 -> ps_suppkey
+   row-size=24B cardinality=800.00K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=122.81MB Threads=23
 Per-Host Resource Estimates: Memory=713MB
@@ -328,10 +378,12 @@ PLAN-ROOT SINK
 |
 18:TOP-N [LIMIT=100]
 |  order by: s_acctbal DESC, n_name ASC, s_name ASC, p_partkey ASC
+|  row-size=230B cardinality=100
 |
 17:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  hash predicates: ps_partkey = p_partkey, min(ps_supplycost) = ps_supplycost
 |  runtime filters: RF000 <- p_partkey
+|  row-size=325B cardinality=1.01K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -342,6 +394,7 @@ PLAN-ROOT SINK
 |  16:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: n_regionkey = r_regionkey
 |  |  runtime filters: RF010 <- r_regionkey
+|  |  row-size=325B cardinality=1.01K
 |  |
 |  |--JOIN BUILD
 |  |  |  join-table-id=01 plan-id=02 cohort-id=02
@@ -352,10 +405,12 @@ PLAN-ROOT SINK
 |  |  04:SCAN HDFS [tpch.region]
 |  |     partitions=1/1 files=1 size=384B
 |  |     predicates: r_name = 'EUROPE'
+|  |     row-size=21B cardinality=1
 |  |
 |  15:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF012 <- n_nationkey
+|  |  row-size=304B cardinality=5.05K
 |  |
 |  |--JOIN BUILD
 |  |  |  join-table-id=02 plan-id=03 cohort-id=02
@@ -366,10 +421,12 @@ PLAN-ROOT SINK
 |  |  03:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     runtime filters: RF010 -> n_regionkey
+|  |     row-size=23B cardinality=25
 |  |
 |  14:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_suppkey = ps_suppkey
 |  |  runtime filters: RF014 <- ps_suppkey
+|  |  row-size=281B cardinality=5.05K
 |  |
 |  |--JOIN BUILD
 |  |  |  join-table-id=03 plan-id=04 cohort-id=02
@@ -380,6 +437,7 @@ PLAN-ROOT SINK
 |  |  13:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  |  hash predicates: ps_partkey = p_partkey
 |  |  |  runtime filters: RF016 <- p_partkey
+|  |  |  row-size=95B cardinality=5.05K
 |  |  |
 |  |  |--JOIN BUILD
 |  |  |  |  join-table-id=04 plan-id=05 cohort-id=03
@@ -390,30 +448,36 @@ PLAN-ROOT SINK
 |  |  |  00:SCAN HDFS [tpch.part]
 |  |  |     partitions=1/1 files=1 size=22.83MB
 |  |  |     predicates: p_size = 15, p_type LIKE '%BRASS'
+|  |  |     row-size=71B cardinality=1.26K
 |  |  |
 |  |  02:SCAN HDFS [tpch.partsupp]
 |  |     partitions=1/1 files=1 size=112.71MB
 |  |     runtime filters: RF016 -> ps_partkey
+|  |     row-size=24B cardinality=800.00K
 |  |
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF012 -> s_nationkey, RF014 -> s_suppkey
+|     row-size=187B cardinality=10.00K
 |
 28:EXCHANGE [HASH(ps_partkey,min(ps_supplycost))]
 |
 23:AGGREGATE [FINALIZE]
 |  output: min:merge(ps_supplycost)
 |  group by: ps_partkey
+|  row-size=16B cardinality=160.00K
 |
 22:EXCHANGE [HASH(ps_partkey)]
 |
 12:AGGREGATE [STREAMING]
 |  output: min(ps_supplycost)
 |  group by: ps_partkey
+|  row-size=16B cardinality=160.00K
 |
 11:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF004 <- r_regionkey
+|  row-size=59B cardinality=160.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=05 plan-id=06 cohort-id=01
@@ -424,10 +488,12 @@ PLAN-ROOT SINK
 |  08:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'EUROPE'
+|     row-size=21B cardinality=1
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF006 <- n_nationkey
+|  row-size=38B cardinality=800.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=06 plan-id=07 cohort-id=01
@@ -438,10 +504,12 @@ PLAN-ROOT SINK
 |  07:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF004 -> n_regionkey
+|     row-size=4B cardinality=25
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF008 <- s_suppkey
+|  row-size=34B cardinality=800.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=07 plan-id=08 cohort-id=01
@@ -452,10 +520,12 @@ PLAN-ROOT SINK
 |  06:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF006 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 05:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF000 -> tpch.partsupp.ps_partkey, RF008 -> ps_suppkey
+   row-size=24B cardinality=800.00K
 ====
 # TPCH-Q3
 # Q3 - Shipping Priority Query
@@ -489,32 +559,39 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=117B cardinality=17.56K
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: c_mktsegment = 'BUILDING'
+|     row-size=29B cardinality=30.00K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF002 <- o_orderkey
+|  row-size=88B cardinality=57.58K
 |
 |--01:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-03-15'
 |     runtime filters: RF000 -> o_custkey
+|     row-size=42B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate > '1995-03-15'
    runtime filters: RF002 -> l_orderkey
+   row-size=46B cardinality=600.12K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=41.38MB Threads=8
 Per-Host Resource Estimates: Memory=545MB
@@ -526,30 +603,36 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 09:EXCHANGE [HASH(l_orderkey,o_orderdate,o_shippriority)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=117B cardinality=17.56K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: c_mktsegment = 'BUILDING'
+|     row-size=29B cardinality=30.00K
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF002 <- o_orderkey
+|  row-size=88B cardinality=57.58K
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
@@ -557,11 +640,13 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-03-15'
 |     runtime filters: RF000 -> o_custkey
+|     row-size=42B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate > '1995-03-15'
    runtime filters: RF002 -> l_orderkey
+   row-size=46B cardinality=600.12K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=82.75MB Threads=9
 Per-Host Resource Estimates: Memory=484MB
@@ -573,20 +658,24 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=10]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC, o_orderdate ASC
+|  row-size=50B cardinality=10
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 09:EXCHANGE [HASH(l_orderkey,o_orderdate,o_shippriority)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_orderkey, o_orderdate, o_shippriority
+|  row-size=50B cardinality=17.56K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=117B cardinality=17.56K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -597,10 +686,12 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     predicates: c_mktsegment = 'BUILDING'
+|     row-size=29B cardinality=30.00K
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF002 <- o_orderkey
+|  row-size=88B cardinality=57.58K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -612,11 +703,13 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-03-15'
 |     runtime filters: RF000 -> o_custkey
+|     row-size=42B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate > '1995-03-15'
    runtime filters: RF002 -> l_orderkey
+   row-size=46B cardinality=600.12K
 ====
 # TPCH-Q4
 # Q4 - Order Priority Checking Query
@@ -648,23 +741,28 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
+|  row-size=50B cardinality=150.00K
 |
 |--00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1993-10-01', o_orderdate >= '1993-07-01'
+|     row-size=50B cardinality=150.00K
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_commitdate < l_receiptdate
    runtime filters: RF000 -> l_orderkey
+   row-size=52B cardinality=600.12K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=38.69MB Threads=7
 Per-Host Resource Estimates: Memory=493MB
@@ -675,26 +773,31 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 07:EXCHANGE [HASH(o_orderpriority)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
+|  row-size=50B cardinality=150.00K
 |
 |--06:EXCHANGE [HASH(o_orderkey)]
 |  |
 |  00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1993-10-01', o_orderdate >= '1993-07-01'
+|     row-size=50B cardinality=150.00K
 |
 05:EXCHANGE [HASH(l_orderkey)]
 |
@@ -702,6 +805,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_commitdate < l_receiptdate
    runtime filters: RF000 -> l_orderkey
+   row-size=52B cardinality=600.12K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=73.62MB Threads=9
 Per-Host Resource Estimates: Memory=446MB
@@ -712,20 +816,24 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: o_orderpriority ASC
+|  row-size=28B cardinality=5
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 07:EXCHANGE [HASH(o_orderpriority)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: o_orderpriority
+|  row-size=28B cardinality=5
 |
 02:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
+|  row-size=50B cardinality=150.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -736,6 +844,7 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1993-10-01', o_orderdate >= '1993-07-01'
+|     row-size=50B cardinality=150.00K
 |
 05:EXCHANGE [HASH(l_orderkey)]
 |
@@ -743,6 +852,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_commitdate < l_receiptdate
    runtime filters: RF000 -> l_orderkey
+   row-size=52B cardinality=600.12K
 ====
 # TPCH-Q5
 # Q5 - Local Supplier Volume Query
@@ -777,55 +887,68 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=35B cardinality=25
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF000 <- r_regionkey
+|  row-size=134B cardinality=115.16K
 |
 |--05:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'ASIA'
+|     row-size=21B cardinality=1
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF002 <- n_nationkey
+|  row-size=113B cardinality=575.77K
 |
 |--04:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF000 -> n_regionkey
+|     row-size=23B cardinality=25
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = s_nationkey, l_suppkey = s_suppkey
 |  runtime filters: RF004 <- s_nationkey, RF005 <- s_suppkey
+|  row-size=90B cardinality=575.77K
 |
 |--03:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF008 <- c_custkey
+|  row-size=80B cardinality=575.77K
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF002 -> tpch.customer.c_nationkey, RF004 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF010 <- o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--01:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-01-01', o_orderdate >= '1994-01-01'
 |     runtime filters: RF008 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF005 -> l_suppkey, RF010 -> l_orderkey
+   row-size=32B cardinality=6.00M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=70.14MB Threads=14
 Per-Host Resource Estimates: Memory=667MB
@@ -836,60 +959,72 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=35B cardinality=25
 |
 19:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 18:EXCHANGE [HASH(n_name)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF000 <- r_regionkey
+|  row-size=134B cardinality=115.16K
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  05:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'ASIA'
+|     row-size=21B cardinality=1
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF002 <- n_nationkey
+|  row-size=113B cardinality=575.77K
 |
 |--16:EXCHANGE [BROADCAST]
 |  |
 |  04:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF000 -> n_regionkey
+|     row-size=23B cardinality=25
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = s_nationkey, l_suppkey = s_suppkey
 |  runtime filters: RF004 <- s_nationkey, RF005 <- s_suppkey
+|  row-size=90B cardinality=575.77K
 |
 |--15:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF008 <- c_custkey
+|  row-size=80B cardinality=575.77K
 |
 |--14:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF002 -> tpch.customer.c_nationkey, RF004 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF010 <- o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
@@ -897,10 +1032,12 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-01-01', o_orderdate >= '1994-01-01'
 |     runtime filters: RF008 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF005 -> l_suppkey, RF010 -> l_orderkey
+   row-size=32B cardinality=6.00M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=140.28MB Threads=15
 Per-Host Resource Estimates: Memory=632MB
@@ -911,20 +1048,24 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=35B cardinality=25
 |
 19:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 18:EXCHANGE [HASH(n_name)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n_name
+|  row-size=35B cardinality=25
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n_regionkey = r_regionkey
 |  runtime filters: RF000 <- r_regionkey
+|  row-size=134B cardinality=115.16K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -935,10 +1076,12 @@ PLAN-ROOT SINK
 |  05:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'ASIA'
+|     row-size=21B cardinality=1
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF002 <- n_nationkey
+|  row-size=113B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -949,10 +1092,12 @@ PLAN-ROOT SINK
 |  04:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF000 -> n_regionkey
+|     row-size=23B cardinality=25
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = s_nationkey, l_suppkey = s_suppkey
 |  runtime filters: RF004 <- s_nationkey, RF005 <- s_suppkey
+|  row-size=90B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -963,10 +1108,12 @@ PLAN-ROOT SINK
 |  03:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF008 <- c_custkey
+|  row-size=80B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -977,10 +1124,12 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF002 -> tpch.customer.c_nationkey, RF004 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF010 <- o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=04 plan-id=05 cohort-id=01
@@ -992,10 +1141,12 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1995-01-01', o_orderdate >= '1994-01-01'
 |     runtime filters: RF008 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF005 -> l_suppkey, RF010 -> l_orderkey
+   row-size=32B cardinality=6.00M
 ====
 # TPCH-Q6
 # Q6 - Forecasting Revenue Change Query
@@ -1015,10 +1166,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_discount <= 0.07, l_discount >= 0.05, l_quantity < 24, l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
+   row-size=46B cardinality=600.12K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=8.00MB Threads=3
 Per-Host Resource Estimates: Memory=284MB
@@ -1026,15 +1179,18 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: sum(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_discount <= 0.07, l_discount >= 0.05, l_quantity < 24, l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
+   row-size=46B cardinality=600.12K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=16.00MB Threads=3
 Per-Host Resource Estimates: Memory=206MB
@@ -1042,15 +1198,18 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: sum(l_extendedprice * l_discount)
+|  row-size=16B cardinality=1
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_discount <= 0.07, l_discount >= 0.05, l_quantity < 24, l_shipdate < '1995-01-01', l_shipdate >= '1994-01-01'
+   row-size=46B cardinality=600.12K
 ====
 # TPCH-Q7
 # Q7 - Volume Shipping Query
@@ -1099,54 +1258,67 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: supp_nation ASC, cust_nation ASC, l_year ASC
+|  row-size=58B cardinality=575.77K
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n1.n_name, n2.n_name, year(l_shipdate)
+|  row-size=58B cardinality=575.77K
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n2.n_nationkey
 |  other predicates: ((n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE'))
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=132B cardinality=575.77K
 |
 |--05:SCAN HDFS [tpch.nation n2]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n1.n_nationkey
 |  runtime filters: RF002 <- n1.n_nationkey
+|  row-size=111B cardinality=575.77K
 |
 |--04:SCAN HDFS [tpch.nation n1]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF004 <- c_custkey
+|  row-size=90B cardinality=575.77K
 |
 |--03:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF000 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF006 <- s_suppkey
+|  row-size=80B cardinality=575.77K
 |
 |--00:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF008 <- o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--02:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     runtime filters: RF004 -> o_custkey
+|     row-size=16B cardinality=1.50M
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate <= '1996-12-31', l_shipdate >= '1995-01-01'
    runtime filters: RF006 -> l_suppkey, RF008 -> l_orderkey
+   row-size=54B cardinality=600.12K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=86.95MB Threads=15
 Per-Host Resource Estimates: Memory=699MB
@@ -1157,65 +1329,78 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: supp_nation ASC, cust_nation ASC, l_year ASC
+|  row-size=58B cardinality=575.77K
 |
 20:AGGREGATE [FINALIZE]
 |  output: sum:merge(volume)
 |  group by: supp_nation, cust_nation, l_year
+|  row-size=58B cardinality=575.77K
 |
 19:EXCHANGE [HASH(supp_nation,cust_nation,l_year)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n1.n_name, n2.n_name, year(l_shipdate)
+|  row-size=58B cardinality=575.77K
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n2.n_nationkey
 |  other predicates: ((n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE'))
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=132B cardinality=575.77K
 |
 |--18:EXCHANGE [BROADCAST]
 |  |
 |  05:SCAN HDFS [tpch.nation n2]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n1.n_nationkey
 |  runtime filters: RF002 <- n1.n_nationkey
+|  row-size=111B cardinality=575.77K
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  04:SCAN HDFS [tpch.nation n1]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF004 <- c_custkey
+|  row-size=90B cardinality=575.77K
 |
 |--16:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF000 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF006 <- s_suppkey
+|  row-size=80B cardinality=575.77K
 |
 |--15:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF008 <- o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--14:EXCHANGE [HASH(o_orderkey)]
 |  |
 |  02:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     runtime filters: RF004 -> o_custkey
+|     row-size=16B cardinality=1.50M
 |
 13:EXCHANGE [HASH(l_orderkey)]
 |
@@ -1223,6 +1408,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate <= '1996-12-31', l_shipdate >= '1995-01-01'
    runtime filters: RF006 -> l_suppkey, RF008 -> l_orderkey
+   row-size=54B cardinality=600.12K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=144.78MB Threads=17
 Per-Host Resource Estimates: Memory=655MB
@@ -1233,21 +1419,25 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: supp_nation ASC, cust_nation ASC, l_year ASC
+|  row-size=58B cardinality=575.77K
 |
 20:AGGREGATE [FINALIZE]
 |  output: sum:merge(volume)
 |  group by: supp_nation, cust_nation, l_year
+|  row-size=58B cardinality=575.77K
 |
 19:EXCHANGE [HASH(supp_nation,cust_nation,l_year)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: n1.n_name, n2.n_name, year(l_shipdate)
+|  row-size=58B cardinality=575.77K
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n2.n_nationkey
 |  other predicates: ((n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE'))
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=132B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -1257,10 +1447,12 @@ PLAN-ROOT SINK
 |  |
 |  05:SCAN HDFS [tpch.nation n2]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n1.n_nationkey
 |  runtime filters: RF002 <- n1.n_nationkey
+|  row-size=111B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -1270,10 +1462,12 @@ PLAN-ROOT SINK
 |  |
 |  04:SCAN HDFS [tpch.nation n1]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF004 <- c_custkey
+|  row-size=90B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -1284,10 +1478,12 @@ PLAN-ROOT SINK
 |  03:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF000 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF006 <- s_suppkey
+|  row-size=80B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -1298,10 +1494,12 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF002 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF008 <- o_orderkey
+|  row-size=70B cardinality=575.77K
 |
 |--JOIN BUILD
 |  |  join-table-id=04 plan-id=05 cohort-id=01
@@ -1312,6 +1510,7 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     runtime filters: RF004 -> o_custkey
+|     row-size=16B cardinality=1.50M
 |
 13:EXCHANGE [HASH(l_orderkey)]
 |
@@ -1319,6 +1518,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate <= '1996-12-31', l_shipdate >= '1995-01-01'
    runtime filters: RF006 -> l_suppkey, RF008 -> l_orderkey
+   row-size=54B cardinality=600.12K
 ====
 # TPCH-Q8
 # Q8 - National Market Share Query
@@ -1366,70 +1566,87 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: o_year ASC
+|  row-size=36B cardinality=761
 |
 15:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN n2.n_name = 'BRAZIL' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
 |  group by: year(o_orderdate)
+|  row-size=36B cardinality=761
 |
 14:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n2.n_nationkey
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=184B cardinality=761
 |
 |--06:SCAN HDFS [tpch.nation n2]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 13:HASH JOIN [INNER JOIN]
 |  hash predicates: n1.n_regionkey = r_regionkey
 |  runtime filters: RF002 <- r_regionkey
+|  row-size=163B cardinality=761
 |
 |--07:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'AMERICA'
+|     row-size=21B cardinality=1
 |
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n1.n_nationkey
 |  runtime filters: RF004 <- n1.n_nationkey
+|  row-size=143B cardinality=3.81K
 |
 |--05:SCAN HDFS [tpch.nation n1]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF002 -> n1.n_regionkey
+|     row-size=4B cardinality=25
 |
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: c_custkey = o_custkey
 |  runtime filters: RF006 <- o_custkey
+|  row-size=139B cardinality=3.81K
 |
 |--10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: l_suppkey = s_suppkey
 |  |  runtime filters: RF008 <- s_suppkey
+|  |  row-size=129B cardinality=3.81K
 |  |
 |  |--01:SCAN HDFS [tpch.supplier]
 |  |     partitions=1/1 files=1 size=1.33MB
 |  |     runtime filters: RF000 -> s_nationkey
+|  |     row-size=10B cardinality=10.00K
 |  |
 |  09:HASH JOIN [INNER JOIN]
 |  |  hash predicates: o_orderkey = l_orderkey
 |  |  runtime filters: RF010 <- l_orderkey
+|  |  row-size=119B cardinality=3.81K
 |  |
 |  |--08:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: l_partkey = p_partkey
 |  |  |  runtime filters: RF012 <- p_partkey
+|  |  |  row-size=81B cardinality=39.66K
 |  |  |
 |  |  |--00:SCAN HDFS [tpch.part]
 |  |  |     partitions=1/1 files=1 size=22.83MB
 |  |  |     predicates: p_type = 'ECONOMY ANODIZED STEEL'
+|  |  |     row-size=41B cardinality=1.32K
 |  |  |
 |  |  02:SCAN HDFS [tpch.lineitem]
 |  |     partitions=1/1 files=1 size=718.94MB
 |  |     runtime filters: RF008 -> l_suppkey, RF012 -> l_partkey
+|  |     row-size=40B cardinality=6.00M
 |  |
 |  03:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate <= '1996-12-31', o_orderdate >= '1995-01-01'
 |     runtime filters: RF010 -> o_orderkey
+|     row-size=38B cardinality=150.00K
 |
 04:SCAN HDFS [tpch.customer]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF004 -> c_nationkey, RF006 -> c_custkey
+   row-size=10B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=71.46MB Threads=21
 Per-Host Resource Estimates: Memory=756MB
@@ -1440,73 +1657,87 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: o_year ASC
+|  row-size=36B cardinality=761
 |
 28:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN nation = 'BRAZIL' THEN volume ELSE 0 END), sum:merge(volume)
 |  group by: o_year
+|  row-size=36B cardinality=761
 |
 27:EXCHANGE [HASH(o_year)]
 |
 15:AGGREGATE [STREAMING]
 |  output: sum(CASE WHEN n2.n_name = 'BRAZIL' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
 |  group by: year(o_orderdate)
+|  row-size=36B cardinality=761
 |
 14:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n2.n_nationkey
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=184B cardinality=761
 |
 |--26:EXCHANGE [BROADCAST]
 |  |
 |  06:SCAN HDFS [tpch.nation n2]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 13:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n1.n_regionkey = r_regionkey
 |  runtime filters: RF002 <- r_regionkey
+|  row-size=163B cardinality=761
 |
 |--25:EXCHANGE [BROADCAST]
 |  |
 |  07:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'AMERICA'
+|     row-size=21B cardinality=1
 |
 12:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n1.n_nationkey
 |  runtime filters: RF004 <- n1.n_nationkey
+|  row-size=143B cardinality=3.81K
 |
 |--24:EXCHANGE [BROADCAST]
 |  |
 |  05:SCAN HDFS [tpch.nation n1]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF002 -> n1.n_regionkey
+|     row-size=4B cardinality=25
 |
 11:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF006 <- c_custkey
+|  row-size=139B cardinality=3.81K
 |
 |--23:EXCHANGE [HASH(c_custkey)]
 |  |
 |  04:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF004 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 22:EXCHANGE [HASH(o_custkey)]
 |
 10:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF008 <- s_suppkey
+|  row-size=129B cardinality=3.81K
 |
 |--21:EXCHANGE [HASH(s_suppkey)]
 |  |
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 20:EXCHANGE [HASH(l_suppkey)]
 |
 09:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF010 <- o_orderkey
+|  row-size=119B cardinality=3.81K
 |
 |--19:EXCHANGE [HASH(o_orderkey)]
 |  |
@@ -1514,22 +1745,26 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate <= '1996-12-31', o_orderdate >= '1995-01-01'
 |     runtime filters: RF006 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 18:EXCHANGE [HASH(l_orderkey)]
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF012 <- p_partkey
+|  row-size=81B cardinality=39.66K
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
 |     predicates: p_type = 'ECONOMY ANODIZED STEEL'
+|     row-size=41B cardinality=1.32K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF008 -> l_suppkey, RF010 -> l_orderkey, RF012 -> l_partkey
+   row-size=40B cardinality=6.00M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=141.05MB Threads=25
 Per-Host Resource Estimates: Memory=724MB
@@ -1540,20 +1775,24 @@ PLAN-ROOT SINK
 |
 16:SORT
 |  order by: o_year ASC
+|  row-size=36B cardinality=761
 |
 28:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN nation = 'BRAZIL' THEN volume ELSE 0 END), sum:merge(volume)
 |  group by: o_year
+|  row-size=36B cardinality=761
 |
 27:EXCHANGE [HASH(o_year)]
 |
 15:AGGREGATE [STREAMING]
 |  output: sum(CASE WHEN n2.n_name = 'BRAZIL' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
 |  group by: year(o_orderdate)
+|  row-size=36B cardinality=761
 |
 14:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n2.n_nationkey
 |  runtime filters: RF000 <- n2.n_nationkey
+|  row-size=184B cardinality=761
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -1563,10 +1802,12 @@ PLAN-ROOT SINK
 |  |
 |  06:SCAN HDFS [tpch.nation n2]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 13:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: n1.n_regionkey = r_regionkey
 |  runtime filters: RF002 <- r_regionkey
+|  row-size=163B cardinality=761
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -1577,10 +1818,12 @@ PLAN-ROOT SINK
 |  07:SCAN HDFS [tpch.region]
 |     partitions=1/1 files=1 size=384B
 |     predicates: r_name = 'AMERICA'
+|     row-size=21B cardinality=1
 |
 12:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n1.n_nationkey
 |  runtime filters: RF004 <- n1.n_nationkey
+|  row-size=143B cardinality=3.81K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -1591,10 +1834,12 @@ PLAN-ROOT SINK
 |  05:SCAN HDFS [tpch.nation n1]
 |     partitions=1/1 files=1 size=2.15KB
 |     runtime filters: RF002 -> n1.n_regionkey
+|     row-size=4B cardinality=25
 |
 11:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF006 <- c_custkey
+|  row-size=139B cardinality=3.81K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -1605,12 +1850,14 @@ PLAN-ROOT SINK
 |  04:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF004 -> c_nationkey
+|     row-size=10B cardinality=150.00K
 |
 22:EXCHANGE [HASH(o_custkey)]
 |
 10:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF008 <- s_suppkey
+|  row-size=129B cardinality=3.81K
 |
 |--JOIN BUILD
 |  |  join-table-id=04 plan-id=05 cohort-id=01
@@ -1621,12 +1868,14 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 20:EXCHANGE [HASH(l_suppkey)]
 |
 09:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF010 <- o_orderkey
+|  row-size=119B cardinality=3.81K
 |
 |--JOIN BUILD
 |  |  join-table-id=05 plan-id=06 cohort-id=01
@@ -1638,12 +1887,14 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate <= '1996-12-31', o_orderdate >= '1995-01-01'
 |     runtime filters: RF006 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 18:EXCHANGE [HASH(l_orderkey)]
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF012 <- p_partkey
+|  row-size=81B cardinality=39.66K
 |
 |--JOIN BUILD
 |  |  join-table-id=06 plan-id=07 cohort-id=01
@@ -1654,10 +1905,12 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
 |     predicates: p_type = 'ECONOMY ANODIZED STEEL'
+|     row-size=41B cardinality=1.32K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF008 -> l_suppkey, RF010 -> l_orderkey, RF012 -> l_partkey
+   row-size=40B cardinality=6.00M
 ====
 # TPCH-Q9
 # Q9 - Product Type Measure Query
@@ -1699,52 +1952,65 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: nation ASC, o_year DESC
+|  row-size=39B cardinality=61.70K
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity)
 |  group by: n_name, year(o_orderdate)
+|  row-size=39B cardinality=61.70K
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=186B cardinality=574.29K
 |
 |--05:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = ps_suppkey
 |  runtime filters: RF002 <- ps_partkey, RF003 <- ps_suppkey
+|  row-size=165B cardinality=574.29K
 |
 |--03:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
+|     row-size=24B cardinality=800.00K
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF006 <- s_suppkey
+|  row-size=141B cardinality=574.29K
 |
 |--01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> s_nationkey, RF003 -> tpch.supplier.s_suppkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF008 <- o_orderkey
+|  row-size=131B cardinality=574.29K
 |
 |--04:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=30B cardinality=1.50M
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF010 <- p_partkey
+|  row-size=101B cardinality=598.58K
 |
 |--00:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
 |     predicates: p_name LIKE '%green%'
 |     runtime filters: RF002 -> tpch.part.p_partkey
+|     row-size=53B cardinality=20.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF002 -> l_partkey, RF003 -> l_suppkey, RF006 -> l_suppkey, RF008 -> l_orderkey, RF010 -> l_partkey
+   row-size=48B cardinality=6.00M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=123.70MB Threads=15
 Per-Host Resource Estimates: Memory=879MB
@@ -1755,59 +2021,71 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: nation ASC, o_year DESC
+|  row-size=39B cardinality=61.70K
 |
 20:AGGREGATE [FINALIZE]
 |  output: sum:merge(amount)
 |  group by: nation, o_year
+|  row-size=39B cardinality=61.70K
 |
 19:EXCHANGE [HASH(nation,o_year)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity)
 |  group by: n_name, year(o_orderdate)
+|  row-size=39B cardinality=61.70K
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=186B cardinality=574.29K
 |
 |--18:EXCHANGE [BROADCAST]
 |  |
 |  05:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = ps_suppkey
 |  runtime filters: RF002 <- ps_partkey, RF003 <- ps_suppkey
+|  row-size=165B cardinality=574.29K
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
+|     row-size=24B cardinality=800.00K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF006 <- s_suppkey
+|  row-size=141B cardinality=574.29K
 |
 |--16:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> s_nationkey, RF003 -> tpch.supplier.s_suppkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF008 <- o_orderkey
+|  row-size=131B cardinality=574.29K
 |
 |--15:EXCHANGE [HASH(o_orderkey)]
 |  |
 |  04:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=30B cardinality=1.50M
 |
 14:EXCHANGE [HASH(l_orderkey)]
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF010 <- p_partkey
+|  row-size=101B cardinality=598.58K
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
@@ -1815,10 +2093,12 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=22.83MB
 |     predicates: p_name LIKE '%green%'
 |     runtime filters: RF002 -> tpch.part.p_partkey
+|     row-size=53B cardinality=20.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF002 -> l_partkey, RF003 -> l_suppkey, RF006 -> l_suppkey, RF008 -> l_orderkey, RF010 -> l_partkey
+   row-size=48B cardinality=6.00M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=230.39MB Threads=17
 Per-Host Resource Estimates: Memory=867MB
@@ -1829,20 +2109,24 @@ PLAN-ROOT SINK
 |
 12:SORT
 |  order by: nation ASC, o_year DESC
+|  row-size=39B cardinality=61.70K
 |
 20:AGGREGATE [FINALIZE]
 |  output: sum:merge(amount)
 |  group by: nation, o_year
+|  row-size=39B cardinality=61.70K
 |
 19:EXCHANGE [HASH(nation,o_year)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity)
 |  group by: n_name, year(o_orderdate)
+|  row-size=39B cardinality=61.70K
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=186B cardinality=574.29K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -1852,10 +2136,12 @@ PLAN-ROOT SINK
 |  |
 |  05:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = ps_partkey, l_suppkey = ps_suppkey
 |  runtime filters: RF002 <- ps_partkey, RF003 <- ps_suppkey
+|  row-size=165B cardinality=574.29K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -1865,10 +2151,12 @@ PLAN-ROOT SINK
 |  |
 |  03:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
+|     row-size=24B cardinality=800.00K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF006 <- s_suppkey
+|  row-size=141B cardinality=574.29K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -1879,10 +2167,12 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> s_nationkey, RF003 -> tpch.supplier.s_suppkey
+|     row-size=10B cardinality=10.00K
 |
 07:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF008 <- o_orderkey
+|  row-size=131B cardinality=574.29K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -1892,12 +2182,14 @@ PLAN-ROOT SINK
 |  |
 |  04:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=30B cardinality=1.50M
 |
 14:EXCHANGE [HASH(l_orderkey)]
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF010 <- p_partkey
+|  row-size=101B cardinality=598.58K
 |
 |--JOIN BUILD
 |  |  join-table-id=04 plan-id=05 cohort-id=01
@@ -1909,10 +2201,12 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=22.83MB
 |     predicates: p_name LIKE '%green%'
 |     runtime filters: RF002 -> tpch.part.p_partkey
+|     row-size=53B cardinality=20.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    runtime filters: RF002 -> l_partkey, RF003 -> l_suppkey, RF006 -> l_suppkey, RF008 -> l_orderkey, RF010 -> l_partkey
+   row-size=48B cardinality=6.00M
 ====
 # TPCH-Q10
 # Q10 - Returned Item Reporting Query
@@ -1956,38 +2250,47 @@ PLAN-ROOT SINK
 |
 08:TOP-N [LIMIT=20]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=230B cardinality=20
 |
 07:AGGREGATE [FINALIZE]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment
+|  row-size=230B cardinality=191.92K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=293B cardinality=191.92K
 |
 |--03:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: c_custkey = o_custkey
 |  runtime filters: RF002 <- o_custkey
+|  row-size=272B cardinality=191.92K
 |
 |--04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: l_orderkey = o_orderkey
 |  |  runtime filters: RF004 <- o_orderkey
+|  |  row-size=75B cardinality=191.92K
 |  |
 |  |--01:SCAN HDFS [tpch.orders]
 |  |     partitions=1/1 files=1 size=162.56MB
 |  |     predicates: o_orderdate < '1994-01-01', o_orderdate >= '1993-10-01'
+|  |     row-size=38B cardinality=150.00K
 |  |
 |  02:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: l_returnflag = 'R'
 |     runtime filters: RF004 -> l_orderkey
+|     row-size=37B cardinality=2.00M
 |
 00:SCAN HDFS [tpch.customer]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF000 -> c_nationkey, RF002 -> c_custkey
+   row-size=197B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=124.45MB Threads=11
 Per-Host Resource Estimates: Memory=668MB
@@ -1999,41 +2302,49 @@ PLAN-ROOT SINK
 |
 08:TOP-N [LIMIT=20]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=230B cardinality=20
 |
 14:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment
+|  row-size=230B cardinality=191.92K
 |
 13:EXCHANGE [HASH(c_custkey,c_name,c_acctbal,c_phone,n_name,c_address,c_comment)]
 |
 07:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment
+|  row-size=230B cardinality=191.92K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=293B cardinality=191.92K
 |
 |--12:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 05:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF002 <- c_custkey
+|  row-size=272B cardinality=191.92K
 |
 |--11:EXCHANGE [HASH(c_custkey)]
 |  |
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF000 -> c_nationkey
+|     row-size=197B cardinality=150.00K
 |
 10:EXCHANGE [HASH(o_custkey)]
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF004 <- o_orderkey
+|  row-size=75B cardinality=191.92K
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
@@ -2041,11 +2352,13 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1994-01-01', o_orderdate >= '1993-10-01'
 |     runtime filters: RF002 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_returnflag = 'R'
    runtime filters: RF004 -> l_orderkey
+   row-size=37B cardinality=2.00M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=163.89MB Threads=13
 Per-Host Resource Estimates: Memory=607MB
@@ -2057,20 +2370,24 @@ PLAN-ROOT SINK
 |
 08:TOP-N [LIMIT=20]
 |  order by: sum(l_extendedprice * (1 - l_discount)) DESC
+|  row-size=230B cardinality=20
 |
 14:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment
+|  row-size=230B cardinality=191.92K
 |
 13:EXCHANGE [HASH(c_custkey,c_name,c_acctbal,c_phone,n_name,c_address,c_comment)]
 |
 07:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment
+|  row-size=230B cardinality=191.92K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: c_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=293B cardinality=191.92K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -2080,10 +2397,12 @@ PLAN-ROOT SINK
 |  |
 |  03:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
+|     row-size=21B cardinality=25
 |
 05:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF002 <- c_custkey
+|  row-size=272B cardinality=191.92K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -2094,12 +2413,14 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
 |     runtime filters: RF000 -> c_nationkey
+|     row-size=197B cardinality=150.00K
 |
 10:EXCHANGE [HASH(o_custkey)]
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF004 <- o_orderkey
+|  row-size=75B cardinality=191.92K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -2111,11 +2432,13 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=162.56MB
 |     predicates: o_orderdate < '1994-01-01', o_orderdate >= '1993-10-01'
 |     runtime filters: RF002 -> o_custkey
+|     row-size=38B cardinality=150.00K
 |
 02:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_returnflag = 'R'
    runtime filters: RF004 -> l_orderkey
+   row-size=37B cardinality=2.00M
 ====
 # TPCH-Q11
 # Q11 - Important Stock Identification
@@ -2159,56 +2482,70 @@ PLAN-ROOT SINK
 |
 13:SORT
 |  order by: value DESC
+|  row-size=24B cardinality=32.00K
 |
 12:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: sum(ps_supplycost * ps_availqty) > sum(ps_supplycost * ps_availqty) * 0.0001
+|  row-size=40B cardinality=32.00K
 |
 |--11:AGGREGATE [FINALIZE]
 |  |  output: sum(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF004 <- n_nationkey
+|  |  row-size=51B cardinality=32.00K
 |  |
 |  |--08:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     predicates: n_name = 'GERMANY'
+|  |     row-size=21B cardinality=1
 |  |
 |  09:HASH JOIN [INNER JOIN]
 |  |  hash predicates: ps_suppkey = s_suppkey
 |  |  runtime filters: RF006 <- s_suppkey
+|  |  row-size=30B cardinality=800.00K
 |  |
 |  |--07:SCAN HDFS [tpch.supplier]
 |  |     partitions=1/1 files=1 size=1.33MB
 |  |     runtime filters: RF004 -> s_nationkey
+|  |     row-size=10B cardinality=10.00K
 |  |
 |  06:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
 |     runtime filters: RF006 -> ps_suppkey
+|     row-size=20B cardinality=800.00K
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=32.00K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=59B cardinality=32.00K
 |
 |--02:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     predicates: n_name = 'GERMANY'
+|     row-size=21B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF002 <- s_suppkey
+|  row-size=38B cardinality=800.00K
 |
 |--01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 00:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF002 -> ps_suppkey
+   row-size=28B cardinality=800.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=43.70MB Threads=15
 Per-Host Resource Estimates: Memory=541MB
@@ -2219,77 +2556,93 @@ PLAN-ROOT SINK
 |
 13:SORT
 |  order by: value DESC
+|  row-size=24B cardinality=32.00K
 |
 12:NESTED LOOP JOIN [INNER JOIN, BROADCAST]
 |  predicates: sum(ps_supplycost * ps_availqty) > sum(ps_supplycost * ps_availqty) * 0.0001
+|  row-size=40B cardinality=32.00K
 |
 |--22:EXCHANGE [BROADCAST]
 |  |
 |  21:AGGREGATE [FINALIZE]
 |  |  output: sum:merge(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  20:EXCHANGE [UNPARTITIONED]
 |  |
 |  11:AGGREGATE
 |  |  output: sum(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  10:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF004 <- n_nationkey
+|  |  row-size=51B cardinality=32.00K
 |  |
 |  |--19:EXCHANGE [BROADCAST]
 |  |  |
 |  |  08:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     predicates: n_name = 'GERMANY'
+|  |     row-size=21B cardinality=1
 |  |
 |  09:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: ps_suppkey = s_suppkey
 |  |  runtime filters: RF006 <- s_suppkey
+|  |  row-size=30B cardinality=800.00K
 |  |
 |  |--18:EXCHANGE [BROADCAST]
 |  |  |
 |  |  07:SCAN HDFS [tpch.supplier]
 |  |     partitions=1/1 files=1 size=1.33MB
 |  |     runtime filters: RF004 -> s_nationkey
+|  |     row-size=10B cardinality=10.00K
 |  |
 |  06:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
 |     runtime filters: RF006 -> ps_suppkey
+|     row-size=20B cardinality=800.00K
 |
 17:AGGREGATE [FINALIZE]
 |  output: sum:merge(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=32.00K
 |
 16:EXCHANGE [HASH(ps_partkey)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=32.00K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=59B cardinality=32.00K
 |
 |--15:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     predicates: n_name = 'GERMANY'
+|     row-size=21B cardinality=1
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF002 <- s_suppkey
+|  row-size=38B cardinality=800.00K
 |
 |--14:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 00:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF002 -> ps_suppkey
+   row-size=28B cardinality=800.00K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=87.41MB Threads=16
 Per-Host Resource Estimates: Memory=590MB
@@ -2300,10 +2653,12 @@ PLAN-ROOT SINK
 |
 13:SORT
 |  order by: value DESC
+|  row-size=24B cardinality=32.00K
 |
 12:NESTED LOOP JOIN [INNER JOIN, BROADCAST]
 |  join table id: 00
 |  predicates: sum(ps_supplycost * ps_availqty) > sum(ps_supplycost * ps_availqty) * 0.0001
+|  row-size=40B cardinality=32.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -2312,15 +2667,18 @@ PLAN-ROOT SINK
 |  |
 |  21:AGGREGATE [FINALIZE]
 |  |  output: sum:merge(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  20:EXCHANGE [UNPARTITIONED]
 |  |
 |  11:AGGREGATE
 |  |  output: sum(ps_supplycost * ps_availqty)
+|  |  row-size=16B cardinality=1
 |  |
 |  10:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: s_nationkey = n_nationkey
 |  |  runtime filters: RF004 <- n_nationkey
+|  |  row-size=51B cardinality=32.00K
 |  |
 |  |--JOIN BUILD
 |  |  |  join-table-id=01 plan-id=02 cohort-id=02
@@ -2331,10 +2689,12 @@ PLAN-ROOT SINK
 |  |  08:SCAN HDFS [tpch.nation]
 |  |     partitions=1/1 files=1 size=2.15KB
 |  |     predicates: n_name = 'GERMANY'
+|  |     row-size=21B cardinality=1
 |  |
 |  09:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: ps_suppkey = s_suppkey
 |  |  runtime filters: RF006 <- s_suppkey
+|  |  row-size=30B cardinality=800.00K
 |  |
 |  |--JOIN BUILD
 |  |  |  join-table-id=02 plan-id=03 cohort-id=02
@@ -2345,24 +2705,29 @@ PLAN-ROOT SINK
 |  |  07:SCAN HDFS [tpch.supplier]
 |  |     partitions=1/1 files=1 size=1.33MB
 |  |     runtime filters: RF004 -> s_nationkey
+|  |     row-size=10B cardinality=10.00K
 |  |
 |  06:SCAN HDFS [tpch.partsupp]
 |     partitions=1/1 files=1 size=112.71MB
 |     runtime filters: RF006 -> ps_suppkey
+|     row-size=20B cardinality=800.00K
 |
 17:AGGREGATE [FINALIZE]
 |  output: sum:merge(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=32.00K
 |
 16:EXCHANGE [HASH(ps_partkey)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(ps_supplycost * ps_availqty)
 |  group by: ps_partkey
+|  row-size=24B cardinality=32.00K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: s_nationkey = n_nationkey
 |  runtime filters: RF000 <- n_nationkey
+|  row-size=59B cardinality=32.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -2373,10 +2738,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpch.nation]
 |     partitions=1/1 files=1 size=2.15KB
 |     predicates: n_name = 'GERMANY'
+|     row-size=21B cardinality=1
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ps_suppkey = s_suppkey
 |  runtime filters: RF002 <- s_suppkey
+|  row-size=38B cardinality=800.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=04 plan-id=05 cohort-id=01
@@ -2387,10 +2754,12 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
 |     runtime filters: RF000 -> s_nationkey
+|     row-size=10B cardinality=10.00K
 |
 00:SCAN HDFS [tpch.partsupp]
    partitions=1/1 files=1 size=112.71MB
    runtime filters: RF002 -> ps_suppkey
+   row-size=28B cardinality=800.00K
 ====
 # TPCH-Q12
 # Q12 - Shipping Mode and Order Priority Query
@@ -2429,22 +2798,27 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: l_shipmode ASC
+|  row-size=32B cardinality=7
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=32B cardinality=7
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: o_orderkey = l_orderkey
 |  runtime filters: RF000 <- l_orderkey
+|  row-size=119B cardinality=320.78K
 |
 |--01:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: l_shipmode IN ('MAIL', 'SHIP'), l_commitdate < l_receiptdate, l_receiptdate < '1995-01-01', l_receiptdate >= '1994-01-01', l_shipdate < l_commitdate
+|     row-size=90B cardinality=320.78K
 |
 00:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
    runtime filters: RF000 -> o_orderkey
+   row-size=28B cardinality=1.50M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=67.94MB Threads=7
 Per-Host Resource Estimates: Memory=528MB
@@ -2455,25 +2829,30 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: l_shipmode ASC
+|  row-size=32B cardinality=7
 |
 08:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum:merge(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=32B cardinality=7
 |
 07:EXCHANGE [HASH(l_shipmode)]
 |
 03:AGGREGATE [STREAMING]
 |  output: sum(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=32B cardinality=7
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
+|  row-size=119B cardinality=320.78K
 |
 |--06:EXCHANGE [HASH(o_orderkey)]
 |  |
 |  00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=28B cardinality=1.50M
 |
 05:EXCHANGE [HASH(l_orderkey)]
 |
@@ -2481,6 +2860,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipmode IN ('MAIL', 'SHIP'), l_commitdate < l_receiptdate, l_receiptdate < '1995-01-01', l_receiptdate >= '1994-01-01', l_shipdate < l_commitdate
    runtime filters: RF000 -> l_orderkey
+   row-size=90B cardinality=320.78K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=101.88MB Threads=9
 Per-Host Resource Estimates: Memory=474MB
@@ -2491,20 +2871,24 @@ PLAN-ROOT SINK
 |
 04:SORT
 |  order by: l_shipmode ASC
+|  row-size=32B cardinality=7
 |
 08:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum:merge(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=32B cardinality=7
 |
 07:EXCHANGE [HASH(l_shipmode)]
 |
 03:AGGREGATE [STREAMING]
 |  output: sum(CASE WHEN o_orderpriority IN ('1-URGENT', '2-HIGH') THEN 1 ELSE 0 END), sum(CASE WHEN o_orderpriority != '1-URGENT' AND o_orderpriority != '2-HIGH' THEN 1 ELSE 0 END)
 |  group by: l_shipmode
+|  row-size=32B cardinality=7
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey
 |  runtime filters: RF000 <- o_orderkey
+|  row-size=119B cardinality=320.78K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -2514,6 +2898,7 @@ PLAN-ROOT SINK
 |  |
 |  00:SCAN HDFS [tpch.orders]
 |     partitions=1/1 files=1 size=162.56MB
+|     row-size=28B cardinality=1.50M
 |
 05:EXCHANGE [HASH(l_orderkey)]
 |
@@ -2521,6 +2906,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipmode IN ('MAIL', 'SHIP'), l_commitdate < l_receiptdate, l_receiptdate < '1995-01-01', l_receiptdate >= '1994-01-01', l_shipdate < l_commitdate
    runtime filters: RF000 -> l_orderkey
+   row-size=90B cardinality=320.78K
 ====
 # TPCH-Q13
 # Q13 - Customer Distribution Query
@@ -2551,26 +2937,32 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: count(*) DESC, c_count DESC
+|  row-size=16B cardinality=150.00K
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: count(o_orderkey)
+|  row-size=16B cardinality=150.00K
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(o_orderkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=85B cardinality=150.00K
 |
 |--00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 01:SCAN HDFS [tpch.orders]
    partitions=1/1 files=1 size=162.56MB
    predicates: NOT o_comment LIKE '%special%requests%'
    runtime filters: RF000 -> o_custkey
+   row-size=77B cardinality=150.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=41.38MB Threads=7
 Per-Host Resource Estimates: Memory=289MB
@@ -2581,29 +2973,35 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: count(*) DESC, c_count DESC
+|  row-size=16B cardinality=150.00K
 |
 09:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: c_count
+|  row-size=16B cardinality=150.00K
 |
 08:EXCHANGE [HASH(c_count)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: count(o_orderkey)
+|  row-size=16B cardinality=150.00K
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(o_orderkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 02:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=85B cardinality=150.00K
 |
 |--07:EXCHANGE [HASH(c_custkey)]
 |  |
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 06:EXCHANGE [HASH(o_custkey)]
 |
@@ -2611,6 +3009,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=162.56MB
    predicates: NOT o_comment LIKE '%special%requests%'
    runtime filters: RF000 -> o_custkey
+   row-size=77B cardinality=150.00K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=69.38MB Threads=9
 Per-Host Resource Estimates: Memory=334MB
@@ -2621,24 +3020,29 @@ PLAN-ROOT SINK
 |
 05:SORT
 |  order by: count(*) DESC, c_count DESC
+|  row-size=16B cardinality=150.00K
 |
 09:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: c_count
+|  row-size=16B cardinality=150.00K
 |
 08:EXCHANGE [HASH(c_count)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: count(o_orderkey)
+|  row-size=16B cardinality=150.00K
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(o_orderkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 02:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=85B cardinality=150.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -2648,6 +3052,7 @@ PLAN-ROOT SINK
 |  |
 |  00:SCAN HDFS [tpch.customer]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 06:EXCHANGE [HASH(o_custkey)]
 |
@@ -2655,6 +3060,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=162.56MB
    predicates: NOT o_comment LIKE '%special%requests%'
    runtime filters: RF000 -> o_custkey
+   row-size=77B cardinality=150.00K
 ====
 # TPCH-Q14
 # Q14 - Promotion Effect
@@ -2678,18 +3084,22 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF000 <- p_partkey
+|  row-size=87B cardinality=598.58K
 |
 |--01:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
+|     row-size=41B cardinality=200.00K
 |
 00:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate < '1995-10-01', l_shipdate >= '1995-09-01'
    runtime filters: RF000 -> l_partkey
+   row-size=46B cardinality=600.12K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=22.75MB Threads=6
 Per-Host Resource Estimates: Memory=364MB
@@ -2697,20 +3107,24 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum:merge(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: sum(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF000 <- p_partkey
+|  row-size=87B cardinality=598.58K
 |
 |--05:EXCHANGE [HASH(p_partkey)]
 |  |
 |  01:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
+|     row-size=41B cardinality=200.00K
 |
 04:EXCHANGE [HASH(l_partkey)]
 |
@@ -2718,6 +3132,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate < '1995-10-01', l_shipdate >= '1995-09-01'
    runtime filters: RF000 -> l_partkey
+   row-size=46B cardinality=600.12K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=41.75MB Threads=7
 Per-Host Resource Estimates: Memory=298MB
@@ -2725,15 +3140,18 @@ PLAN-ROOT SINK
 |
 07:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum:merge(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: sum(CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END), sum(l_extendedprice * (1 - l_discount))
+|  row-size=32B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_partkey = p_partkey
 |  runtime filters: RF000 <- p_partkey
+|  row-size=87B cardinality=598.58K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -2743,6 +3161,7 @@ PLAN-ROOT SINK
 |  |
 |  01:SCAN HDFS [tpch.part]
 |     partitions=1/1 files=1 size=22.83MB
+|     row-size=41B cardinality=200.00K
 |
 04:EXCHANGE [HASH(l_partkey)]
 |
@@ -2750,6 +3169,7 @@ PLAN-ROOT SINK
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate < '1995-10-01', l_shipdate >= '1995-09-01'
    runtime filters: RF000 -> l_partkey
+   row-size=46B cardinality=600.12K
 ====
 # TPCH-Q15
 # Q15 - Top Supplier Query
@@ -2790,36 +3210,45 @@ PLAN-ROOT SINK
 |
 08:SORT
 |  order by: s_suppkey ASC
+|  row-size=118B cardinality=1
 |
 07:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: sum(l_extendedprice * (1 - l_discount)) = max(total_revenue)
+|  row-size=126B cardinality=1
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: max(sum(l_extendedprice * (1 - l_discount)))
+|  |  row-size=16B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: sum(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  03:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
+|     row-size=46B cardinality=600.12K
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: s_suppkey = l_suppkey
 |  runtime filters: RF000 <- l_suppkey
+|  row-size=126B cardinality=10.00K
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: sum(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  01:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
+|     row-size=46B cardinality=600.12K
 |
 00:SCAN HDFS [tpch.supplier]
    partitions=1/1 files=1 size=1.33MB
    runtime filters: RF000 -> s_suppkey
+   row-size=102B cardinality=10.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=43.75MB Threads=10
 Per-Host Resource Estimates: Memory=638MB
@@ -2830,57 +3259,69 @@ PLAN-ROOT SINK
 |
 08:SORT
 |  order by: s_suppkey ASC
+|  row-size=118B cardinality=1
 |
 07:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: sum(l_extendedprice * (1 - l_discount)) = max(total_revenue)
+|  row-size=126B cardinality=1
 |
 |--16:EXCHANGE [BROADCAST]
 |  |
 |  15:AGGREGATE [FINALIZE]
 |  |  output: max:merge(total_revenue)
+|  |  row-size=16B cardinality=1
 |  |
 |  14:EXCHANGE [UNPARTITIONED]
 |  |
 |  05:AGGREGATE
 |  |  output: max(sum(l_extendedprice * (1 - l_discount)))
+|  |  row-size=16B cardinality=1
 |  |
 |  13:AGGREGATE [FINALIZE]
 |  |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  12:EXCHANGE [HASH(l_suppkey)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  output: sum(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  03:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
+|     row-size=46B cardinality=600.12K
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF000 <- s_suppkey
+|  row-size=126B cardinality=10.00K
 |
 |--11:EXCHANGE [HASH(s_suppkey)]
 |  |
 |  00:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
+|     row-size=102B cardinality=10.00K
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: l_suppkey
+|  row-size=24B cardinality=9.71K
 |
 09:EXCHANGE [HASH(l_suppkey)]
 |
 02:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_suppkey
+|  row-size=24B cardinality=9.71K
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
    runtime filters: RF000 -> tpch.lineitem.l_suppkey
+   row-size=46B cardinality=600.12K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=87.50MB Threads=12
 Per-Host Resource Estimates: Memory=530MB
@@ -2891,9 +3332,11 @@ PLAN-ROOT SINK
 |
 08:SORT
 |  order by: s_suppkey ASC
+|  row-size=118B cardinality=1
 |
 07:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: sum(l_extendedprice * (1 - l_discount)) = max(total_revenue)
+|  row-size=126B cardinality=1
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -2903,29 +3346,35 @@ PLAN-ROOT SINK
 |  |
 |  15:AGGREGATE [FINALIZE]
 |  |  output: max:merge(total_revenue)
+|  |  row-size=16B cardinality=1
 |  |
 |  14:EXCHANGE [UNPARTITIONED]
 |  |
 |  05:AGGREGATE
 |  |  output: max(sum(l_extendedprice * (1 - l_discount)))
+|  |  row-size=16B cardinality=1
 |  |
 |  13:AGGREGATE [FINALIZE]
 |  |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  12:EXCHANGE [HASH(l_suppkey)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  output: sum(l_extendedprice * (1 - l_discount))
 |  |  group by: l_suppkey
+|  |  row-size=24B cardinality=9.71K
 |  |
 |  03:SCAN HDFS [tpch.lineitem]
 |     partitions=1/1 files=1 size=718.94MB
 |     predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
+|     row-size=46B cardinality=600.12K
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_suppkey = s_suppkey
 |  runtime filters: RF000 <- s_suppkey
+|  row-size=126B cardinality=10.00K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -2935,21 +3384,25 @@ PLAN-ROOT SINK
 |  |
 |  00:SCAN HDFS [tpch.supplier]
 |     partitions=1/1 files=1 size=1.33MB
+|     row-size=102B cardinality=10.00K
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(l_extendedprice * (1 - l_discount))
 |  group by: l_suppkey
+|  row-size=24B cardinality=9.71K
 |
 09:EXCHANGE [HASH(l_suppkey)]
 |
 02:AGGREGATE [STREAMING]
 |  output: sum(l_extendedprice * (1 - l_discount))
 |  group by: l_suppkey
+|  row-size=24B cardinality=9.71K
 |
 01:SCAN HDFS [tpch.lineitem]
    partitions=1/1 files=1 size=718.94MB
    predicates: l_shipdate < '1996-04-01', l_shipdate >= '1996-01-01'
    runtime filters: RF000 -> tpch.lineitem.l_suppkey
+   row-size=46B cardinality=600.12K
 ====
 # TPCH-Q16
 # Q16 - Parts/Su

<TRUNCATED>

[25/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
IMPALA-8021: Add estimated cardinality to EXPLAIN output

Cardinality is vital to understanding why a plan has the form it does,
yet the planner normally emits cardinality information only for the
detailed levels. Unfortunately, most query profiles we see are at the
standard level without this information (except in the summary table),
making it hard to understand what happened.

This patch adds cardinality to the standard EXPLAIN output. It also
changes the displayed cardinality value to be in abbreviated "metric"
form: 1.23K instead of 1234, etc.

Changing the EXPLAIN output has a huge impact on PlannerTest: all the
"golden" test files must change. To avoid doing this twice, this patch
also includes:

IMPALA-7919: Add predicates line in plan output for partition key
predicates

This is also the time to include:

IMPALA-8022: Add cardinality checks to PlannerTest

The comparison code was changed to allow a set of validators, one of
which compares cardinality to ensure it is within 5% of the expected
value. This should ensure we don't change estimates unintentionally.

While many planner tests are concerned with cardinality, many others are
not. Testing showed that the cardinality is actually unstable within
tests. For such tests, added filters to ignore cardinality. The filter
is enabled by default (for backward compatibility) but disabled (to
allow cardinality verification) for the critical tests.

Rebasing the tests was complicated by a bug in the error-matching code,
so this patch also fixes:

IMPALA-8023: Fix PlannerTest to handle error lines consistently

Now, the error output written to the output "save results" file matches
that expected in the "golden" file -- no more handling these specially.

Testing:

* Added cardinality verification.
* Reran all FE tests.
* Rebased all PlannerTest .test files.
* Adjusted the metadata/test_explain.py test to handle the changed
  EXPLAIN output.

Change-Id: Ie9aa2d715b04cbb279aaffec8c5692686562d986
Reviewed-on: http://gerrit.cloudera.org:8080/12136
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/a7ea86b7
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/a7ea86b7
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/a7ea86b7

Branch: refs/heads/master
Commit: a7ea86b768247ff5388174445e7c91736b99c2de
Parents: 3a3ab7f
Author: paul-rogers <pr...@cloudera.com>
Authored: Thu Dec 27 17:55:16 2018 -0800
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sat Jan 12 04:03:26 2019 +0000

----------------------------------------------------------------------
 .../apache/impala/analysis/PartitionSet.java    |    2 +-
 .../org/apache/impala/common/PrintUtils.java    |   74 +-
 .../org/apache/impala/planner/EmptySetNode.java |    2 +
 .../org/apache/impala/planner/ExchangeNode.java |   11 +
 .../impala/planner/HdfsPartitionPruner.java     |   12 +-
 .../org/apache/impala/planner/HdfsScanNode.java |   48 +-
 .../impala/planner/NestedLoopJoinNode.java      |    1 +
 .../org/apache/impala/planner/PlanNode.java     |   28 +-
 .../impala/planner/SingleNodePlanner.java       |   11 +-
 .../apache/impala/planner/CardinalityTest.java  |    9 +
 .../org/apache/impala/planner/PlannerTest.java  |   53 +-
 .../apache/impala/planner/PlannerTestBase.java  |   15 +-
 .../org/apache/impala/testutil/TestUtils.java   |   55 +-
 .../queries/PlannerTest/aggregation.test        |  269 +++-
 .../queries/PlannerTest/analytic-fns.test       |  423 +++++-
 .../PlannerTest/complex-types-file-formats.test |   37 +-
 .../queries/PlannerTest/conjunct-ordering.test  |   17 +
 .../queries/PlannerTest/constant-folding.test   |   38 +-
 .../PlannerTest/constant-propagation.test       |   49 +-
 .../queries/PlannerTest/constant.test           |    2 +
 .../queries/PlannerTest/data-source-tables.test |    9 +-
 .../queries/PlannerTest/ddl.test                |   48 +
 .../default-join-distr-mode-broadcast.test      |   13 +-
 .../default-join-distr-mode-shuffle.test        |   13 +-
 .../queries/PlannerTest/disable-codegen.test    |   38 +-
 .../PlannerTest/disable-preaggregations.test    |    8 +
 .../queries/PlannerTest/distinct-estimate.test  |   13 +
 .../queries/PlannerTest/distinct.test           |  123 ++
 .../queries/PlannerTest/empty.test              |   67 +-
 .../PlannerTest/fk-pk-join-detection.test       |   72 +-
 .../queries/PlannerTest/hbase.test              |   76 +-
 .../queries/PlannerTest/hdfs.test               |  226 +++-
 .../queries/PlannerTest/implicit-joins.test     |   84 ++
 .../queries/PlannerTest/inline-view-limit.test  |  109 ++
 .../queries/PlannerTest/inline-view.test        |  209 +++
 .../queries/PlannerTest/insert-sort-by.test     |   72 +
 .../queries/PlannerTest/insert.test             |  129 ++
 .../queries/PlannerTest/join-order.test         |  285 ++++
 .../queries/PlannerTest/joins.test              |  441 +++++++
 .../queries/PlannerTest/kudu-delete.test        |   14 +
 .../queries/PlannerTest/kudu-selectivity.test   |    6 +-
 .../queries/PlannerTest/kudu-update.test        |   24 +
 .../queries/PlannerTest/kudu-upsert.test        |   50 +
 .../queries/PlannerTest/kudu.test               |   82 +-
 .../queries/PlannerTest/lineage.test            |  192 ++-
 .../queries/PlannerTest/max-row-size.test       |  110 +-
 .../PlannerTest/mem-limit-broadcast-join.test   |    3 +
 .../PlannerTest/min-max-runtime-filters.test    |   22 +-
 .../queries/PlannerTest/mt-dop-validation.test  |   40 +-
 .../PlannerTest/multiple-distinct-limit.test    |   28 +
 .../multiple-distinct-materialization.test      |  141 ++
 .../multiple-distinct-predicates.test           |   60 +
 .../queries/PlannerTest/multiple-distinct.test  |  160 +++
 .../queries/PlannerTest/nested-collections.test |  575 +++++++-
 .../queries/PlannerTest/nested-loop-join.test   |   52 +
 .../queries/PlannerTest/order.test              |  298 +++++
 .../queries/PlannerTest/outer-joins.test        |  173 +++
 .../PlannerTest/parquet-filtering-disabled.test |   32 +-
 .../queries/PlannerTest/parquet-filtering.test  |   20 +-
 .../queries/PlannerTest/parquet-stats-agg.test  |  124 +-
 .../PlannerTest/partition-key-scans.test        |   57 +
 .../queries/PlannerTest/partition-pruning.test  |    1 +
 .../PlannerTest/predicate-propagation.test      |  235 +++-
 .../PlannerTest/resource-requirements.test      |  965 +++++++-------
 .../PlannerTest/runtime-filter-propagation.test |  284 +++-
 .../runtime-filter-query-options.test           |  113 ++
 .../PlannerTest/shuffle-by-distinct-exprs.test  |   55 +
 .../queries/PlannerTest/small-query-opt.test    |   53 +-
 .../PlannerTest/sort-expr-materialization.test  |   32 +-
 .../PlannerTest/spillable-buffer-sizing.test    |  192 +--
 .../queries/PlannerTest/subquery-rewrite.test   |  508 +++++++
 .../queries/PlannerTest/tablesample.test        |   12 +-
 .../PlannerTest/topn-bytes-limit-small.test     |   12 +
 .../queries/PlannerTest/topn-bytes-limit.test   |    6 +-
 .../queries/PlannerTest/topn.test               |   73 ++
 .../queries/PlannerTest/tpcds-all.test          | 1234 ++++++++++++++++++
 .../queries/PlannerTest/tpch-all.test           |  658 ++++++++++
 .../queries/PlannerTest/tpch-kudu.test          |  200 +++
 .../queries/PlannerTest/tpch-nested.test        |  701 +++++++++-
 .../queries/PlannerTest/tpch-views.test         |  200 +++
 .../queries/PlannerTest/union.test              |  913 ++++++++++++-
 .../queries/PlannerTest/values.test             |   12 +
 .../queries/PlannerTest/views.test              |   88 ++
 .../queries/PlannerTest/with-clause.test        |  138 ++
 .../queries/QueryTest/corrupt-stats.test        |   14 +-
 .../queries/QueryTest/explain-level1.test       |    3 +
 .../queries/QueryTest/explain-level2.test       |   30 +-
 .../queries/QueryTest/explain-level3.test       |   24 +-
 .../queries/QueryTest/stats-extrapolation.test  |   11 +-
 tests/metadata/test_explain.py                  |    6 +-
 90 files changed, 11078 insertions(+), 1149 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
index 53a321f..e9972bb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
@@ -86,7 +86,7 @@ public class PartitionSet extends PartitionSpecBase {
 
     try {
       HdfsPartitionPruner pruner = new HdfsPartitionPruner(desc);
-      partitions_ = pruner.prunePartitions(analyzer, transformedConjuncts, true);
+      partitions_ = pruner.prunePartitions(analyzer, transformedConjuncts, true).first;
     } catch (ImpalaException e) {
       if (e instanceof AnalysisException) throw (AnalysisException) e;
       throw new AnalysisException("Partition expr evaluation failed in the backend.", e);

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/common/PrintUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/PrintUtils.java b/fe/src/main/java/org/apache/impala/common/PrintUtils.java
index f4814b5..2636914 100644
--- a/fe/src/main/java/org/apache/impala/common/PrintUtils.java
+++ b/fe/src/main/java/org/apache/impala/common/PrintUtils.java
@@ -24,6 +24,8 @@ import static org.apache.impala.common.ByteUnits.PETABYTE;
 import static org.apache.impala.common.ByteUnits.TERABYTE;
 
 import java.text.DecimalFormat;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.text.WordUtils;
@@ -49,6 +51,73 @@ public class PrintUtils {
     return bytes + "B";
   }
 
+  public static final long KILO = 1000;
+  public static final long MEGA = KILO * 1000;
+  public static final long GIGA = MEGA * 1000;
+  public static final long TERA = GIGA * 1000;
+
+  /**
+   * Print a value using simple metric (power of 1000) units. Units are
+   * (none), K, M, G or T. Value has two digits past the decimal point.
+   */
+  public static String printMetric(long value) {
+    double result = value;
+    if (value >= TERA) return new DecimalFormat(".00T").format(result / TERA);
+    if (value >= GIGA) return new DecimalFormat(".00G").format(result / GIGA);
+    if (value >= MEGA) return new DecimalFormat(".00M").format(result / MEGA);
+    if (value >= KILO) return new DecimalFormat(".00K").format(result / KILO);
+    return Long.toString(value);
+  }
+
+  /**
+   * Pattern to use when searching for a metric-encoded value.
+   */
+  public static final String METRIC_REGEX = "(\\d+(?:.\\d+)?)([TGMK]?)";
+
+  /**
+   * Pattern to use when searching for or parsing a metric-encoded value.
+   */
+  public static final Pattern METRIC_PATTERN =
+      Pattern.compile(METRIC_REGEX, Pattern.CASE_INSENSITIVE);
+
+  /**
+   * Decode a value metric-encoded using {@link #printMetric(long)}.
+   * @param value metric-encoded string
+   * @return approximate numeric value, or -1 if the value is invalid
+   * (metric encoded strings can never be negative normally)
+   */
+  public static double decodeMetric(String value) {
+    Matcher m = METRIC_PATTERN.matcher(value);
+    if (! m.matches()) return -1;
+    return decodeMetric(m.group(1), m.group(2));
+  }
+
+  /**
+   * Decode a metric-encoded string already parsed into parts.
+   * @param valueStr numeric part of the value
+   * @param units units part of the value
+   * @return approximate numeric value
+   */
+  // Yes, "PrintUtils" is an odd place for a parse function, but
+  // best to keep the formatter and parser together.
+  public static double decodeMetric(String valueStr, String units) {
+    double value = Double.parseDouble(valueStr);
+    switch (units.toUpperCase()) {
+    case "":
+      return value;
+    case "K":
+      return value * KILO;
+    case "M":
+      return value * MEGA;
+    case "G":
+      return value * GIGA;
+    case "T":
+      return value * TERA;
+    default:
+      return -1;
+    }
+  }
+
   /**
    * Same as printBytes() except 0 decimal points are shown for MB and KB.
    */
@@ -65,9 +134,8 @@ public class PrintUtils {
     return bytes + "B";
   }
 
-  public static String printCardinality(String prefix, long cardinality) {
-    return prefix + "cardinality=" +
-        ((cardinality != -1) ? String.valueOf(cardinality) : "unavailable");
+  public static String printCardinality(long cardinality) {
+    return (cardinality != -1) ? printMetric(cardinality) : "unavailable";
   }
 
   public static String printNumHosts(String prefix, long numHosts) {

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java b/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java
index 7b3ea33..55830c2 100644
--- a/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java
@@ -76,4 +76,6 @@ public class EmptySetNode extends PlanNode {
     msg.node_type = TPlanNodeType.EMPTY_SET_NODE;
   }
 
+  @Override
+  protected boolean displayCardinality(TExplainLevel detailLevel) { return false; }
 }

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java b/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java
index 356ae6b..a140dc2 100644
--- a/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java
@@ -173,6 +173,17 @@ public class ExchangeNode extends PlanNode {
     return output.toString();
   }
 
+  /**
+   * An Exchange simply moves rows over the network: its row width
+   * and cardinality are identical to its input. So, for standard
+   * level, there is no need to repeat these values. Retained in
+   * higher levels for backward compatibility.
+   */
+  @Override
+  protected boolean displayCardinality(TExplainLevel detailLevel) {
+    return detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal();
+  }
+
   @Override
   protected String getDisplayLabelDetail() {
     // For the non-fragmented explain levels, print the data partition

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
index 9fb204a..e9deb44 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
@@ -44,6 +44,7 @@ import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.PrunablePartition;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.Pair;
 import org.apache.impala.rewrite.BetweenToCompoundRule;
 import org.apache.impala.rewrite.ExprRewriter;
 import org.slf4j.Logger;
@@ -97,16 +98,20 @@ public class HdfsPartitionPruner {
 
   /**
    * Return a list of partitions left after applying the conjuncts. Please note
-   * that conjuncts used for filtering will be removed from the list 'conjuncts'.
+   * that conjuncts used for filtering will be removed from the list 'conjuncts' and
+   * returned as the second item in the returned Pair. These expressions can be
+   * shown in the EXPLAIN output.
+   *
    * If 'allowEmpty' is False, empty partitions are not returned.
    */
-  public List<? extends FeFsPartition> prunePartitions(
+  public Pair<List<? extends FeFsPartition>, List<Expr>> prunePartitions(
       Analyzer analyzer, List<Expr> conjuncts, boolean allowEmpty)
       throws ImpalaException {
     // Start with creating a collection of partition filters for the applicable conjuncts.
     List<HdfsPartitionFilter> partitionFilters = new ArrayList<>();
     // Conjuncts that can be evaluated from the partition key values.
     List<Expr> simpleFilterConjuncts = new ArrayList<>();
+    List<Expr> partitionConjuncts = new ArrayList<>();
 
     // Simple predicates (e.g. binary predicates of the form
     // <SlotRef> <op> <LiteralExpr>) can be used to derive lists
@@ -128,6 +133,7 @@ public class HdfsPartitionPruner {
         } else {
           partitionFilters.add(new HdfsPartitionFilter(clonedConjunct, tbl_, analyzer));
         }
+        partitionConjuncts.add(conjunct);
         it.remove();
       }
     }
@@ -168,7 +174,7 @@ public class HdfsPartitionPruner {
             }
           }));
     }
-    return results;
+    return new Pair<>(results, partitionConjuncts);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
index 4f9a961..cc31ce4 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
@@ -49,9 +49,9 @@ import org.apache.impala.analysis.TupleDescriptor;
 import org.apache.impala.analysis.TupleId;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.ColumnStats;
-import org.apache.impala.catalog.HdfsCompression;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.HdfsCompression;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsPartition.FileBlock;
 import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
@@ -68,8 +68,8 @@ import org.apache.impala.fb.FbFileBlock;
 import org.apache.impala.service.BackendConfig;
 import org.apache.impala.thrift.TExplainLevel;
 import org.apache.impala.thrift.TExpr;
-import org.apache.impala.thrift.THdfsFileSplit;
 import org.apache.impala.thrift.TFileSplitGeneratorSpec;
+import org.apache.impala.thrift.THdfsFileSplit;
 import org.apache.impala.thrift.THdfsScanNode;
 import org.apache.impala.thrift.TNetworkAddress;
 import org.apache.impala.thrift.TPlanNode;
@@ -91,8 +91,6 @@ import com.google.common.base.Objects;
 import com.google.common.base.Objects.ToStringHelper;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
 
 /**
  * Scan of a single table.
@@ -261,10 +259,13 @@ public class HdfsScanNode extends ScanNode {
   // parquet::Statistics.
   private TupleDescriptor minMaxTuple_;
 
-  // Slot that is used to record the Parquet metatdata for the count(*) aggregation if
+  // Slot that is used to record the Parquet metadata for the count(*) aggregation if
   // this scan node has the count(*) optimization enabled.
   private SlotDescriptor countStarSlot_ = null;
 
+  // Conjuncts used to trim the set of partitions passed to this node.
+  // Used only to display EXPLAIN information.
+  private final List<Expr> partitionConjuncts_;
   /**
    * Construct a node to scan given data files into tuples described by 'desc',
    * with 'conjuncts' being the unevaluated conjuncts bound by the tuple and
@@ -272,12 +273,14 @@ public class HdfsScanNode extends ScanNode {
    * class comments above for details.
    */
   public HdfsScanNode(PlanNodeId id, TupleDescriptor desc, List<Expr> conjuncts,
-      List<? extends FeFsPartition> partitions, TableRef hdfsTblRef, AggregateInfo aggInfo) {
+      List<? extends FeFsPartition> partitions, TableRef hdfsTblRef,
+      AggregateInfo aggInfo, List<Expr> partConjuncts) {
     super(id, desc, "SCAN HDFS");
     Preconditions.checkState(desc.getTable() instanceof FeFsTable);
     tbl_ = (FeFsTable)desc.getTable();
     conjuncts_ = conjuncts;
     partitions_ = partitions;
+    partitionConjuncts_ = partConjuncts;
     sampleParams_ = hdfsTblRef.getSampleParams();
     replicaPreference_ = hdfsTblRef.getReplicaPreference();
     randomReplica_ = hdfsTblRef.getRandomReplica();
@@ -1224,20 +1227,27 @@ public class HdfsScanNode extends ScanNode {
     }
     output.append("]\n");
     if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
+      if (partitionConjuncts_ != null && !partitionConjuncts_.isEmpty()) {
+        output.append(detailPrefix)
+          .append(String.format("partition predicates: %s\n",
+              getExplainString(partitionConjuncts_, detailLevel)));
+      }
       if (tbl_.getNumClusteringCols() == 0) numPartitions_ = 1;
-      output.append(String.format("%spartitions=%s/%s files=%s size=%s", detailPrefix,
-          numPartitions_, table.getPartitions().size(), totalFiles_,
-          PrintUtils.printBytes(totalBytes_)));
-      output.append("\n");
+      output.append(detailPrefix)
+        .append(String.format("partitions=%d/%d files=%d size=%s\n",
+            numPartitions_, table.getPartitions().size(), totalFiles_,
+            PrintUtils.printBytes(totalBytes_)));
       if (!conjuncts_.isEmpty()) {
-        output.append(String.format("%spredicates: %s\n", detailPrefix,
+        output.append(detailPrefix)
+          .append(String.format("predicates: %s\n",
             getExplainString(conjuncts_, detailLevel)));
       }
       if (!collectionConjuncts_.isEmpty()) {
         for (Map.Entry<TupleDescriptor, List<Expr>> entry:
           collectionConjuncts_.entrySet()) {
           String alias = entry.getKey().getAlias();
-          output.append(String.format("%spredicates on %s: %s\n", detailPrefix, alias,
+          output.append(detailPrefix)
+            .append(String.format("predicates on %s: %s\n", alias,
               getExplainString(entry.getValue(), detailLevel)));
         }
       }
@@ -1255,14 +1265,16 @@ public class HdfsScanNode extends ScanNode {
       } else if (extrapolatedNumRows_ == -1) {
         extrapRows = "unavailable";
       }
-      output.append(String.format("%sextrapolated-rows=%s", detailPrefix, extrapRows));
+      output.append(detailPrefix)
+        .append(String.format("extrapolated-rows=%s", extrapRows));
       output.append(String.format(" max-scan-range-rows=%s",
           maxScanRangeNumRows_ == -1 ? "unavailable" : maxScanRangeNumRows_));
       output.append("\n");
       if (numScanRangesNoDiskIds_ > 0) {
-        output.append(String.format("%smissing disk ids: "
+        output.append(detailPrefix)
+          .append(String.format("missing disk ids: "
                 + "partitions=%s/%s files=%s/%s scan ranges %s/%s\n",
-            detailPrefix, numPartitionsNoDiskIds_, numPartitions_, numFilesNoDiskIds_,
+            numPartitionsNoDiskIds_, numPartitions_, numFilesNoDiskIds_,
             totalFiles_, numScanRangesNoDiskIds_,
             scanRangeSpecs_.getConcrete_rangesSize() + generatedScanRangeCount_));
       }
@@ -1283,10 +1295,12 @@ public class HdfsScanNode extends ScanNode {
       TupleDescriptor tupleDesc = entry.getKey();
       List<Expr> exprs = entry.getValue();
       if (tupleDesc == getTupleDesc()) {
-        output.append(String.format("%sparquet statistics predicates: %s\n", prefix,
+        output.append(prefix)
+        .append(String.format("parquet statistics predicates: %s\n",
             getExplainString(exprs, detailLevel)));
       } else {
-        output.append(String.format("%sparquet statistics predicates on %s: %s\n", prefix,
+        output.append(prefix)
+        .append(String.format("parquet statistics predicates on %s: %s\n",
             tupleDesc.getAlias(), getExplainString(exprs, detailLevel)));
       }
     }

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java b/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java
index 1ecd1c5..afa75e7 100644
--- a/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java
@@ -30,6 +30,7 @@ import org.apache.impala.thrift.TNestedLoopJoinNode;
 import org.apache.impala.thrift.TPlanNode;
 import org.apache.impala.thrift.TPlanNodeType;
 import org.apache.impala.thrift.TQueryOptions;
+
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/planner/PlanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/PlanNode.java b/fe/src/main/java/org/apache/impala/planner/PlanNode.java
index a329dc2..520cd8f 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlanNode.java
@@ -320,6 +320,7 @@ abstract public class PlanNode extends TreeNode<PlanNode> {
 
     // Output cardinality, cost estimates and tuple Ids only when explain plan level
     // is extended or above.
+    boolean displayCardinality = displayCardinality(detailLevel);
     if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
       // Print resource profile.
       expBuilder.append(detailPrefix);
@@ -334,9 +335,18 @@ abstract public class PlanNode extends TreeNode<PlanNode> {
         expBuilder.append(tupleId.asInt() + nullIndicator);
         if (i + 1 != tupleIds_.size()) expBuilder.append(",");
       }
-      expBuilder.append(" row-size=" + PrintUtils.printBytes(Math.round(avgRowSize_)));
-      expBuilder.append(PrintUtils.printCardinality(" ", cardinality_));
-      expBuilder.append("\n");
+      expBuilder.append(displayCardinality ? " " : "\n");
+    }
+    // Output cardinality: in standard and above levels.
+    // In standard, on a line by itself (if wanted). In extended, on
+    // a line with tuple ids.
+    if (displayCardinality) {
+      if (detailLevel == TExplainLevel.STANDARD) expBuilder.append(detailPrefix);
+      expBuilder.append("row-size=")
+        .append(PrintUtils.printBytes(Math.round(avgRowSize_)))
+        .append(" cardinality=")
+        .append(PrintUtils.printCardinality(cardinality_))
+        .append("\n");
     }
 
     if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
@@ -353,7 +363,6 @@ abstract public class PlanNode extends TreeNode<PlanNode> {
       }
     }
 
-
     // Print the children. Do not traverse into the children of an Exchange node to
     // avoid crossing fragment boundaries.
     if (traverseChildren) {
@@ -386,6 +395,17 @@ abstract public class PlanNode extends TreeNode<PlanNode> {
   }
 
   /**
+   * Per-node setting whether to include cardinality in the node overview.
+   * Some nodes omit cardinality because either a) it is not needed
+   * (Empty set, Exchange), or b) it is printed by the node itself (HDFS scan.)
+   * @return true if cardinality should be included in the generic
+   * node details, false if it should be omitted.
+   */
+  protected boolean displayCardinality(TExplainLevel detailLevel) {
+    return detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal();
+  }
+
+  /**
    * Return the node-specific details.
    * Subclass should override this function.
    * Each line should be prefixed by detailPrefix.

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
index 53b224e..a31bb50 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
@@ -1280,7 +1280,9 @@ public class SingleNodePlanner {
     // Do partition pruning before deciding which slots to materialize because we might
     // end up removing some predicates.
     HdfsPartitionPruner pruner = new HdfsPartitionPruner(tupleDesc);
-    List<? extends FeFsPartition> partitions = pruner.prunePartitions(analyzer, conjuncts, false);
+    Pair<List<? extends FeFsPartition>, List<Expr>> pair =
+        pruner.prunePartitions(analyzer, conjuncts, false);
+    List<? extends FeFsPartition> partitions = pair.first;
 
     // Mark all slots referenced by the remaining conjuncts as materialized.
     analyzer.materializeSlots(conjuncts);
@@ -1323,9 +1325,9 @@ public class SingleNodePlanner {
       unionNode.init(analyzer);
       return unionNode;
     } else {
-      ScanNode scanNode =
+      HdfsScanNode scanNode =
           new HdfsScanNode(ctx_.getNextNodeId(), tupleDesc, conjuncts, partitions,
-              hdfsTblRef, aggInfo);
+              hdfsTblRef, aggInfo, pair.second);
       scanNode.init(analyzer);
       return scanNode;
     }
@@ -1409,7 +1411,6 @@ public class SingleNodePlanner {
     ((HBaseScanNode)scanNode).setKeyRanges(keyRanges);
     scanNode.addConjuncts(conjuncts);
     scanNode.init(analyzer);
-
     return scanNode;
   }
 
@@ -1424,7 +1425,7 @@ public class SingleNodePlanner {
    * - for outer joins: same type of conjuncts as inner joins, but only from the
    *   ON or USING clause
    * Predicates that are redundant based on equivalence classes are intentionally
-   * returneded by this function because the removal of redundant predicates and the
+   * returned by this function because the removal of redundant predicates and the
    * creation of new predicates for enforcing slot equivalences go hand-in-hand
    * (see analyzer.createEquivConjuncts()).
    */

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/test/java/org/apache/impala/planner/CardinalityTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/planner/CardinalityTest.java b/fe/src/test/java/org/apache/impala/planner/CardinalityTest.java
index da59860..3bbb903 100644
--- a/fe/src/test/java/org/apache/impala/planner/CardinalityTest.java
+++ b/fe/src/test/java/org/apache/impala/planner/CardinalityTest.java
@@ -93,6 +93,15 @@ public class CardinalityTest extends PlannerTestBase {
         "SELECT COUNT(*) FROM functional.alltypes GROUP BY bool_col", 2);
   }
 
+  @Test
+  public void testNullColumnJoinCardinality() throws ImpalaException {
+    // IMPALA-7565: Make sure there is no division by zero during cardinality calculation
+    // in a many to many join on null columns (ndv = 0).
+    String query = "select * from functional.nulltable t1 "
+        + "inner join [shuffle] functional.nulltable t2 on t1.d = t2.d";
+    checkCardinality(query, 1, 1);
+  }
+
   /**
    * Joins should multiply out cardinalities.
    */

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/planner/PlannerTest.java b/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
index c8dbf1f..683f702 100644
--- a/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
+++ b/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
@@ -31,6 +31,7 @@ import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.RuntimeEnv;
 import org.apache.impala.service.Frontend.PlanCtx;
 import org.apache.impala.testutil.TestUtils;
+import org.apache.impala.testutil.TestUtils.IgnoreValueFilter;
 import org.apache.impala.thrift.TExecRequest;
 import org.apache.impala.thrift.TExplainLevel;
 import org.apache.impala.thrift.TJoinDistributionMode;
@@ -75,7 +76,8 @@ public class PlannerTest extends PlannerTestBase {
 
   @Test
   public void testEmpty() {
-    runPlannerTestFile("empty");
+    runPlannerTestFile("empty",
+        ImmutableSet.of(PlannerTestOption.VALIDATE_CARDINALITY));
   }
 
   @Test
@@ -163,29 +165,34 @@ public class PlannerTest extends PlannerTestBase {
 
   @Test
   public void testJoins() {
-    runPlannerTestFile("joins");
+    runPlannerTestFile("joins",
+        ImmutableSet.of(PlannerTestOption.VALIDATE_CARDINALITY));
   }
 
   @Test
   public void testJoinOrder() {
-    runPlannerTestFile("join-order");
+    runPlannerTestFile("join-order",
+        ImmutableSet.of(PlannerTestOption.VALIDATE_CARDINALITY));
   }
 
   @Test
   public void testOuterJoins() {
-    runPlannerTestFile("outer-joins");
+    runPlannerTestFile("outer-joins",
+        ImmutableSet.of(PlannerTestOption.VALIDATE_CARDINALITY));
   }
 
   @Test
   public void testImplicitJoins() {
-    runPlannerTestFile("implicit-joins");
+    runPlannerTestFile("implicit-joins",
+        ImmutableSet.of(PlannerTestOption.VALIDATE_CARDINALITY));
   }
 
   @Test
   public void testFkPkJoinDetection() {
     // The FK/PK detection result is included in EXTENDED or higher.
     runPlannerTestFile("fk-pk-join-detection",
-        ImmutableSet.of(PlannerTestOption.EXTENDED_EXPLAIN));
+        ImmutableSet.of(PlannerTestOption.EXTENDED_EXPLAIN,
+            PlannerTestOption.VALIDATE_CARDINALITY));
   }
 
   @Test
@@ -278,7 +285,8 @@ public class PlannerTest extends PlannerTestBase {
   public void testTpch() {
     runPlannerTestFile("tpch-all", "tpch",
         ImmutableSet.of(PlannerTestOption.INCLUDE_RESOURCE_HEADER,
-            PlannerTestOption.VALIDATE_RESOURCES));
+            PlannerTestOption.VALIDATE_RESOURCES,
+            PlannerTestOption.VALIDATE_CARDINALITY));
   }
 
   @Test
@@ -298,7 +306,8 @@ public class PlannerTest extends PlannerTestBase {
   public void testTpchNested() {
     runPlannerTestFile("tpch-nested", "tpch_nested_parquet",
         ImmutableSet.of(PlannerTestOption.INCLUDE_RESOURCE_HEADER,
-            PlannerTestOption.VALIDATE_RESOURCES));
+            PlannerTestOption.VALIDATE_RESOURCES,
+            PlannerTestOption.VALIDATE_CARDINALITY));
   }
 
   @Test
@@ -539,11 +548,16 @@ public class PlannerTest extends PlannerTestBase {
   }
 
   @Test
-  public void testDefaultJoinDistributionMode() {
+  public void testDefaultJoinDistributionBroadcastMode() {
     TQueryOptions options = defaultQueryOptions();
     Preconditions.checkState(
         options.getDefault_join_distribution_mode() == TJoinDistributionMode.BROADCAST);
     runPlannerTestFile("default-join-distr-mode-broadcast", options);
+  }
+
+  @Test
+  public void testDefaultJoinDistributionShuffleMode() {
+    TQueryOptions options = defaultQueryOptions();
     options.setDefault_join_distribution_mode(TJoinDistributionMode.SHUFFLE);
     runPlannerTestFile("default-join-distr-mode-shuffle", options);
   }
@@ -707,12 +721,21 @@ public class PlannerTest extends PlannerTestBase {
         8 * 1024 * 1024);
   }
 
+  /**
+   * Verify that various expected-result filters work on a
+   * variety of sample input lines.
+   */
   @Test
-  public void testNullColumnJoinCardinality() throws ImpalaException {
-    // IMPALA-7565: Make sure there is no division by zero during cardinality calculation
-    // in a many to many join on null columns (ndv = 0).
-    String query = "select * from functional.nulltable t1 "
-        + "inner join [shuffle] functional.nulltable t2 on t1.d = t2.d";
-    checkCardinality(query, 1, 1);
+  public void testFilters() {
+    IgnoreValueFilter filter = TestUtils.CARDINALITY_FILTER;
+    assertEquals(" foo=bar cardinality=",
+        filter.transform(" foo=bar cardinality=10"));
+    assertEquals(" foo=bar cardinality=",
+        filter.transform(" foo=bar cardinality=10.3K"));
+    assertEquals(" foo=bar cardinality=",
+        filter.transform(" foo=bar cardinality=unavailable"));
+    filter = TestUtils.ROW_SIZE_FILTER;
+    assertEquals(" row-size= cardinality=10.3K",
+        filter.transform(" row-size=10B cardinality=10.3K"));
   }
 }

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java b/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
index 7db5d37..d9e168f 100644
--- a/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
+++ b/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
@@ -74,8 +74,8 @@ import org.apache.impala.util.ExecutorMembershipSnapshot;
 import org.apache.kudu.client.KuduClient;
 import org.apache.kudu.client.KuduScanToken;
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -357,7 +357,8 @@ public class PlannerTestBase extends FrontendTestBase {
 
   private void handleException(String query, String expectedErrorMsg,
       StringBuilder errorLog, StringBuilder actualOutput, Throwable e) {
-    actualOutput.append(e.toString() + "\n");
+    String actualErrorMsg = e.getClass().getSimpleName() + ": " + e.getMessage();
+    actualOutput.append(actualErrorMsg).append("\n");
     if (expectedErrorMsg == null) {
       // Exception is unexpected
       errorLog.append(String.format("Query:\n%s\nError Stack:\n%s\n", query,
@@ -365,7 +366,6 @@ public class PlannerTestBase extends FrontendTestBase {
     } else {
       // Compare actual and expected error messages.
       if (expectedErrorMsg != null && !expectedErrorMsg.isEmpty()) {
-        String actualErrorMsg = e.getClass().getSimpleName() + ": " + e.getMessage();
         if (!actualErrorMsg.toLowerCase().startsWith(expectedErrorMsg.toLowerCase())) {
           errorLog.append("query:\n" + query + "\nExpected error message: '"
               + expectedErrorMsg + "'\nActual error message: '" + actualErrorMsg + "'\n");
@@ -535,6 +535,10 @@ public class PlannerTestBase extends FrontendTestBase {
       if (!testOptions.contains(PlannerTestOption.VALIDATE_RESOURCES)) {
         resultFilters.addAll(TestUtils.RESOURCE_FILTERS);
       }
+      if (!testOptions.contains(PlannerTestOption.VALIDATE_CARDINALITY)) {
+        resultFilters.add(TestUtils.ROW_SIZE_FILTER);
+        resultFilters.add(TestUtils.CARDINALITY_FILTER);
+      }
       String planDiff = TestUtils.compareOutput(
           Lists.newArrayList(explainStr.split("\n")), expectedPlan, true, resultFilters);
       if (!planDiff.isEmpty()) {
@@ -804,6 +808,11 @@ public class PlannerTestBase extends FrontendTestBase {
     // ignore differences in resource values). Operator- and fragment-level resource
     // requirements are only included if EXTENDED_EXPLAIN is also enabled.
     VALIDATE_RESOURCES,
+    // Verify the row size and cardinality fields in the plan. Default is
+    // to ignore these values (for backward compatibility.) Turn this option
+    // on for test that validate cardinality calculations: joins, scan
+    // cardinality, etc.
+    VALIDATE_CARDINALITY
   }
 
   protected void runPlannerTestFile(String testFile, TQueryOptions options) {

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/fe/src/test/java/org/apache/impala/testutil/TestUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/testutil/TestUtils.java b/fe/src/test/java/org/apache/impala/testutil/TestUtils.java
index 5e678d2..26412fb 100644
--- a/fe/src/test/java/org/apache/impala/testutil/TestUtils.java
+++ b/fe/src/test/java/org/apache/impala/testutil/TestUtils.java
@@ -16,6 +16,7 @@
 // under the License.
 
 package org.apache.impala.testutil;
+
 import java.io.StringReader;
 import java.io.StringWriter;
 import java.text.SimpleDateFormat;
@@ -80,8 +81,10 @@ public class TestUtils {
 
     public PathFilter(String prefix) { filterKey_ = prefix; }
 
+    @Override
     public boolean matches(String input) { return input.contains(filterKey_); }
 
+    @Override
     public String transform(String input) {
       String result = input.replaceFirst(filterKey_, "");
       result = result.replaceAll(PATH_FILTER, " ");
@@ -109,8 +112,10 @@ public class TestUtils {
       this.valueRegex = valueRegex;
     }
 
+    @Override
     public boolean matches(String input) { return input.contains(keyPrefix); }
 
+    @Override
     public String transform(String input) {
       return input.replaceAll(keyPrefix + valueRegex, keyPrefix);
     }
@@ -121,6 +126,15 @@ public class TestUtils {
   public static final IgnoreValueFilter FILE_SIZE_FILTER =
       new IgnoreValueFilter("size", BYTE_VALUE_REGEX);
 
+  // Ignore the row-size=8B entries
+  public static final IgnoreValueFilter ROW_SIZE_FILTER =
+      new IgnoreValueFilter("row-size", "\\S+");
+
+  // Ignore cardinality=27.30K or cardinality=unavailable
+  // entries
+  public static final IgnoreValueFilter CARDINALITY_FILTER =
+      new IgnoreValueFilter("cardinality", "\\S+");
+
   // Ignore the exact estimated row count, which depends on the file sizes.
   static IgnoreValueFilter SCAN_RANGE_ROW_COUNT_FILTER =
       new IgnoreValueFilter("max-scan-range-rows", NUMBER_REGEX);
@@ -160,6 +174,7 @@ public class TestUtils {
     }
     int mismatch = -1; // line in actual w/ mismatch
     int maxLen = Math.min(actual.size(), expected.size());
+    outer:
     for (int i = 0; i < maxLen; ++i) {
       String expectedStr = expected.get(i).trim();
       String actualStr = actual.get(i);
@@ -192,33 +207,31 @@ public class TestUtils {
       }
 
       // do a whitespace-insensitive comparison
-      Scanner e = new Scanner(expectedStr);
-      Scanner a = new Scanner(actualStr);
-      while (a.hasNext() && e.hasNext()) {
-        if (containsPrefix) {
-          if (!a.next().contains(e.next())) {
-            mismatch = i;
-            break;
-          }
-        } else {
-          if (!a.next().equals(e.next())) {
+      try (Scanner e = new Scanner(expectedStr);
+           Scanner a = new Scanner(actualStr)) {
+        while (a.hasNext() && e.hasNext()) {
+          String aToken = a.next();
+          String eToken = e.next();
+          if (containsPrefix) {
+            if (!aToken.contains(eToken)) {
+              mismatch = i;
+              break outer;
+            }
+          } else if (!aToken.equals(eToken)) {
             mismatch = i;
-            break;
+            break outer;
           }
         }
-      }
-      if (mismatch != -1) {
-        break;
-      }
 
-      if (ignoreAfter) {
-        if (e.hasNext() && !a.hasNext()) {
+        if (ignoreAfter) {
+          if (e.hasNext() && !a.hasNext()) {
+            mismatch = i;
+            break outer;
+          }
+        } else if (a.hasNext() != e.hasNext()) {
           mismatch = i;
-          break;
+          break outer;
         }
-      } else if (a.hasNext() != e.hasNext()) {
-        mismatch = i;
-        break;
       }
     }
     if (mismatch == -1 && actual.size() < expected.size()) {

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test b/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test
index cf5a78b..18230ab 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test
@@ -7,22 +7,27 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*), count(tinyint_col), min(tinyint_col), max(tinyint_col), sum(tinyint_col), avg(tinyint_col)
+|  row-size=34B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*), count:merge(tinyint_col), min:merge(tinyint_col), max:merge(tinyint_col), sum:merge(tinyint_col), avg:merge(tinyint_col)
+|  row-size=34B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: count(*), count(tinyint_col), min(tinyint_col), max(tinyint_col), sum(tinyint_col), avg(tinyint_col)
+|  row-size=34B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=1B cardinality=11.00K
 ====
 # with grouping
 select tinyint_col, bigint_col, count(*), min(tinyint_col), max(tinyint_col), sum(tinyint_col),
@@ -35,9 +40,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*), min(tinyint_col), max(tinyint_col), sum(tinyint_col), avg(tinyint_col)
 |  group by: bigint_col, tinyint_col
+|  row-size=35B cardinality=9.07K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=9B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -46,15 +53,18 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: count:merge(*), min:merge(tinyint_col), max:merge(tinyint_col), sum:merge(tinyint_col), avg:merge(tinyint_col)
 |  group by: bigint_col, tinyint_col
+|  row-size=35B cardinality=9.07K
 |
 02:EXCHANGE [HASH(bigint_col,tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*), min(tinyint_col), max(tinyint_col), sum(tinyint_col), avg(tinyint_col)
 |  group by: bigint_col, tinyint_col
+|  row-size=35B cardinality=9.07K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=9B cardinality=11.00K
 ====
 # avg substitution
 select avg(id)
@@ -66,30 +76,37 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=10]
 |  order by: avg(zip) ASC
+|  row-size=16B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
 |  output: avg(id), count(id), avg(zip)
 |  having: count(id) > 0
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=12B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=10]
 |  order by: avg(zip) ASC
+|  row-size=16B cardinality=0
 |
 04:AGGREGATE [FINALIZE]
 |  output: avg:merge(id), count:merge(id), avg:merge(zip)
 |  having: count(id) > 0
+|  row-size=24B cardinality=0
 |
 03:EXCHANGE [UNPARTITIONED]
 |
 01:AGGREGATE
 |  output: avg(id), count(id), avg(zip)
+|  row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl]
    partitions=1/1 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # Test correct removal of redundant group-by expressions (IMPALA-817)
 select int_col + int_col, int_col * int_col
@@ -103,9 +120,11 @@ PLAN-ROOT SINK
 |  group by: int_col + int_col, int_col * int_col
 |  having: int_col * int_col < 0
 |  limit: 10
+|  row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -116,14 +135,17 @@ PLAN-ROOT SINK
 |  group by: int_col + int_col, int_col * int_col
 |  having: int_col * int_col < 0
 |  limit: 10
+|  row-size=16B cardinality=10
 |
 02:EXCHANGE [HASH(int_col + int_col,int_col * int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col + int_col, int_col * int_col
+|  row-size=16B cardinality=11.00K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=4B cardinality=11.00K
 ====
 # Tests that a having predicate triggers slot materialization (IMPALA-846).
 select count(*) from
@@ -138,17 +160,21 @@ PLAN-ROOT SINK
 |  output: count(*), count(t2.int_col), count(t1.bigint_col)
 |  group by: t1.tinyint_col, t2.smallint_col
 |  having: count(t2.int_col) = count(t1.bigint_col)
+|  row-size=27B cardinality=2
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.smallint_col = t2.smallint_col
 |  runtime filters: RF000 <- t2.smallint_col
+|  row-size=17B cardinality=5.84K
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
+|     row-size=6B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> t1.smallint_col
+   row-size=11B cardinality=7.30K
 ====
 # Tests proper slot materialization of agg-tuple slots for avg (IMP-1271).
 # 't.x > 10' is picked up as an unassigned conjunct, and not as a binding
@@ -165,9 +191,11 @@ PLAN-ROOT SINK
 |  output: avg(bigint_col)
 |  group by: int_col
 |  having: avg(bigint_col) > 10
+|  row-size=12B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # test distributed aggregation over unions (IMPALA-831)
 # non-distinct agg without grouping over a union
@@ -182,35 +210,44 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  limit: 10
+|  row-size=8B cardinality=1
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=0B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=0B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 05:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  limit: 10
+|  row-size=8B cardinality=1
 |
 04:EXCHANGE [UNPARTITIONED]
 |
 03:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=0B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=0B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # non-distinct agg with grouping over a union
 select count(*) from
@@ -226,15 +263,19 @@ PLAN-ROOT SINK
 |  output: count(*)
 |  group by: bigint_col
 |  limit: 10
+|  row-size=16B cardinality=10
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=8B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -245,21 +286,26 @@ PLAN-ROOT SINK
 |  output: count:merge(*)
 |  group by: t.bigint_col
 |  limit: 10
+|  row-size=16B cardinality=10
 |
 04:EXCHANGE [HASH(t.bigint_col)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: bigint_col
+|  row-size=16B cardinality=20
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=8B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=8B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=8B cardinality=7.30K
 ====
 # distinct agg without grouping over a union
 select count(distinct int_col)
@@ -274,46 +320,58 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: count(int_col)
 |  limit: 10
+|  row-size=8B cardinality=1
 |
 03:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=20
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=4B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
 |  limit: 10
+|  row-size=8B cardinality=1
 |
 07:EXCHANGE [UNPARTITIONED]
 |
 04:AGGREGATE
 |  output: count(int_col)
+|  row-size=8B cardinality=1
 |
 06:AGGREGATE
 |  group by: int_col
+|  row-size=4B cardinality=20
 |
 05:EXCHANGE [HASH(int_col)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=20
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=4B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # distinct agg with grouping over a union
 select count(distinct int_col)
@@ -330,18 +388,23 @@ PLAN-ROOT SINK
 |  output: count(int_col)
 |  group by: t.bigint_col
 |  limit: 10
+|  row-size=16B cardinality=10
 |
 03:AGGREGATE
 |  group by: bigint_col, int_col
+|  row-size=12B cardinality=400
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=12B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -352,29 +415,36 @@ PLAN-ROOT SINK
 |  output: count:merge(int_col)
 |  group by: t.bigint_col
 |  limit: 10
+|  row-size=16B cardinality=10
 |
 07:EXCHANGE [HASH(t.bigint_col)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(int_col)
 |  group by: t.bigint_col
+|  row-size=16B cardinality=10
 |
 06:AGGREGATE
 |  group by: t.bigint_col, int_col
+|  row-size=12B cardinality=400
 |
 05:EXCHANGE [HASH(t.bigint_col,int_col)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: bigint_col, int_col
+|  row-size=12B cardinality=400
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=12B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # mixed distinct and non-distinct agg without grouping over a union
 select count(smallint_col), count(distinct int_col)
@@ -389,49 +459,61 @@ PLAN-ROOT SINK
 04:AGGREGATE [FINALIZE]
 |  output: count(int_col), count:merge(smallint_col)
 |  limit: 10
+|  row-size=16B cardinality=1
 |
 03:AGGREGATE
 |  output: count(smallint_col)
 |  group by: int_col
+|  row-size=12B cardinality=20
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=6B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=6B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=6B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col), count:merge(smallint_col)
 |  limit: 10
+|  row-size=16B cardinality=1
 |
 07:EXCHANGE [UNPARTITIONED]
 |
 04:AGGREGATE
 |  output: count(int_col), count:merge(smallint_col)
+|  row-size=16B cardinality=1
 |
 06:AGGREGATE
 |  output: count:merge(smallint_col)
 |  group by: int_col
+|  row-size=12B cardinality=20
 |
 05:EXCHANGE [HASH(int_col)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(smallint_col)
 |  group by: int_col
+|  row-size=12B cardinality=20
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=6B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=6B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=6B cardinality=7.30K
 ====
 # mixed distinct and non-distinct agg with grouping over a union
 select count(smallint_col), count(distinct int_col)
@@ -448,19 +530,24 @@ PLAN-ROOT SINK
 |  output: count(int_col), count:merge(smallint_col)
 |  group by: t.bigint_col
 |  limit: 10
+|  row-size=24B cardinality=10
 |
 03:AGGREGATE
 |  output: count(smallint_col)
 |  group by: bigint_col, int_col
+|  row-size=20B cardinality=400
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=14B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=14B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=14B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -471,31 +558,38 @@ PLAN-ROOT SINK
 |  output: count:merge(int_col), count:merge(smallint_col)
 |  group by: t.bigint_col
 |  limit: 10
+|  row-size=24B cardinality=10
 |
 07:EXCHANGE [HASH(t.bigint_col)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(int_col), count:merge(smallint_col)
 |  group by: t.bigint_col
+|  row-size=24B cardinality=10
 |
 06:AGGREGATE
 |  output: count:merge(smallint_col)
 |  group by: t.bigint_col, int_col
+|  row-size=20B cardinality=400
 |
 05:EXCHANGE [HASH(t.bigint_col,int_col)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(smallint_col)
 |  group by: bigint_col, int_col
+|  row-size=20B cardinality=400
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=14B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=14B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=14B cardinality=7.30K
 ====
 # mixed distinct and non-distinct agg with grouping over a union distinct
 select count(smallint_col), count(distinct int_col)
@@ -512,22 +606,28 @@ PLAN-ROOT SINK
 |  output: count(int_col), count:merge(smallint_col)
 |  group by: t.bigint_col
 |  limit: 10
+|  row-size=24B cardinality=10
 |
 04:AGGREGATE
 |  output: count(smallint_col)
 |  group by: bigint_col, int_col
+|  row-size=20B cardinality=400
 |
 03:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -538,39 +638,48 @@ PLAN-ROOT SINK
 |  output: count:merge(int_col), count:merge(smallint_col)
 |  group by: t.bigint_col
 |  limit: 10
+|  row-size=24B cardinality=10
 |
 10:EXCHANGE [HASH(t.bigint_col)]
 |
 05:AGGREGATE [STREAMING]
 |  output: count(int_col), count:merge(smallint_col)
 |  group by: t.bigint_col
+|  row-size=24B cardinality=10
 |
 09:AGGREGATE
 |  output: count:merge(smallint_col)
 |  group by: t.bigint_col, int_col
+|  row-size=20B cardinality=400
 |
 08:EXCHANGE [HASH(t.bigint_col,int_col)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(smallint_col)
 |  group by: bigint_col, int_col
+|  row-size=20B cardinality=400
 |
 07:AGGREGATE [FINALIZE]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 06:EXCHANGE [HASH(id,bool_col,tinyint_col,smallint_col,int_col,bigint_col,float_col,double_col,date_string_col,string_col,timestamp_col,year,month)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  row-size=89B cardinality=7.40K
 |
 00:UNION
 |  pass-through-operands: all
+|  row-size=89B cardinality=7.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Mixed distinct and non-distinct agg with intermediate type different from input type
 # Regression test for IMPALA-5251 to exercise validateMergeAggFn() in FunctionCallExpr.
@@ -581,36 +690,44 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(l_partkey), avg:merge(l_quantity), ndv:merge(l_discount)
+|  row-size=24B cardinality=1
 |
 01:AGGREGATE
 |  output: avg(l_quantity), ndv(l_discount)
 |  group by: l_partkey
+|  row-size=24B cardinality=200.52K
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.92MB
+   partitions=1/1 files=3 size=193.60MB
+   row-size=24B cardinality=6.00M
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(l_partkey), avg:merge(l_quantity), ndv:merge(l_discount)
+|  row-size=24B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: count(l_partkey), avg:merge(l_quantity), ndv:merge(l_discount)
+|  row-size=24B cardinality=1
 |
 04:AGGREGATE
 |  output: avg:merge(l_quantity), ndv:merge(l_discount)
 |  group by: l_partkey
+|  row-size=24B cardinality=200.52K
 |
 03:EXCHANGE [HASH(l_partkey)]
 |
 01:AGGREGATE [STREAMING]
 |  output: avg(l_quantity), ndv(l_discount)
 |  group by: l_partkey
+|  row-size=24B cardinality=200.52K
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.92MB
+   partitions=1/1 files=3 size=193.60MB
+   row-size=24B cardinality=6.00M
 ====
 # test that aggregations are not placed below an unpartitioned exchange with a limit
 select count(*) from (select * from functional.alltypes limit 10) t
@@ -619,15 +736,18 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    limit: 10
+   row-size=0B cardinality=10
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:EXCHANGE [UNPARTITIONED]
 |  limit: 10
@@ -635,6 +755,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    limit: 10
+   row-size=0B cardinality=10
 ====
 # test that aggregations are not placed below an unpartitioned exchange with a limit
 select count(*) from
@@ -646,21 +767,26 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 00:UNION
 |  pass-through-operands: all
 |  limit: 10
+|  row-size=0B cardinality=10
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=0B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 04:EXCHANGE [UNPARTITIONED]
 |  limit: 10
@@ -668,12 +794,15 @@ PLAN-ROOT SINK
 00:UNION
 |  pass-through-operands: all
 |  limit: 10
+|  row-size=0B cardinality=10
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=0B cardinality=100
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=0B cardinality=7.30K
 ====
 # test that limits are applied at the top-level merge aggregation node for non-grouping
 # distinct aggregation (IMPALA-1802)
@@ -690,66 +819,82 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count(cnt)
 |  limit: 1
+|  row-size=8B cardinality=1
 |
 05:AGGREGATE
 |  group by: count(t1.id)
+|  row-size=8B cardinality=1
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(t1.id)
 |  limit: 10
+|  row-size=8B cardinality=1
 |
 03:AGGREGATE
 |  group by: t1.id
+|  row-size=4B cardinality=9
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=8B cardinality=9
 |
 |--01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.id
+   row-size=4B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(cnt)
 |  limit: 1
+|  row-size=8B cardinality=1
 |
 05:AGGREGATE
 |  group by: count(t1.id)
+|  row-size=8B cardinality=1
 |
 11:AGGREGATE [FINALIZE]
 |  output: count:merge(t1.id)
 |  limit: 10
+|  row-size=8B cardinality=1
 |
 10:EXCHANGE [UNPARTITIONED]
 |
 04:AGGREGATE
 |  output: count(t1.id)
+|  row-size=8B cardinality=1
 |
 09:AGGREGATE
 |  group by: t1.id
+|  row-size=4B cardinality=9
 |
 08:EXCHANGE [HASH(t1.id)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: t1.id
+|  row-size=4B cardinality=9
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=8B cardinality=9
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny t2]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.id
+   row-size=4B cardinality=11.00K
 ====
 # IMPALA-2089: Tests correct elimination of redundant predicates.
 # The equivalences between inline-view slots are enforced inside the inline-view plan.
@@ -768,10 +913,12 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  group by: tinyint_col, smallint_col, int_col + int_col, coalesce(bigint_col, year)
 |  having: int_col + int_col = coalesce(bigint_col, year), smallint_col = int_col + int_col
+|  row-size=19B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.tinyint_col = functional.alltypes.smallint_col
+   row-size=19B cardinality=730
 ====
 # IMPALA-1917: Test NULL literals inside inline view with grouping aggregation.
 select cnt from
@@ -784,9 +931,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: bool_col, CAST(NULL AS INT)
+|  row-size=13B cardinality=2
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=1B cardinality=8
 ====
 # IMPALA-1917: Test NULL literals inside inline view with grouping aggregation.
 select cnt from
@@ -799,12 +948,15 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col)
 |  group by: bool_col, NULL
+|  row-size=10B cardinality=2
 |
 01:AGGREGATE
 |  group by: bool_col, NULL, int_col
+|  row-size=6B cardinality=4
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=5B cardinality=8
 ====
 # test simple group_concat with distinct
 select group_concat(distinct string_col) from functional.alltypesagg
@@ -813,33 +965,41 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: group_concat(string_col)
+|  row-size=12B cardinality=1
 |
 01:AGGREGATE
 |  group by: string_col
+|  row-size=15B cardinality=963
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: group_concat:merge(string_col)
+|  row-size=12B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: group_concat(string_col)
+|  row-size=12B cardinality=1
 |
 04:AGGREGATE
 |  group by: string_col
+|  row-size=15B cardinality=963
 |
 03:EXCHANGE [HASH(string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: string_col
+|  row-size=15B cardinality=963
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ====
 # test group_concat and a group by
 select day, group_concat(distinct string_col)
@@ -851,25 +1011,31 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: group_concat(string_col)
 |  group by: day
+|  row-size=16B cardinality=11
 |
 02:AGGREGATE
 |  group by: day, string_col
+|  row-size=19B cardinality=1.10K
 |
 01:TOP-N [LIMIT=99999]
 |  order by: id ASC
+|  row-size=23B cardinality=1.10K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
    predicates: day = id % 100
+   row-size=23B cardinality=1.10K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: group_concat(string_col)
 |  group by: day
+|  row-size=16B cardinality=11
 |
 02:AGGREGATE
 |  group by: day, string_col
+|  row-size=19B cardinality=1.10K
 |
 04:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: id ASC
@@ -877,10 +1043,12 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=99999]
 |  order by: id ASC
+|  row-size=23B cardinality=1.10K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
    predicates: day = id % 100
+   row-size=23B cardinality=1.10K
 ====
 # test group_concat with distinct together with another distinct aggregate function
 select count(distinct cast(timestamp_col as string)),
@@ -892,12 +1060,15 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(CAST(timestamp_col AS STRING)), group_concat(CAST(timestamp_col AS STRING))
 |  group by: year
+|  row-size=24B cardinality=1
 |
 01:AGGREGATE
 |  group by: year, CAST(timestamp_col AS STRING)
+|  row-size=20B cardinality=10.21K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=20B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -906,23 +1077,28 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(CAST(timestamp_col AS STRING)), group_concat:merge(CAST(timestamp_col AS STRING))
 |  group by: year
+|  row-size=24B cardinality=1
 |
 05:EXCHANGE [HASH(year)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(CAST(timestamp_col AS STRING)), group_concat(CAST(timestamp_col AS STRING))
 |  group by: year
+|  row-size=24B cardinality=1
 |
 04:AGGREGATE
 |  group by: year, CAST(timestamp_col AS STRING)
+|  row-size=20B cardinality=10.21K
 |
 03:EXCHANGE [HASH(year,CAST(timestamp_col AS STRING))]
 |
 01:AGGREGATE [STREAMING]
 |  group by: year, CAST(timestamp_col AS STRING)
+|  row-size=20B cardinality=10.21K
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=20B cardinality=11.00K
 ====
 # test group_concat distinct with other non-distinct aggregate functions
  select group_concat(distinct string_col), count(*) from functional.alltypesagg
@@ -931,36 +1107,44 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: group_concat(string_col), count:merge(*)
+|  row-size=20B cardinality=1
 |
 01:AGGREGATE
 |  output: count(*)
 |  group by: string_col
+|  row-size=23B cardinality=963
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: group_concat:merge(string_col), count:merge(*)
+|  row-size=20B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: group_concat(string_col), count:merge(*)
+|  row-size=20B cardinality=1
 |
 04:AGGREGATE
 |  output: count:merge(*)
 |  group by: string_col
+|  row-size=23B cardinality=963
 |
 03:EXCHANGE [HASH(string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: string_col
+|  row-size=23B cardinality=963
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ====
 # test group_concat distinct with other aggregate functions, with custom separator
 select group_concat(distinct string_col, '-'), sum(int_col), count(distinct string_col)
@@ -970,36 +1154,44 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: group_concat(string_col, '-'), count(string_col), sum:merge(int_col)
+|  row-size=28B cardinality=1
 |
 01:AGGREGATE
 |  output: sum(int_col)
 |  group by: string_col
+|  row-size=23B cardinality=963
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=19B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: group_concat:merge(string_col, '-'), count:merge(string_col), sum:merge(int_col)
+|  row-size=28B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: group_concat(string_col, '-'), count(string_col), sum:merge(int_col)
+|  row-size=28B cardinality=1
 |
 04:AGGREGATE
 |  output: sum:merge(int_col)
 |  group by: string_col
+|  row-size=23B cardinality=963
 |
 03:EXCHANGE [HASH(string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: sum(int_col)
 |  group by: string_col
+|  row-size=23B cardinality=963
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=19B cardinality=11.00K
 ====
 # test group_concat distinct with other aggregate functions, with custom separator
 # and a group by
@@ -1012,13 +1204,16 @@ PLAN-ROOT SINK
 02:AGGREGATE [FINALIZE]
 |  output: count(date_string_col), group_concat(date_string_col, '-'), count:merge(*)
 |  group by: month, year
+|  row-size=36B cardinality=1
 |
 01:AGGREGATE
 |  output: count(*)
 |  group by: month, year, date_string_col
+|  row-size=36B cardinality=10
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=28B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1027,25 +1222,30 @@ PLAN-ROOT SINK
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(date_string_col), group_concat:merge(date_string_col, '-'), count:merge(*)
 |  group by: month, year
+|  row-size=36B cardinality=1
 |
 05:EXCHANGE [HASH(month,year)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(date_string_col), group_concat(date_string_col, '-'), count:merge(*)
 |  group by: month, year
+|  row-size=36B cardinality=1
 |
 04:AGGREGATE
 |  output: count:merge(*)
 |  group by: month, year, date_string_col
+|  row-size=36B cardinality=10
 |
 03:EXCHANGE [HASH(month,year,date_string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: month, year, date_string_col
+|  row-size=36B cardinality=10
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=28B cardinality=11.00K
 ====
 # test multiple group_concat distinct, each with a different separator
 select group_concat(distinct string_col), group_concat(distinct string_col, '-'),
@@ -1055,33 +1255,41 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: group_concat(string_col), group_concat(string_col, '-'), group_concat(string_col, '---')
+|  row-size=36B cardinality=1
 |
 01:AGGREGATE
 |  group by: string_col
+|  row-size=15B cardinality=963
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: group_concat:merge(string_col), group_concat:merge(string_col, '-'), group_concat:merge(string_col, '---')
+|  row-size=36B cardinality=1
 |
 05:EXCHANGE [UNPARTITIONED]
 |
 02:AGGREGATE
 |  output: group_concat(string_col), group_concat(string_col, '-'), group_concat(string_col, '---')
+|  row-size=36B cardinality=1
 |
 04:AGGREGATE
 |  group by: string_col
+|  row-size=15B cardinality=963
 |
 03:EXCHANGE [HASH(string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: string_col
+|  row-size=15B cardinality=963
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ====
 # IMPALA-852: Aggregation only in the HAVING clause.
 select 1 from functional.alltypestiny having count(*) > 0
@@ -1091,9 +1299,11 @@ PLAN-ROOT SINK
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  having: count(*) > 0
+|  row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=0B cardinality=8
 ====
 # Grouping aggregation where input is partitioned on grouping expr.
 # Planner should not redundantly repartition the data that was already partitioned on
@@ -1115,22 +1325,26 @@ PLAN-ROOT SINK
 |  group by: c_custkey
 |  having: count(*) < 150000
 |  limit: 1000000
+|  row-size=16B cardinality=15.00K
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: c_custkey = o_custkey
 |  runtime filters: RF000 <- o_custkey
+|  row-size=18B cardinality=91.47K
 |
 |--05:EXCHANGE [HASH(o_custkey)]
 |  |
 |  01:SCAN HDFS [tpch_parquet.orders]
-|     partitions=1/1 files=2 size=54.20MB
+|     partitions=1/1 files=2 size=54.07MB
+|     row-size=8B cardinality=1.50M
 |
 04:EXCHANGE [HASH(c_custkey)]
 |
 00:SCAN HDFS [tpch_parquet.customer]
-   partitions=1/1 files=1 size=12.34MB
+   partitions=1/1 files=1 size=12.31MB
    predicates: c_nationkey = 16
    runtime filters: RF000 -> c_custkey
+   row-size=10B cardinality=6.00K
 ====
 # Distinct aggregation where input is partitioned on distinct expr.
 # Planner should not redundantly repartition the data that was already partitioned on
@@ -1147,29 +1361,35 @@ PLAN-ROOT SINK
 |  output: count:merge(c_custkey)
 |  having: count(c_custkey) > 50
 |  limit: 50
+|  row-size=8B cardinality=0
 |
 07:EXCHANGE [UNPARTITIONED]
 |
 04:AGGREGATE
 |  output: count(c_custkey)
+|  row-size=8B cardinality=0
 |
 03:AGGREGATE
 |  group by: c_custkey
+|  row-size=8B cardinality=150.00K
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=16B cardinality=1.50M
 |
 |--06:EXCHANGE [HASH(c_custkey)]
 |  |
 |  01:SCAN HDFS [tpch_parquet.customer]
-|     partitions=1/1 files=1 size=12.34MB
+|     partitions=1/1 files=1 size=12.31MB
+|     row-size=8B cardinality=150.00K
 |
 05:EXCHANGE [HASH(o_custkey)]
 |
 00:SCAN HDFS [tpch_parquet.orders]
-   partitions=1/1 files=2 size=54.20MB
+   partitions=1/1 files=2 size=54.07MB
    runtime filters: RF000 -> o_custkey
+   row-size=8B cardinality=1.50M
 ====
 # Distinct grouping aggregation where input is partitioned on distinct and grouping exprs.
 # Planner should not redundantly repartition the data that was already partitioned on
@@ -1185,30 +1405,36 @@ PLAN-ROOT SINK
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(c_custkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 07:EXCHANGE [HASH(c_custkey)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(c_custkey)
 |  group by: c_custkey
+|  row-size=16B cardinality=150.00K
 |
 03:AGGREGATE
 |  group by: c_custkey, c_custkey
+|  row-size=16B cardinality=1.50M
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: o_custkey = c_custkey
 |  runtime filters: RF000 <- c_custkey
+|  row-size=16B cardinality=1.50M
 |
 |--06:EXCHANGE [HASH(c_custkey)]
 |  |
 |  01:SCAN HDFS [tpch_parquet.customer]
-|     partitions=1/1 files=1 size=12.34MB
+|     partitions=1/1 files=1 size=12.31MB
+|     row-size=8B cardinality=150.00K
 |
 05:EXCHANGE [HASH(o_custkey)]
 |
 00:SCAN HDFS [tpch_parquet.orders]
-   partitions=1/1 files=2 size=54.20MB
+   partitions=1/1 files=2 size=54.07MB
    runtime filters: RF000 -> o_custkey
+   row-size=8B cardinality=1.50M
 ====
 # Complex aggregation when two joins and an agg end up in same fragment.
 select l_orderkey, l_returnflag, count(*) from (
@@ -1233,31 +1459,37 @@ PLAN-ROOT SINK
 |  group by: tpch_parquet.lineitem.l_orderkey, tpch_parquet.lineitem.l_returnflag
 |  having: count(*) > 10
 |  limit: 10
+|  row-size=29B cardinality=10
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: o_custkey = c_custkey, o_comment = c_phone
 |  runtime filters: RF000 <- c_custkey, RF001 <- c_phone
+|  row-size=160B cardinality=607.19K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpch_parquet.customer]
-|     partitions=1/1 files=1 size=12.34MB
+|     partitions=1/1 files=1 size=12.31MB
+|     row-size=35B cardinality=150.00K
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: l_orderkey = o_orderkey, l_returnflag = o_clerk
 |  runtime filters: RF004 <- o_orderkey, RF005 <- o_clerk
+|  row-size=125B cardinality=5.76M
 |
 |--07:EXCHANGE [HASH(o_orderkey,o_clerk)]
 |  |
 |  01:SCAN HDFS [tpch_parquet.orders]
-|     partitions=1/1 files=2 size=54.20MB
+|     partitions=1/1 files=2 size=54.07MB
 |     runtime filters: RF000 -> o_custkey, RF001 -> o_comment
+|     row-size=104B cardinality=1.50M
 |
 06:EXCHANGE [HASH(l_orderkey,l_returnflag)]
 |
 00:SCAN HDFS [tpch_parquet.lineitem]
-   partitions=1/1 files=3 size=193.92MB
+   partitions=1/1 files=3 size=193.60MB
    runtime filters: RF004 -> l_orderkey, RF005 -> l_returnflag
+   row-size=21B cardinality=6.00M
 ====
 # IMPALA-4263: Grouping agg needs a merge step because the grouping exprs reference a
 # tuple that is made nullable in the join fragment.
@@ -1274,25 +1506,30 @@ PLAN-ROOT SINK
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
 |  group by: t2.id
+|  row-size=12B cardinality=99
 |
 06:EXCHANGE [HASH(t2.id)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: t2.id
+|  row-size=12B cardinality=99
 |
 02:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: t1.id = t2.id
+|  row-size=8B cardinality=7.30K
 |
 |--05:EXCHANGE [HASH(t2.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 04:EXCHANGE [HASH(t1.id)]
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-4263: Grouping agg is placed in the join fragment and has no merge step.
 select /* +straight_join */ t1.id, count(*)
@@ -1308,19 +1545,23 @@ PLAN-ROOT SINK
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: t1.id
+|  row-size=12B cardinality=7.30K
 |
 02:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: t1.id = t2.id
+|  row-size=8B cardinality=7.30K
 |
 |--05:EXCHANGE [HASH(t2.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 04:EXCHANGE [HASH(t1.id)]
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-4263: Grouping agg is placed in the second join fragment and has no merge step.
 # The grouping exprs reference a nullable tuple (t2), but that tuple is made nullable in
@@ -1341,27 +1582,33 @@ PLAN-ROOT SINK
 05:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: t2.id
+|  row-size=12B cardinality=99
 |
 04:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: t2.id = t3.id
+|  row-size=16B cardinality=73.00K
 |
 |--09:EXCHANGE [HASH(t3.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 08:EXCHANGE [HASH(t2.id)]
 |
 03:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: t1.int_col = t2.int_col
+|  row-size=12B cardinality=73.00K
 |
 |--07:EXCHANGE [HASH(t2.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall t2]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=8B cardinality=100
 |
 06:EXCHANGE [HASH(t1.int_col)]
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====


[16/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test b/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test
index bf7f8b8..406f83b 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test
@@ -7,6 +7,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.allcomplextypes.int_array_col]
    partitions=0/0 files=0 size=0B
    predicates: item > 10
+   row-size=4B cardinality=0
 ====
 # Scan of a nested map with scalar key and value.
 select * from functional.allcomplextypes.int_map_col
@@ -17,6 +18,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.allcomplextypes.int_map_col]
    partitions=0/0 files=0 size=0B
    predicates: value < 10, key = 'test'
+   row-size=16B cardinality=0
 ====
 # Scan of a deeply nested collection.
 select count(f21) from functional.allcomplextypes.complex_nested_struct_col.f2.f12
@@ -26,10 +28,12 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(f21)
+|  row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional.allcomplextypes.complex_nested_struct_col.f2.f12]
    partitions=0/0 files=0 size=0B
    predicates: key = 'test'
+   row-size=20B cardinality=0
 ====
 # Join on two nested collections with structs.
 select count(*) from
@@ -42,19 +46,23 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.f1 = b.f1
 |  runtime filters: RF000 <- b.f1
+|  row-size=40B cardinality=0
 |
 |--01:SCAN HDFS [functional.allcomplextypes.struct_map_col b]
 |     partitions=0/0 files=0 size=0B
 |     predicates: b.f2 = 'test_b'
+|     row-size=20B cardinality=0
 |
 00:SCAN HDFS [functional.allcomplextypes.struct_array_col a]
    partitions=0/0 files=0 size=0B
    predicates: a.f2 = 'test_a'
    runtime filters: RF000 -> a.f1
+   row-size=20B cardinality=0
 ====
 # Test assignment of equi-join conjuncts and enforcement of
 # slot equivalences in a bushy join.
@@ -71,30 +79,39 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: c_nationkey = n_nationkey, s_comment = n_comment
 |  runtime filters: RF000 <- n_nationkey, RF001 <- n_comment
+|  row-size=189B cardinality=10.16K
 |
 |--01:SUBPLAN
+|  |  row-size=26B cardinality=50
 |  |
 |  |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=26B cardinality=10
 |  |  |
 |  |  |--02:SINGULAR ROW SRC
+|  |  |     row-size=12B cardinality=1
 |  |  |
 |  |  03:UNNEST [r.r_nations n]
+|  |     row-size=0B cardinality=10
 |  |
 |  00:SCAN HDFS [tpch_nested_parquet.region r]
-|     partitions=1/1 files=1 size=4.18KB
+|     partitions=1/1 files=1 size=3.44KB
 |     predicates: !empty(r.r_nations)
+|     row-size=12B cardinality=5
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: c.c_nationkey = s.s_nationkey, c_comment = s_comment
 |  runtime filters: RF004 <- s.s_nationkey, RF005 <- s_comment
+|  row-size=163B cardinality=10.16K
 |
 |--06:SCAN HDFS [tpch_nested_parquet.supplier s]
-|     partitions=1/1 files=1 size=111.08MB
+|     partitions=1/1 files=1 size=41.79MB
 |     runtime filters: RF000 -> s.s_nationkey, RF001 -> s_comment
+|     row-size=77B cardinality=10.00K
 |
 05:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
    runtime filters: RF000 -> c_nationkey, RF001 -> c.c_comment, RF004 -> c.c_nationkey, RF005 -> c_comment
+   row-size=87B cardinality=150.00K
 ====
 # Test subplans: Cross join of parent and relative ref.
 select a.id, b.item from functional.allcomplextypes a cross join a.int_array_col b
@@ -103,17 +120,22 @@ where a.id < 10 and b.item % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, !empty(a.int_array_col)
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Left semi join of parent and relative ref without On-clause.
 select a.id from functional.allcomplextypes a left semi join a.int_array_col b
@@ -122,16 +144,21 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, !empty(a.int_array_col)
+   row-size=16B cardinality=0
 ====
 # Test subplans: Right semi join of parent and relative ref without On-clause.
 select b.item from functional.allcomplextypes a right semi join a.int_array_col b
@@ -140,17 +167,22 @@ where b.item % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=4B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  |  row-size=4B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: !empty(a.int_array_col)
    predicates on b: b.item % 2 = 0
+   row-size=12B cardinality=0
 ====
 # Test subplans: Left anti join of parent and relative ref without On-clause.
 select a.id from functional.allcomplextypes a left anti join a.int_array_col b
@@ -159,16 +191,21 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT ANTI JOIN]
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
+   row-size=16B cardinality=0
 ====
 # Test subplans: Same as above but with an equivalent inline view on the rhs.
 select a.id from functional.allcomplextypes a
@@ -178,16 +215,21 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT ANTI JOIN]
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
+   row-size=16B cardinality=0
 ====
 # Test subplans: Right anti join of parent and relative ref without On-clause.
 select b.item from functional.allcomplextypes a right anti join a.int_array_col b
@@ -196,16 +238,21 @@ where b.item % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=4B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT ANTI JOIN]
+|  |  row-size=4B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates on b: b.item % 2 = 0
+   row-size=12B cardinality=0
 ====
 # Test subplans: Left outer join of parent and relative ref without On-clause.
 select a.id, b.item from functional.allcomplextypes a left outer join a.int_array_col b
@@ -214,16 +261,21 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
+   row-size=16B cardinality=0
 ====
 # Test subplans: Same as above but with an equivalent inline view on the rhs.
 select a.id, v.item from functional.allcomplextypes a
@@ -233,16 +285,21 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col]
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
+   row-size=16B cardinality=0
 ====
 # Test subplans: Right outer join of parent and relative ref without On-clause.
 select a.id, b.item from functional.allcomplextypes a right outer join a.int_array_col b
@@ -251,16 +308,21 @@ where b.item % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT OUTER JOIN]
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Full outer join of parent and relative ref without On-clause.
 select a.id, b.item from functional.allcomplextypes a full outer join a.int_array_col b
@@ -269,18 +331,23 @@ where b.item % 2 = 0 and a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [FULL OUTER JOIN]
 |  |  predicates: a.id < 10, b.item % 2 = 0
+|  |  row-size=20B cardinality=11
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Non-equi inner join of parent and relative ref.
 select a.id, b.item from functional.allcomplextypes a, a.int_array_col b
@@ -289,18 +356,23 @@ where a.id < 10 and b.item % 2 = 0 and a.id < b.item
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: a.id < b.item
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, !empty(a.int_array_col)
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Non-equi left semi join of parent and relative ref.
 select a.id from functional.allcomplextypes a
@@ -310,18 +382,23 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  join predicates: a.id < b.item
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, !empty(a.int_array_col)
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Non-equi right semi join of parent and relative ref.
 select b.item from functional.allcomplextypes a
@@ -331,18 +408,23 @@ where b.item % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=4B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT SEMI JOIN]
 |  |  join predicates: a.id < b.item
+|  |  row-size=4B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, !empty(a.int_array_col)
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Non-equi left anti join of parent and relative ref.
 select a.id from functional.allcomplextypes a
@@ -352,18 +434,23 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT ANTI JOIN]
 |  |  join predicates: a.id < b.item
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Non-equi right anti join of parent and relative ref.
 select b.item from functional.allcomplextypes a
@@ -373,18 +460,23 @@ where b.item % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=4B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT ANTI JOIN]
 |  |  join predicates: a.id < b.item
+|  |  row-size=4B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Non-equi left outer join of parent and relative ref.
 select a.id, b.item from functional.allcomplextypes a
@@ -394,18 +486,23 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  join predicates: a.id < b.item
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Non-equi right outer join of parent and relative ref.
 select a.id, b.item from functional.allcomplextypes a
@@ -415,18 +512,23 @@ where b.item % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT OUTER JOIN]
 |  |  join predicates: a.id < b.item
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Non-equi full outer join of parent and relative ref.
 select a.id, b.item from functional.allcomplextypes a
@@ -436,18 +538,23 @@ where b.item % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [FULL OUTER JOIN]
 |  |  join predicates: a.id < 10, a.id < b.item
 |  |  predicates: b.item % 2 = 0
+|  |  row-size=20B cardinality=11
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates on b: b.item % 2 = 0
+   row-size=16B cardinality=0
 ====
 # Test subplans: Inner equi-join of parent and relative ref.
 select a.id, b.f1, b.f2 from functional.allcomplextypes a
@@ -457,19 +564,24 @@ where a.id < 10 and b.f1 % 2 = 0 and b.f1 = a.id and b.f1 < a.year
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=40B cardinality=0
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: b.f1 = a.id
 |  |  predicates: b.f1 < a.year
+|  |  row-size=40B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, a.id % 2 = 0, !empty(a.struct_array_col)
    predicates on b: b.f1 < 10, b.f1 % 2 = 0
+   row-size=20B cardinality=0
 ====
 # Test subplans: Left-semi equi-join of parent and relative ref.
 select a.id from functional.allcomplextypes a
@@ -480,18 +592,23 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, a.id % 2 = 0, !empty(a.struct_array_col)
    predicates on b: b.f1 % 2 = 0, b.f1 < 10
+   row-size=20B cardinality=0
 ====
 # Test subplans: Right-semi equi-join of parent and relative ref.
 select b.f1, b.f2 from functional.allcomplextypes a
@@ -502,18 +619,23 @@ where b.f1 % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT SEMI JOIN]
 |  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, a.id % 2 = 0, !empty(a.struct_array_col)
    predicates on b: b.f1 < 10, b.f1 % 2 = 0
+   row-size=20B cardinality=0
 ====
 # Test subplans: Left-anti equi-join of parent and relative ref.
 select a.id from functional.allcomplextypes a
@@ -524,18 +646,23 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT ANTI JOIN]
 |  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
    predicates on b: b.f1 % 2 = 0, b.f1 < 10
+   row-size=20B cardinality=0
 ====
 # Test subplans: Right-anti equi-join of parent and relative ref.
 select b.f1, b.f2 from functional.allcomplextypes a
@@ -546,18 +673,23 @@ where b.f1 % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT ANTI JOIN]
 |  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, a.id % 2 = 0
    predicates on b: b.f1 % 2 = 0
+   row-size=20B cardinality=0
 ====
 # Test subplans: Left-outer equi-join of parent and relative ref.
 select a.id from functional.allcomplextypes a
@@ -568,18 +700,23 @@ where a.id < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=28B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  row-size=28B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
    predicates on b: b.f1 % 2 = 0, b.f1 < 10
+   row-size=20B cardinality=0
 ====
 # Test subplans: Right-outer equi-join of parent and relative ref.
 select b.f1, b.f2 from functional.allcomplextypes a
@@ -590,18 +727,23 @@ where b.f1 % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=40B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT OUTER JOIN]
 |  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  row-size=40B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10, a.id % 2 = 0
    predicates on b: b.f1 % 2 = 0
+   row-size=20B cardinality=0
 ====
 # Test subplans: Full-outer equi-join of parent and relative ref.
 select b.f1, b.f2 from functional.allcomplextypes a
@@ -612,19 +754,24 @@ where a.id < 10 and b.f1 % 2 = 0
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=40B cardinality=0
 |
 |--04:NESTED LOOP JOIN [FULL OUTER JOIN]
 |  |  join predicates: b.f1 < a.year, b.f1 = a.id
 |  |  predicates: a.id < 10, b.f1 % 2 = 0
+|  |  row-size=40B cardinality=11
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col b]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: a.id < 10
    predicates on b: b.f1 % 2 = 0
+   row-size=20B cardinality=0
 ====
 # Test subplans: Test multiple relative refs, disjunctive predicates,
 # and correct join ordering within a subplan.
@@ -637,33 +784,44 @@ select 1 from functional.allcomplextypes a
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=96B cardinality=0
 |
 |--10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = e.f1
+|  |  row-size=96B cardinality=1
 |  |
 |  |--06:UNNEST [a.struct_map_col e]
+|  |     row-size=0B cardinality=10
 |  |
 |  09:NESTED LOOP JOIN [LEFT OUTER JOIN]
 |  |  join predicates: (a.month < 4 OR d.f1 > 5)
+|  |  row-size=76B cardinality=1
 |  |
 |  |--05:UNNEST [a.struct_array_col d]
+|  |     row-size=0B cardinality=10
 |  |
 |  08:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: (a.id < 1 OR b.item > 2)
+|  |  row-size=68B cardinality=1
 |  |
 |  |--03:UNNEST [a.int_array_col b]
+|  |     row-size=0B cardinality=10
 |  |
 |  07:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: c.value = a.year
+|  |  row-size=64B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=60B cardinality=1
 |  |
 |  04:UNNEST [a.int_map_col c]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: !empty(a.int_array_col), !empty(a.int_map_col), !empty(a.struct_map_col)
    predicates on e: e.f2 = 'test'
+   row-size=60B cardinality=0
 ====
 # Test subplans: Test that subplans are not re-ordered across semi/outer joins.
 # The 'alltypes*' tables are purposely placed with increasing size to test
@@ -681,37 +839,48 @@ PLAN-ROOT SINK
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: d.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=36B cardinality=0
 |
 |--10:SUBPLAN
+|  |  row-size=32B cardinality=0
 |  |
 |  |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=32B cardinality=10
 |  |  |
 |  |  |--06:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  07:UNNEST [a.struct_array_col e]
+|  |     row-size=0B cardinality=10
 |  |
 |  09:HASH JOIN [RIGHT ANTI JOIN]
 |  |  hash predicates: c.int_col = b.int_col
+|  |  row-size=24B cardinality=0
 |  |
 |  |--05:HASH JOIN [RIGHT OUTER JOIN]
 |  |  |  hash predicates: b.id = a.id
 |  |  |  runtime filters: RF002 <- a.id
+|  |  |  row-size=24B cardinality=0
 |  |  |
 |  |  |--00:SCAN HDFS [functional.allcomplextypes a]
 |  |  |     partitions=0/0 files=0 size=0B
 |  |  |     predicates: !empty(a.struct_array_col)
 |  |  |     predicates on e: e.f1 < 10
+|  |  |     row-size=16B cardinality=0
 |  |  |
 |  |  01:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
 |  |     runtime filters: RF002 -> b.id
+|  |     row-size=8B cardinality=8
 |  |
 |  02:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 03:SCAN HDFS [functional.alltypes d]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> d.id
+   row-size=4B cardinality=7.30K
 ====
 # Test subplans: Same test as above but the relative table ref is wedged in between
 # semi/outer joins, and the join types are different.
@@ -726,33 +895,44 @@ PLAN-ROOT SINK
 |
 11:HASH JOIN [INNER JOIN]
 |  hash predicates: d.id = b.id
+|  row-size=8B cardinality=100
 |
 |--10:HASH JOIN [RIGHT ANTI JOIN]
 |  |  hash predicates: b.int_col = c.int_col
+|  |  row-size=4B cardinality=100
 |  |
 |  |--02:SCAN HDFS [functional.alltypessmall c]
 |  |     partitions=4/4 files=4 size=6.32KB
+|  |     row-size=4B cardinality=100
 |  |
 |  09:SUBPLAN
+|  |  row-size=32B cardinality=80
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=32B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [a.struct_array_col e]
+|  |     row-size=0B cardinality=10
 |  |
 |  08:HASH JOIN [FULL OUTER JOIN]
 |  |  hash predicates: b.id = a.id
+|  |  row-size=24B cardinality=8
 |  |
 |  |--00:SCAN HDFS [functional.allcomplextypes a]
 |  |     partitions=0/0 files=0 size=0B
 |  |     predicates on e: e.f1 < 10
+|  |     row-size=16B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 03:SCAN HDFS [functional.alltypes d]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # Test subplans: Test joining relative refs with independent table refs.
 # The 'alltypes*' tables are purposely placed with increasing size to test
@@ -770,41 +950,54 @@ PLAN-ROOT SINK
 12:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: e.id = d.value
 |  runtime filters: RF000 <- d.value
+|  row-size=145B cardinality=0
 |
 |--11:SUBPLAN
+|  |  row-size=145B cardinality=0
 |  |
 |  |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=145B cardinality=10
 |  |  |
 |  |  |--07:SINGULAR ROW SRC
+|  |  |     row-size=129B cardinality=1
 |  |  |
 |  |  08:UNNEST [a.int_map_col d]
+|  |     row-size=0B cardinality=10
 |  |
 |  10:HASH JOIN [RIGHT OUTER JOIN]
 |  |  hash predicates: c.id = b.item
 |  |  other predicates: c.int_col > 30
 |  |  runtime filters: RF002 <- b.item
+|  |  row-size=129B cardinality=0
 |  |
 |  |--01:SUBPLAN
+|  |  |  row-size=40B cardinality=0
 |  |  |
 |  |  |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  |  row-size=40B cardinality=10
 |  |  |  |
 |  |  |  |--02:SINGULAR ROW SRC
+|  |  |  |     row-size=36B cardinality=1
 |  |  |  |
 |  |  |  03:UNNEST [a.int_array_col b]
+|  |  |     row-size=0B cardinality=10
 |  |  |
 |  |  00:SCAN HDFS [functional.allcomplextypes a]
 |  |     partitions=0/0 files=0 size=0B
 |  |     predicates: !empty(a.int_array_col), !empty(a.int_map_col)
 |  |     predicates on b: b.item < 10
+|  |     row-size=36B cardinality=0
 |  |
 |  05:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: c.id < 10, c.int_col > 30
 |     runtime filters: RF002 -> c.id
+|     row-size=89B cardinality=10
 |
 06:SCAN HDFS [functional.alltypes e]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> e.id
+   row-size=4B cardinality=7.30K
 ====
 # Test subplans: Same as above but with different join types
 select *
@@ -819,37 +1012,50 @@ PLAN-ROOT SINK
 |
 12:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: d.value = e.id
+|  row-size=89B cardinality=8
 |
 |--06:SCAN HDFS [functional.alltypestiny e]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 11:SUBPLAN
+|  row-size=133B cardinality=100
 |
 |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=133B cardinality=10
 |  |
 |  |--07:SINGULAR ROW SRC
+|  |     row-size=129B cardinality=1
 |  |
 |  08:UNNEST [a.int_map_col d]
+|     row-size=0B cardinality=10
 |
 10:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: c.id = b.item
 |  other predicates: b.item < 10, c.int_col > 30
+|  row-size=129B cardinality=10
 |
 |--01:SUBPLAN
+|  |  row-size=40B cardinality=0
 |  |
 |  |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=40B cardinality=10
 |  |  |
 |  |  |--02:SINGULAR ROW SRC
+|  |  |     row-size=36B cardinality=1
 |  |  |
 |  |  03:UNNEST [a.int_array_col b]
+|  |     row-size=0B cardinality=10
 |  |
 |  00:SCAN HDFS [functional.allcomplextypes a]
 |     partitions=0/0 files=0 size=0B
 |     predicates on b: b.item < 10
+|     row-size=36B cardinality=0
 |
 05:SCAN HDFS [functional.alltypessmall c]
    partitions=4/4 files=4 size=6.32KB
    predicates: c.int_col > 30
+   row-size=89B cardinality=10
 ====
 # Test subplans: Parent scan and aggregate subplan.
 select a.id, v.cnt
@@ -860,19 +1066,25 @@ where v.cnt < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=24B cardinality=0
 |
 |--05:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=24B cardinality=0
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  having: count(*) < 10
+|  |  row-size=8B cardinality=0
 |  |
 |  03:UNNEST [a.int_array_col]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and grouping aggregate subplan.
 select a.id, v.f1, v.cnt
@@ -883,20 +1095,26 @@ where v.cnt < 10
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--05:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=32B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: f1
 |  |  having: count(*) < 10
+|  |  row-size=16B cardinality=10
 |  |
 |  03:UNNEST [a.struct_array_col]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Top-n inside subplan.
 select a.id, v.f1, v.f2
@@ -907,21 +1125,28 @@ where v.f2 = 'test'
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=44B cardinality=0
 |
 |--06:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=44B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  05:SELECT
 |  |  predicates: item.f2 = 'test'
+|  |  row-size=28B cardinality=1
 |  |
 |  04:TOP-N [LIMIT=10]
 |  |  order by: f1 ASC
+|  |  row-size=28B cardinality=10
 |  |
 |  03:UNNEST [a.struct_array_col]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and analytic subplan.
 select a.id, v.key, v.rnum
@@ -932,26 +1157,34 @@ where v.key != 'bad'
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=36B cardinality=0
 |
 |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=36B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  06:SELECT
 |  |  predicates: key != 'bad'
+|  |  row-size=20B cardinality=1
 |  |
 |  05:ANALYTIC
 |  |  functions: row_number()
 |  |  order by: key ASC
 |  |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+|  |  row-size=20B cardinality=10
 |  |
 |  04:SORT
 |  |  order by: key ASC
+|  |  row-size=12B cardinality=10
 |  |
 |  03:UNNEST [a.int_map_col]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and uncorrelated scalar subquery.
 select a.id from functional.allcomplextypes a
@@ -960,19 +1193,25 @@ where id < (select avg(item) from a.int_array_col)
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--05:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  join predicates: id < avg(item)
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: avg(item)
+|  |  row-size=8B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and uncorrelated exists subquery.
 select a.id from functional.allcomplextypes a
@@ -981,16 +1220,21 @@ where exists (select item from a.int_array_col where item > 100)
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col]
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates on int_array_col: item > 100
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and uncorrelated not exists subquery.
 select a.id from functional.allcomplextypes a
@@ -999,15 +1243,20 @@ where not exists (select item from a.int_array_col)
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT ANTI JOIN]
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and correlated exists subquery
 # without an equi-join condition.
@@ -1017,16 +1266,21 @@ where exists (select m.key from a.struct_map_col m where a.id < m.f1)
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  join predicates: a.id < m.f1
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.struct_map_col m]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and correlated not exists subquery
 # without an equi-join condition.
@@ -1036,16 +1290,21 @@ where not exists (select c.f2 from a.struct_array_col c where a.id < c.f1)
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT ANTI JOIN]
 |  |  join predicates: a.id < c.f1
+|  |  row-size=16B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [a.struct_array_col c]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and a correlated IN subquery.
 # Note: The Nested Loop Join here is intentional because there is no
@@ -1057,16 +1316,21 @@ where id in (select b.item from a.int_array_col b where a.year < b.item)
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  join predicates: a.year < b.item, b.item = id
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col b]
+|     row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=20B cardinality=0
 ====
 # Test subplans: Parent scan and an uncorrelated NOT IN subquery.
 # Note: We must use a hash join because the nested-loop join does not support
@@ -1079,16 +1343,21 @@ where id not in (select b.item from a.int_array_col b)
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=16B cardinality=0
 |
 |--04:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  |  hash predicates: id = b.item
+|  |  row-size=16B cardinality=1
 |  |
 |  |--03:UNNEST [a.int_array_col b]
+|  |     row-size=4B cardinality=10
 |  |
 |  02:SINGULAR ROW SRC
+|     row-size=16B cardinality=1
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # Test subplans: Parent scan and a correlated NOT IN subquery.
 # Note: We must use a hash join because the nested-loop join does not support
@@ -1101,17 +1370,22 @@ where id not in (select b.item from a.int_array_col b where a.year < b.item)
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  |  hash predicates: id = b.item
 |  |  other join predicates: a.year < b.item
+|  |  row-size=20B cardinality=1
 |  |
 |  |--03:UNNEST [a.int_array_col b]
+|  |     row-size=4B cardinality=10
 |  |
 |  02:SINGULAR ROW SRC
+|     row-size=20B cardinality=1
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=20B cardinality=0
 ====
 # Test subplans: Bushy plan with multiple subplans.
 select a.id, c.item, d.key, d.value, v.cnt
@@ -1127,49 +1401,67 @@ cross join
 PLAN-ROOT SINK
 |
 17:SUBPLAN
+|  row-size=84B cardinality=0
 |
 |--15:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=84B cardinality=1
 |  |
 |  |--10:SINGULAR ROW SRC
+|  |     row-size=76B cardinality=1
 |  |
 |  14:AGGREGATE [FINALIZE]
 |  |  output: count(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  13:HASH JOIN [INNER JOIN]
 |  |  hash predicates: x.f1 = y.f1
+|  |  row-size=16B cardinality=10
 |  |
 |  |--12:UNNEST [b.struct_map_col y]
+|  |     row-size=0B cardinality=10
 |  |
 |  11:UNNEST [a.struct_array_col x]
+|     row-size=0B cardinality=10
 |
 16:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=76B cardinality=0
 |
 |--06:SUBPLAN
+|  |  row-size=44B cardinality=0
 |  |
 |  |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=44B cardinality=10
 |  |  |
 |  |  |--07:SINGULAR ROW SRC
+|  |  |     row-size=28B cardinality=1
 |  |  |
 |  |  08:UNNEST [b.int_map_col d]
+|  |     row-size=0B cardinality=10
 |  |
 |  05:SCAN HDFS [functional.allcomplextypes b]
 |     partitions=0/0 files=0 size=0B
 |     predicates: !empty(b.int_map_col)
+|     row-size=28B cardinality=0
 |
 01:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=32B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=28B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col c]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: !empty(a.int_array_col)
    runtime filters: RF000 -> a.id
+   row-size=28B cardinality=0
 ====
 # Test subplans: Same as above but with an INNER JOIN between c and d.
 select a.id, c.item, d.key, d.value, v.cnt
@@ -1186,51 +1478,69 @@ where b.id = d.value
 PLAN-ROOT SINK
 |
 17:SUBPLAN
+|  row-size=84B cardinality=0
 |
 |--15:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=84B cardinality=1
 |  |
 |  |--10:SINGULAR ROW SRC
+|  |     row-size=76B cardinality=1
 |  |
 |  14:AGGREGATE [FINALIZE]
 |  |  output: count(*)
+|  |  row-size=8B cardinality=1
 |  |
 |  13:HASH JOIN [INNER JOIN]
 |  |  hash predicates: x.f1 = y.f1
+|  |  row-size=16B cardinality=10
 |  |
 |  |--12:UNNEST [b.struct_map_col y]
+|  |     row-size=0B cardinality=10
 |  |
 |  11:UNNEST [a.struct_array_col x]
+|     row-size=0B cardinality=10
 |
 16:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=76B cardinality=0
 |
 |--06:SUBPLAN
+|  |  row-size=44B cardinality=0
 |  |
 |  |--09:NESTED LOOP JOIN [INNER JOIN]
 |  |  |  join predicates: d.value = b.id
+|  |  |  row-size=44B cardinality=10
 |  |  |
 |  |  |--07:SINGULAR ROW SRC
+|  |  |     row-size=28B cardinality=1
 |  |  |
 |  |  08:UNNEST [b.int_map_col d]
+|  |     row-size=0B cardinality=10
 |  |
 |  05:SCAN HDFS [functional.allcomplextypes b]
 |     partitions=0/0 files=0 size=0B
 |     predicates: !empty(b.int_map_col)
+|     row-size=28B cardinality=0
 |
 01:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: c.item = a.id
+|  |  row-size=32B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=28B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col c]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: !empty(a.int_array_col)
    runtime filters: RF000 -> a.id
+   row-size=28B cardinality=0
 ====
 # Test subplans: Same as above but with a LEFT OUTER JOIN between c and d.
 # The outer join needs to be moved after the join between a and b to
@@ -1248,45 +1558,61 @@ cross join
 PLAN-ROOT SINK
 |
 15:SUBPLAN
+|  row-size=84B cardinality=0
 |
 |--13:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=84B cardinality=1
 |  |
 |  |--11:AGGREGATE [FINALIZE]
 |  |  |  output: count(*)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  10:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: x.f1 = y.f1
+|  |  |  row-size=16B cardinality=10
 |  |  |
 |  |  |--09:UNNEST [b.struct_map_col y]
+|  |  |     row-size=0B cardinality=10
 |  |  |
 |  |  08:UNNEST [a.struct_array_col x]
+|  |     row-size=0B cardinality=10
 |  |
 |  12:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  join predicates: d.value = c.item
+|  |  row-size=76B cardinality=10
 |  |
 |  |--06:SINGULAR ROW SRC
+|  |     row-size=60B cardinality=1
 |  |
 |  07:UNNEST [b.int_map_col d]
+|     row-size=0B cardinality=10
 |
 14:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=60B cardinality=0
 |
 |--05:SCAN HDFS [functional.allcomplextypes b]
 |     partitions=0/0 files=0 size=0B
+|     row-size=28B cardinality=0
 |
 01:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=32B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=28B cardinality=1
 |  |
 |  03:UNNEST [a.int_array_col c]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    predicates: !empty(a.int_array_col)
    runtime filters: RF000 -> a.id
+   row-size=28B cardinality=0
 ====
 # Test subplans: Nested subplans.
 select 1
@@ -1298,26 +1624,35 @@ PLAN-ROOT SINK
 |
 01:SUBPLAN
 |  limit: 10
+|  row-size=44B cardinality=10
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=44B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=24B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=24B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=20B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c_custkey < 10, !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderkey < 5
    predicates on o_lineitems: l_linenumber < 3
+   row-size=20B cardinality=15.00K
 ====
 # Nested subplans.
 select 1
@@ -1329,23 +1664,32 @@ cross join
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=24B cardinality=0
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=24B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=12B cardinality=100
 |  |
 |  |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=12B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=12B cardinality=1
 |  |  |
 |  |  06:UNNEST [m1.value m2]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [a.map_map_col m1]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # Test subplans: straight_join should place the Subplan in FROM-clause order
 # while coalescing adjacent ones.
@@ -1358,33 +1702,44 @@ cross join (select avg(item) a from b.int_array_col) v2
 PLAN-ROOT SINK
 |
 10:SUBPLAN
+|  row-size=48B cardinality=0
 |
 |--08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=48B cardinality=1
 |  |
 |  |--06:AGGREGATE [FINALIZE]
 |  |  |  output: avg(item)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  05:UNNEST [b.int_array_col]
+|  |     row-size=0B cardinality=10
 |  |
 |  07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=40B cardinality=1
 |  |
 |  |--04:AGGREGATE [FINALIZE]
 |  |  |  output: count(*)
+|  |  |  row-size=8B cardinality=1
 |  |  |
 |  |  03:UNNEST [a.int_map_col]
+|  |     row-size=0B cardinality=10
 |  |
 |  02:SINGULAR ROW SRC
+|     row-size=32B cardinality=1
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=32B cardinality=0
 |
 |--01:SCAN HDFS [functional.allcomplextypes b]
 |     partitions=0/0 files=0 size=0B
+|     row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.allcomplextypes a]
    partitions=0/0 files=0 size=0B
    runtime filters: RF000 -> a.id
+   row-size=16B cardinality=0
 ====
 # Test subplans: Enforcement of slot equivalences and removal
 # of redundant predicates.
@@ -1399,28 +1754,37 @@ where c.c_custkey = o.o_orderkey and c.c_custkey = o.o_shippriority
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=66B cardinality=150.00K
 |
 |--08:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: o.o_orderkey = c.c_custkey
+|  |  row-size=66B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=22B cardinality=1
 |  |
 |  04:SUBPLAN
+|  |  row-size=44B cardinality=10
 |  |
 |  |--07:NESTED LOOP JOIN [INNER JOIN]
 |  |  |  join predicates: l.l_linenumber = o.o_shippriority
+|  |  |  row-size=44B cardinality=10
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  06:UNNEST [o.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: c.c_custkey = c.c_nationkey, !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o.o_orderkey = o.o_shippriority
    predicates on l: l.l_partkey = l.l_linenumber, l.l_partkey = l.l_suppkey
+   row-size=22B cardinality=15.00K
 ====
 # Test assignment of nested conjuncts in the parent scan and its limitations.
 select *
@@ -1440,32 +1804,44 @@ where a.item between 10 and 20
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=116B cardinality=0
 |
 |--12:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: a.item < m.value
+|  |  row-size=116B cardinality=100
 |  |
 |  |--04:UNNEST [t.int_map_col m]
 |  |     limit: 1
+|  |     row-size=16B cardinality=1
 |  |
 |  11:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=100B cardinality=100
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=48B cardinality=1
 |  |
 |  10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: c.f11 = a.item
+|  |  row-size=52B cardinality=100
 |  |
 |  |--03:UNNEST [t.int_array_col a]
+|  |     row-size=0B cardinality=10
 |  |
 |  06:SUBPLAN
+|  |  row-size=48B cardinality=100
 |  |
 |  |--09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=48B cardinality=10
 |  |  |
 |  |  |--07:SINGULAR ROW SRC
+|  |  |     row-size=28B cardinality=1
 |  |  |
 |  |  08:UNNEST [c.f12]
 |  |     predicates: coalesce(key, CAST(f21 AS STRING)) = 'test3'
+|  |     row-size=20B cardinality=10
 |  |
 |  05:UNNEST [t.complex_nested_struct_col.f2 c]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
@@ -1474,6 +1850,7 @@ PLAN-ROOT SINK
    predicates on m: m.key = 'test', m.value != 30
    predicates on c: c.f11 >= 10, c.f11 <= 20, c.f11 % 2 = 0
    predicates on f12: f12.key = 'test2'
+   row-size=48B cardinality=0
 ====
 # IMPALA-2358: Test join ordering of relative collection table refs inside
 # a subquery.
@@ -1491,26 +1868,35 @@ where s.s_suppkey not in
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=44B cardinality=10.00K
 |
 |--08:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  |  hash predicates: s.s_suppkey = ps1.ps_partkey
+|  |  row-size=44B cardinality=1
 |  |
 |  |--07:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: ps2.ps_comment = ps3.ps_comment
+|  |  |  row-size=48B cardinality=10
 |  |  |
 |  |  |--05:UNNEST [s.s_partsupps ps3]
+|  |  |     row-size=0B cardinality=10
 |  |  |
 |  |  06:HASH JOIN [LEFT OUTER JOIN]
 |  |  |  hash predicates: ps1.ps_supplycost = ps2.ps_supplycost
+|  |  |  row-size=36B cardinality=10
 |  |  |
 |  |  |--04:UNNEST [s.s_partsupps ps2]
+|  |  |     row-size=0B cardinality=10
 |  |  |
 |  |  03:UNNEST [s.s_partsupps ps1]
+|  |     row-size=0B cardinality=10
 |  |
 |  02:SINGULAR ROW SRC
+|     row-size=44B cardinality=1
 |
 00:SCAN HDFS [tpch_nested_parquet.supplier s]
-   partitions=1/1 files=1 size=111.08MB
+   partitions=1/1 files=1 size=41.79MB
+   row-size=44B cardinality=10.00K
 ====
 # IMPALA-2383: Test join ordering of relative collection ref after an outer join.
 select 1 from functional.allcomplextypes t1
@@ -1521,21 +1907,28 @@ inner join t2.int_array_col
 PLAN-ROOT SINK
 |
 06:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [t2.int_array_col]
+|     row-size=0B cardinality=10
 |
 05:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.id = t2.id
+|  row-size=20B cardinality=0
 |
 |--01:SCAN HDFS [functional.allcomplextypes t2]
 |     partitions=0/0 files=0 size=0B
+|     row-size=16B cardinality=0
 |
 00:SCAN HDFS [functional.allcomplextypes t1]
    partitions=0/0 files=0 size=0B
+   row-size=4B cardinality=0
 ====
 # IMPALA-2401: Test correlated inline view with an analytic function and grouping.
 select a from functional.allcomplextypes t,
@@ -1545,25 +1938,33 @@ select a from functional.allcomplextypes t,
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=32B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  06:ANALYTIC
 |  |  functions: count(*)
 |  |  partition by: key
+|  |  row-size=20B cardinality=10
 |  |
 |  05:SORT
 |  |  order by: key ASC NULLS FIRST
+|  |  row-size=12B cardinality=10
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  group by: key
+|  |  row-size=12B cardinality=10
 |  |
 |  03:UNNEST [t.int_map_col]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # IMPALA-2349: Test planning of nested subplans with straight_join.
 select straight_join 1
@@ -1575,31 +1976,42 @@ inner join m1.value m2
 PLAN-ROOT SINK
 |
 10:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--08:SUBPLAN
+|  |  row-size=32B cardinality=100
 |  |
 |  |--06:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=32B cardinality=10
 |  |  |
 |  |  |--05:UNNEST [m1.value m2]
+|  |  |     row-size=0B cardinality=10
 |  |  |
 |  |  04:SINGULAR ROW SRC
+|  |     row-size=32B cardinality=1
 |  |
 |  07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=32B cardinality=10
 |  |
 |  |--03:UNNEST [t1.map_map_col m1]
+|  |     row-size=0B cardinality=10
 |  |
 |  02:SINGULAR ROW SRC
+|     row-size=20B cardinality=1
 |
 09:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.id = t2.id
+|  row-size=20B cardinality=0
 |
 |--01:SCAN HDFS [functional.allcomplextypes t2]
 |     partitions=0/0 files=0 size=0B
+|     row-size=4B cardinality=0
 |
 00:SCAN HDFS [functional.allcomplextypes t1]
    partitions=0/0 files=0 size=0B
    predicates: !empty(t1.map_map_col)
    predicates on m1: !empty(m1.value)
+   row-size=16B cardinality=0
 ====
 # IMPALA-2412: Test join ordering in nested subplans.
 select 1
@@ -1617,34 +2029,47 @@ where c.c_custkey in
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=52B cardinality=150.00K
 |
 |--12:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: o2.pos = c.c_custkey
+|  |  row-size=52B cardinality=1
 |  |
 |  |--11:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  |  join predicates: o1.pos = c.c_custkey
+|  |  |  row-size=52B cardinality=10
 |  |  |
 |  |  |--02:SINGULAR ROW SRC
+|  |  |     row-size=44B cardinality=1
 |  |  |
 |  |  03:UNNEST [c.c_orders o1]
+|  |     row-size=0B cardinality=10
 |  |
 |  10:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: o2.pos = o3.pos
+|  |  row-size=28B cardinality=10
 |  |
 |  |--09:UNNEST [c.c_orders o3]
+|  |     row-size=0B cardinality=10
 |  |
 |  05:SUBPLAN
+|  |  row-size=20B cardinality=10
 |  |
 |  |--08:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  |  row-size=20B cardinality=1
 |  |  |
 |  |  |--06:SINGULAR ROW SRC
+|  |  |     row-size=20B cardinality=1
 |  |  |
 |  |  07:UNNEST [o2.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  04:UNNEST [c.c_orders o2]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
+   row-size=44B cardinality=150.00K
 ====
 # IMPALA-2412: Test join ordering in nested subplans. Same as above
 # but with a few inner joins.
@@ -1663,35 +2088,48 @@ where c.c_custkey in
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=52B cardinality=150.00K
 |
 |--12:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: o2.pos = c.c_custkey
+|  |  row-size=52B cardinality=1
 |  |
 |  |--11:NESTED LOOP JOIN [INNER JOIN]
 |  |  |  join predicates: o1.pos = c.c_custkey
+|  |  |  row-size=52B cardinality=10
 |  |  |
 |  |  |--02:SINGULAR ROW SRC
+|  |  |     row-size=44B cardinality=1
 |  |  |
 |  |  03:UNNEST [c.c_orders o1]
+|  |     row-size=0B cardinality=10
 |  |
 |  10:HASH JOIN [INNER JOIN]
 |  |  hash predicates: o2.pos = o3.pos
+|  |  row-size=28B cardinality=10
 |  |
 |  |--09:UNNEST [c.c_orders o3]
+|  |     row-size=0B cardinality=10
 |  |
 |  05:SUBPLAN
+|  |  row-size=20B cardinality=10
 |  |
 |  |--08:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  |  row-size=20B cardinality=1
 |  |  |
 |  |  |--06:SINGULAR ROW SRC
+|  |  |     row-size=20B cardinality=1
 |  |  |
 |  |  07:UNNEST [o2.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  04:UNNEST [c.c_orders o2]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
    predicates: !empty(c.c_orders)
+   row-size=44B cardinality=150.00K
 ====
 # IMPALA-2412: Test join ordering in nested subplans.
 select 1
@@ -1707,34 +2145,47 @@ where c.c_custkey in
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=52B cardinality=150.00K
 |
 |--12:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: o2.pos = c.c_custkey
+|  |  row-size=52B cardinality=1
 |  |
 |  |--11:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  |  join predicates: o1.pos = c.c_custkey
+|  |  |  row-size=52B cardinality=10
 |  |  |
 |  |  |--02:SINGULAR ROW SRC
+|  |  |     row-size=44B cardinality=1
 |  |  |
 |  |  03:UNNEST [c.c_orders o1]
+|  |     row-size=0B cardinality=10
 |  |
 |  10:SUBPLAN
+|  |  row-size=28B cardinality=10
 |  |
 |  |--08:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  |  row-size=28B cardinality=1
 |  |  |
 |  |  |--06:SINGULAR ROW SRC
+|  |  |     row-size=28B cardinality=1
 |  |  |
 |  |  07:UNNEST [o2.o_lineitems l]
+|  |     row-size=0B cardinality=10
 |  |
 |  09:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: o2.pos = o3.pos
+|  |  row-size=28B cardinality=10
 |  |
 |  |--05:UNNEST [c.c_orders o3]
+|  |     row-size=0B cardinality=10
 |  |
 |  04:UNNEST [c.c_orders o2]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
+   row-size=44B cardinality=150.00K
 ====
 # IMPALA-2446: Test predicate assignment when outer join has no conjuncts in
 # the ON clause and there are predicates in the WHERE clause that can be assigned to
@@ -1745,19 +2196,24 @@ where t1.id = t2.pos and t1.int_struct_col.f1 = 10 and t2.item = 1
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--04:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: t1.id = t2.pos
 |  |  other predicates: t2.item = 1, t1.id = t2.pos
+|  |  row-size=32B cardinality=1
 |  |
 |  |--03:UNNEST [t1.int_array_col t2]
+|  |     row-size=0B cardinality=10
 |  |
 |  02:SINGULAR ROW SRC
+|     row-size=20B cardinality=1
 |
 00:SCAN HDFS [functional.allcomplextypes t1]
    partitions=0/0 files=0 size=0B
    predicates: t1.int_struct_col.f1 = 10
    predicates on t2: t2.item = 1
+   row-size=20B cardinality=0
 ====
 # IMPALA-2446: Test predicate assignment when right outer join has no conjuncts in
 # the ON clause and there are predicates in the WHERE clause that can be assigned to
@@ -1768,19 +2224,24 @@ where t1.id = t2.pos and t1.int_struct_col.f1 = 10 and t2.item = 1
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--04:HASH JOIN [RIGHT OUTER JOIN]
 |  |  hash predicates: t1.id = t2.pos
 |  |  other predicates: t1.int_struct_col.f1 = 10, t1.id = t2.pos
+|  |  row-size=32B cardinality=10
 |  |
 |  |--03:UNNEST [t1.int_array_col t2]
+|  |     row-size=0B cardinality=10
 |  |
 |  02:SINGULAR ROW SRC
+|     row-size=20B cardinality=1
 |
 00:SCAN HDFS [functional.allcomplextypes t1]
    partitions=0/0 files=0 size=0B
    predicates: t1.int_struct_col.f1 = 10
    predicates on t2: t2.item = 1
+   row-size=20B cardinality=0
 ====
 # IMPALA-2446: Test predicate assignment when full outer join has no conjuncts in
 # the ON clause and there are predicates in the WHERE clause that can be assigned to
@@ -1791,19 +2252,24 @@ where t1.id = t2.pos and t1.int_struct_col.f1 = 10 and t2.item = 1
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=32B cardinality=0
 |
 |--04:NESTED LOOP JOIN [FULL OUTER JOIN]
 |  |  join predicates: t2.pos = t1.id
 |  |  predicates: t1.int_struct_col.f1 = 10, t2.item = 1, t1.id = t2.pos
+|  |  row-size=32B cardinality=11
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [t1.int_array_col t2]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t1]
    partitions=0/0 files=0 size=0B
    predicates: t1.int_struct_col.f1 = 10
    predicates on t2: t2.item = 1
+   row-size=20B cardinality=0
 ====
 # IMPALA-2478: Test assignment of where-clause conjuncts with an outer-joined
 # # correlated inline view.
@@ -1814,17 +2280,22 @@ select id from functional.allcomplextypes t1 left outer join
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=28B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  predicates: item = 1, t1.id > pos
+|  |  row-size=28B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [t1.int_array_col t2]
+|     row-size=12B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t1]
    partitions=0/0 files=0 size=0B
    predicates on t2: t2.item = 1
+   row-size=16B cardinality=0
 ====
 # IMPALA-2445: Test ordering of nested subplans.
 select 1 from tpch_nested_parquet.customer c
@@ -1835,23 +2306,32 @@ inner join o.o_lineitems
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=24B cardinality=1.50M
 |
 |--08:SUBPLAN
+|  |  row-size=24B cardinality=10
 |  |
 |  |--06:NESTED LOOP JOIN [CROSS JOIN]
+|  |  |  row-size=24B cardinality=10
 |  |  |
 |  |  |--04:SINGULAR ROW SRC
+|  |  |     row-size=24B cardinality=1
 |  |  |
 |  |  05:UNNEST [o.o_lineitems]
+|  |     row-size=0B cardinality=10
 |  |
 |  07:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  row-size=24B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
+   row-size=12B cardinality=150.00K
 ====
 # IMPALA-3065/IMPALA-3062: Test correct assignment of !empty() predicates.
 # Predicates should not be generated if the parent tuple is outer joined.
@@ -1865,21 +2345,28 @@ PLAN-ROOT SINK
 06:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: c1.c_custkey = c2.c_custkey
 |  runtime filters: RF000 <- c2.c_custkey
+|  row-size=28B cardinality=1.50M
 |
 |--05:SCAN HDFS [tpch_nested_parquet.customer c2]
-|     partitions=1/1 files=4 size=577.87MB
+|     partitions=1/1 files=4 size=288.99MB
+|     row-size=8B cardinality=150.00K
 |
 01:SUBPLAN
+|  row-size=20B cardinality=1.50M
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  03:UNNEST [c1.c_orders]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c1]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
    runtime filters: RF000 -> c1.c_custkey
+   row-size=20B cardinality=150.00K
 ====
 # IMPALA-3065/IMPALA-3062: Test correct assignment of !empty() predicates.
 # Predicates should not be generated if the parent tuple is outer joined.
@@ -1892,25 +2379,34 @@ left semi join c2.c_orders o2
 PLAN-ROOT SINK
 |
 08:SUBPLAN
+|  row-size=40B cardinality=3.00M
 |
 |--06:NESTED LOOP JOIN [LEFT SEMI JOIN]
+|  |  row-size=40B cardinality=10
 |  |
 |  |--04:UNNEST [c2.c_orders o2]
+|  |     row-size=0B cardinality=10
 |  |
 |  05:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=40B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=40B cardinality=1
 |  |
 |  03:UNNEST [c1.c_orders o1]
+|     row-size=0B cardinality=10
 |
 07:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: c1.c_custkey = c2.c_custkey
+|  row-size=40B cardinality=300.00K
 |
 |--01:SCAN HDFS [tpch_nested_parquet.customer c2]
-|     partitions=1/1 files=4 size=577.87MB
+|     partitions=1/1 files=4 size=288.99MB
+|     row-size=20B cardinality=150.00K
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c1]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
+   row-size=20B cardinality=150.00K
 ====
 # IMPALA-3084: Test correct assignment of NULL checking predicates
 # referencing outer-joined nested collections.
@@ -1921,16 +2417,21 @@ where o.o_orderkey is null and o.o_orderstatus <=> o_orderpriority
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=342B cardinality=150.00K
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  predicates: o.o_orderkey IS NULL, o.o_orderstatus IS NOT DISTINCT FROM o_orderpriority
+|  |  row-size=342B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=230B cardinality=1
 |  |
 |  03:UNNEST [c.c_orders o]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=577.87MB
+   partitions=1/1 files=4 size=288.99MB
+   row-size=230B cardinality=150.00K
 ====
 # IMPALA-2540: Complex query mixing joins on base tables and nested collections.
 select 1
@@ -1944,48 +2445,63 @@ left join t4.item.o_lineitems t6 on t6.item.l_returnflag = t4.item.o_orderpriori
 PLAN-ROOT SINK
 |
 14:SUBPLAN
+|  row-size=183B cardinality=1
 |
 |--12:SUBPLAN
+|  |  row-size=183B cardinality=1
 |  |
 |  |--10:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  |  join predicates: t6.item.l_returnflag = t4.item.o_orderpriority
+|  |  |  row-size=183B cardinality=10
 |  |  |
 |  |  |--08:SINGULAR ROW SRC
+|  |  |     row-size=171B cardinality=1
 |  |  |
 |  |  09:UNNEST [t4.item.o_lineitems t6]
+|  |     row-size=0B cardinality=10
 |  |
 |  11:NESTED LOOP JOIN [RIGHT OUTER JOIN]
+|  |  row-size=171B cardinality=1
 |  |
 |  |--06:SINGULAR ROW SRC
+|  |     row-size=147B cardinality=1
 |  |
 |  07:UNNEST [t2.c_orders t4]
+|     row-size=0B cardinality=10
 |
 13:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.pos = t2.c_nationkey
 |  runtime filters: RF000 <- t2.c_nationkey
+|  row-size=147B cardinality=1
 |
 |--05:HASH JOIN [INNER JOIN]
 |  |  hash predicates: t3.r_comment = t2.c_address
 |  |  runtime filters: RF002 <- t2.c_address
+|  |  row-size=139B cardinality=1
 |  |
 |  |--04:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: t2.c_custkey = t5.r_regionkey
 |  |  |  runtime filters: RF004 <- t5.r_regionkey
+|  |  |  row-size=61B cardinality=5
 |  |  |
 |  |  |--03:SCAN HDFS [tpch_nested_parquet.region t5]
-|  |  |     partitions=1/1 files=1 size=4.18KB
+|  |  |     partitions=1/1 files=1 size=3.44KB
+|  |  |     row-size=2B cardinality=5
 |  |  |
 |  |  01:SCAN HDFS [tpch_nested_parquet.customer t2]
-|  |     partitions=1/1 files=4 size=577.87MB
+|  |     partitions=1/1 files=4 size=288.99MB
 |  |     runtime filters: RF004 -> t2.c_custkey
+|  |     row-size=59B cardinality=150.00K
 |  |
 |  02:SCAN HDFS [tpch_nested_parquet.region t3]
-|     partitions=1/1 files=1 size=4.18KB
+|     partitions=1/1 files=1 size=3.44KB
 |     runtime filters: RF002 -> t3.r_comment
+|     row-size=78B cardinality=5
 |
 00:SCAN HDFS [tpch_nested_parquet.region.r_nations t1]
-   partitions=1/1 files=1 size=4.18KB
+   partitions=1/1 files=1 size=3.44KB
    runtime filters: RF000 -> t1.pos
+   row-size=8B cardinality=50
 ====
 # Add run time scalar subquery check for uncorrelated subqueries
 # Create CardinalityCheckNode inside a subplan
@@ -1998,21 +2514,27 @@ where c_custkey < (select o_orderkey
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=150.00K
 |
 |--05:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  join predicates: c_custkey < o_orderkey
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  04:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=8B cardinality=1
 |  |
 |  03:UNNEST [c.c_orders]
 |     limit: 2
+|     row-size=8B cardinality=2
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=292.35MB
+   partitions=1/1 files=4 size=288.99MB
    predicates on c_orders: o_orderkey = 6000000
+   row-size=20B cardinality=150.00K
 ====
 # CardinalityCheckNode in subplan in a subplan
 select c_custkey
@@ -2025,31 +2547,42 @@ where c_custkey < (select o_orderkey
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=150.00K
 |
 |--10:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  join predicates: c_custkey < o_orderkey
+|  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=20B cardinality=1
 |  |
 |  09:CARDINALITY CHECK
 |  |  limit: 1
+|  |  row-size=20B cardinality=1
 |  |
 |  04:SUBPLAN
 |  |  limit: 2
+|  |  row-size=20B cardinality=2
 |  |
 |  |--08:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  |  join predicates: li.l_linenumber = o_orderkey
+|  |  |  row-size=20B cardinality=1
 |  |  |
 |  |  |--05:SINGULAR ROW SRC
+|  |  |     row-size=20B cardinality=1
 |  |  |
 |  |  07:CARDINALITY CHECK
 |  |  |  limit: 1
+|  |  |  row-size=4B cardinality=1
 |  |  |
 |  |  06:UNNEST [co.o_lineitems li]
 |  |     limit: 2
+|  |     row-size=4B cardinality=2
 |  |
 |  03:UNNEST [c.c_orders co]
+|     row-size=0B cardinality=10
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   partitions=1/1 files=4 size=292.35MB
+   partitions=1/1 files=4 size=288.99MB
+   row-size=20B cardinality=150.00K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/nested-loop-join.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/nested-loop-join.test b/testdata/workloads/functional-planner/queries/PlannerTest/nested-loop-join.test
index 2b23443..2d71755 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/nested-loop-join.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/nested-loop-join.test
@@ -11,20 +11,25 @@ PLAN-ROOT SINK
 04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  join predicates: a.smallint_col >= c.smallint_col
 |  predicates: a.id < 10
+|  row-size=273B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypesagg c]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: c.bigint_col = 10
+|     row-size=95B cardinality=11
 |
 03:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  join predicates: a.id != b.id OR a.int_col < b.int_col
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
    predicates: a.id < 10
+   row-size=89B cardinality=1
 ---- DISTRIBUTEDPLAN
 NotImplementedException: Error generating a valid execution plan for this query. A RIGHT OUTER JOIN type with no equi-join predicates can only be executed with a single node plan.
 ====
@@ -40,19 +45,24 @@ PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  join predicates: c.tinyint_col < d.bigint_col
+|  row-size=95B cardinality=1.10K
 |
 |--02:SCAN HDFS [functional.alltypesagg d]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: d.bigint_col < 10
+|     row-size=95B cardinality=1.10K
 |
 03:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  join predicates: a.tinyint_col > c.tinyint_col
+|  row-size=1B cardinality=100
 |
 |--01:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=1B cardinality=100
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
+   row-size=1B cardinality=8
 ---- DISTRIBUTEDPLAN
 NotImplementedException: Error generating a valid execution plan for this query. A RIGHT SEMI JOIN type with no equi-join predicates can only be executed with a single node plan.
 ====
@@ -71,25 +81,32 @@ PLAN-ROOT SINK
 06:NESTED LOOP JOIN [FULL OUTER JOIN]
 |  join predicates: c.int_col > d.int_col
 |  predicates: a.bigint_col != c.bigint_col, a.id < 10
+|  row-size=362B cardinality=18.40K
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 05:NESTED LOOP JOIN [FULL OUTER JOIN]
 |  join predicates: a.tinyint_col > c.tinyint_col
+|  row-size=273B cardinality=11.10K
 |
 |--02:SCAN HDFS [functional.alltypesagg c]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=95B cardinality=11.00K
 |
 04:NESTED LOOP JOIN [FULL OUTER JOIN]
 |  join predicates: a.id != b.id OR a.int_col != b.int_col
+|  row-size=178B cardinality=101
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
    predicates: a.id < 10
+   row-size=89B cardinality=1
 ---- DISTRIBUTEDPLAN
 NotImplementedException: Error generating a valid execution plan for this query. A FULL OUTER JOIN type with no equi-join predicates can only be executed with a single node plan.
 ====
@@ -103,16 +120,20 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:NESTED LOOP JOIN [RIGHT ANTI JOIN]
 |  join predicates: a.id < b.id
+|  row-size=8B cardinality=10
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: b.int_col = 5
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ---- DISTRIBUTEDPLAN
 NotImplementedException: Error generating a valid execution plan for this query. A RIGHT ANTI JOIN type with no equi-join predicates can only be executed with a single node plan.
 ====
@@ -128,34 +149,44 @@ PLAN-ROOT SINK
 |
 09:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 08:NESTED LOOP JOIN [RIGHT ANTI JOIN]
 |  join predicates: d.tinyint_col > e.tinyint_col
+|  row-size=5B cardinality=0
 |
 |--04:SCAN HDFS [functional.alltypesnopart e]
 |     partitions=1/1 files=0 size=0B
 |     predicates: e.id < 10
+|     row-size=5B cardinality=0
 |
 07:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  join predicates: c.tinyint_col < d.tinyint_col
+|  row-size=1B cardinality=7.30K
 |
 |--03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=1B cardinality=7.30K
 |
 06:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  join predicates: a.int_col != c.int_col
+|  row-size=17B cardinality=11.00K
 |
 |--02:SCAN HDFS [functional.alltypesagg c]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=5B cardinality=11.00K
 |
 05:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: a.id < b.id
+|  row-size=12B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
+   row-size=8B cardinality=8
 ---- DISTRIBUTEDPLAN
 NotImplementedException: Error generating a valid execution plan for this query. A RIGHT ANTI JOIN type with no equi-join predicates can only be executed with a single node plan.
 ====
@@ -172,47 +203,58 @@ PLAN-ROOT SINK
 |
 15:AGGREGATE [FINALIZE]
 |  output: count:merge(*)
+|  row-size=8B cardinality=0
 |
 14:EXCHANGE [UNPARTITIONED]
 |
 09:AGGREGATE
 |  output: count(*)
+|  row-size=8B cardinality=0
 |
 08:NESTED LOOP JOIN [LEFT ANTI JOIN, BROADCAST]
 |  join predicates: d.tinyint_col > e.tinyint_col
+|  row-size=5B cardinality=0
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
 |  07:NESTED LOOP JOIN [LEFT SEMI JOIN, BROADCAST]
 |  |  join predicates: c.tinyint_col < d.tinyint_col
+|  |  row-size=1B cardinality=7.30K
 |  |
 |  |--12:EXCHANGE [BROADCAST]
 |  |  |
 |  |  06:NESTED LOOP JOIN [LEFT OUTER JOIN, BROADCAST]
 |  |  |  join predicates: a.int_col != c.int_col
+|  |  |  row-size=17B cardinality=11.00K
 |  |  |
 |  |  |--11:EXCHANGE [BROADCAST]
 |  |  |  |
 |  |  |  05:NESTED LOOP JOIN [INNER JOIN, BROADCAST]
 |  |  |  |  predicates: a.id < b.id
+|  |  |  |  row-size=12B cardinality=100
 |  |  |  |
 |  |  |  |--10:EXCHANGE [BROADCAST]
 |  |  |  |  |
 |  |  |  |  00:SCAN HDFS [functional.alltypestiny a]
 |  |  |  |     partitions=4/4 files=4 size=460B
+|  |  |  |     row-size=8B cardinality=8
 |  |  |  |
 |  |  |  01:SCAN HDFS [functional.alltypessmall b]
 |  |  |     partitions=4/4 files=4 size=6.32KB
+|  |  |     row-size=4B cardinality=100
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypesagg c]
 |  |     partitions=11/11 files=11 size=814.73KB
+|  |     row-size=5B cardinality=11.00K
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=1B cardinality=7.30K
 |
 04:SCAN HDFS [functional.alltypesnopart e]
    partitions=1/1 files=0 size=0B
    predicates: e.id < 10
+   row-size=5B cardinality=0
 ====
 # IMPALA-5689: Do not invert a left outer join with no equi-join predicates.
 select * from (
@@ -225,23 +267,28 @@ PLAN-ROOT SINK
 07:EXCHANGE [UNPARTITIONED]
 |
 03:NESTED LOOP JOIN [LEFT OUTER JOIN, BROADCAST]
+|  row-size=93B cardinality=10
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: (t2.bigint_col = 5)
+|     row-size=89B cardinality=730
 |
 05:AGGREGATE [FINALIZE]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 04:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====
 # IMPALA-5689: Do not invert a left semi join with no equi-join predicates.
 select * from (
@@ -254,21 +301,26 @@ PLAN-ROOT SINK
 07:EXCHANGE [UNPARTITIONED]
 |
 03:NESTED LOOP JOIN [LEFT SEMI JOIN, BROADCAST]
+|  row-size=4B cardinality=10
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: (t2.bigint_col = 5)
+|     row-size=8B cardinality=730
 |
 05:AGGREGATE [FINALIZE]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 04:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=4B cardinality=7.30K
 ====


[19/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/joins.test b/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
index b681acb..c6a1a3a 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
@@ -7,14 +7,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
    runtime filters: RF000 -> t1.id
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -23,16 +26,19 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=48B cardinality=0
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
    runtime filters: RF000 -> t1.id
+   row-size=24B cardinality=0
 ====
 # general exprs on both sides of equi-join predicates
 select *
@@ -44,13 +50,16 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.id - 1 = t2.id + 1
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -58,15 +67,18 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
 |  hash predicates: t1.id - 1 = t2.id + 1
+|  row-size=48B cardinality=0
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
+   row-size=24B cardinality=0
 ====
 # test that on-clause predicates referring to multiple tuple ids
 # get registered as eq join conjuncts
@@ -80,21 +92,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: coalesce(functional.alltypestiny.id, functional.alltypestiny.id) = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=97B cardinality=8
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypestiny.id = functional.alltypestiny.id
 |  runtime filters: RF002 <- functional.alltypestiny.id
+|  row-size=93B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> coalesce(functional.alltypestiny.id, functional.alltypestiny.id)
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> coalesce(functional.alltypestiny.id, functional.alltypestiny.id), RF002 -> functional.alltypestiny.id
+   row-size=89B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -103,25 +120,30 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: coalesce(functional.alltypestiny.id, functional.alltypestiny.id) = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=97B cardinality=8
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypestiny.id = functional.alltypestiny.id
 |  runtime filters: RF002 <- functional.alltypestiny.id
+|  row-size=93B cardinality=8
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> coalesce(functional.alltypestiny.id, functional.alltypestiny.id)
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> coalesce(functional.alltypestiny.id, functional.alltypestiny.id), RF002 -> functional.alltypestiny.id
+   row-size=89B cardinality=8
 ====
 # multiple join predicates;
 # scan predicates get propagated correctly;
@@ -140,15 +162,20 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  other predicates: a.tinyint_col = 15, a.day >= 6, a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
+|  row-size=184B cardinality=5
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
+|     row-size=89B cardinality=5
 |
 00:SCAN HDFS [functional.alltypesagg a]
+   partition predicates: a.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=95B cardinality=556
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -158,19 +185,24 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  other predicates: a.tinyint_col = 15, a.day >= 6, a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
+|  row-size=184B cardinality=5
 |
 |--04:EXCHANGE [HASH(b.id,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
+|     row-size=89B cardinality=5
 |
 03:EXCHANGE [HASH(a.id,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypesagg a]
+   partition predicates: a.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=95B cardinality=556
 ====
 # same as before, with 3 tables;
 # non-eq join predicates are evaluated at the correct join node
@@ -192,20 +224,28 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: c.id = a.id, c.string_col = b.string_col
 |  other predicates: a.tinyint_col = 15, b.string_col = '15', a.day >= 6, b.month > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
+|  row-size=279B cardinality=2.00K
 |
 |--03:HASH JOIN [FULL OUTER JOIN]
 |  |  hash predicates: a.id = b.id, a.int_col = b.int_col
+|  |  row-size=184B cardinality=561
 |  |
 |  |--01:SCAN HDFS [functional.alltypessmall b]
+|  |     partition predicates: b.month > 2
 |  |     partitions=2/4 files=2 size=3.17KB
 |  |     predicates: b.string_col = '15'
+|  |     row-size=89B cardinality=5
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
+|     partition predicates: a.day >= 6
 |     partitions=5/11 files=5 size=372.38KB
 |     predicates: a.tinyint_col = 15
+|     row-size=95B cardinality=556
 |
 02:SCAN HDFS [functional.alltypesaggnonulls c]
+   partition predicates: c.day < 3
    partitions=2/10 files=2 size=148.10KB
+   row-size=95B cardinality=2.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -215,30 +255,38 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = c.id, b.string_col = c.string_col
 |  other predicates: a.tinyint_col = 15, b.string_col = '15', a.day >= 6, b.month > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
 |  runtime filters: RF000 <- c.id, RF001 <- c.string_col
+|  row-size=279B cardinality=2.00K
 |
 |--08:EXCHANGE [HASH(c.id,c.string_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypesaggnonulls c]
+|     partition predicates: c.day < 3
 |     partitions=2/10 files=2 size=148.10KB
+|     row-size=95B cardinality=2.00K
 |
 07:EXCHANGE [HASH(a.id,b.string_col)]
 |
 03:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
+|  row-size=184B cardinality=561
 |
 |--06:EXCHANGE [HASH(b.id,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
+|     partition predicates: b.month > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
 |     runtime filters: RF001 -> b.string_col
+|     row-size=89B cardinality=5
 |
 05:EXCHANGE [HASH(a.id,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypesagg a]
+   partition predicates: a.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id
+   row-size=95B cardinality=556
 ====
 # equi join with constants in the on clause are not supported
 select a.id, b.id from
@@ -251,12 +299,15 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: 1 = 1
+|  row-size=8B cardinality=100
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=4B cardinality=100
 ====
 # join using values() in a subquery
 select a.int_col, b.x from functional.alltypessmall a inner join
@@ -267,13 +318,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=17B cardinality=100
 |
 |--01:UNION
 |     constant-operands=3
+|     row-size=13B cardinality=3
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.int_col
+   row-size=4B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -282,15 +336,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=17B cardinality=100
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:UNION
 |     constant-operands=3
+|     row-size=13B cardinality=3
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.int_col
+   row-size=4B cardinality=100
 ====
 # hbase-hdfs join
 select *
@@ -301,12 +358,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypesagg.id = functional_hbase.alltypessmall.id, functional.alltypesagg.int_col = functional_hbase.alltypessmall.int_col
 |  runtime filters: RF000 <- functional_hbase.alltypessmall.id, RF001 <- functional_hbase.alltypessmall.int_col
+|  row-size=184B cardinality=53
 |
 |--01:SCAN HBASE [functional_hbase.alltypessmall]
+|     row-size=89B cardinality=50
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> functional.alltypesagg.id, RF001 -> functional.alltypesagg.int_col
+   row-size=95B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -315,14 +375,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypesagg.id = functional_hbase.alltypessmall.id, functional.alltypesagg.int_col = functional_hbase.alltypessmall.int_col
 |  runtime filters: RF000 <- functional_hbase.alltypessmall.id, RF001 <- functional_hbase.alltypessmall.int_col
+|  row-size=184B cardinality=53
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HBASE [functional_hbase.alltypessmall]
+|     row-size=89B cardinality=50
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> functional.alltypesagg.id, RF001 -> functional.alltypesagg.int_col
+   row-size=95B cardinality=11.00K
 ====
 # hbase-hdfs join with scan filtering
 select *
@@ -340,16 +403,20 @@ PLAN-ROOT SINK
 |  hash predicates: a.int_col = b.int_col, a.id = CAST(b.id AS INT)
 |  other predicates: a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.int_col, RF001 <- CAST(b.id AS INT)
+|  row-size=202B cardinality=11
 |
 |--01:SCAN HBASE [functional_hbase.stringids b]
 |     start key: 5
 |     stop key: 5\0
 |     predicates: b.tinyint_col = 5
+|     row-size=107B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg a]
+   partition predicates: a.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.int_col, RF001 -> a.id
+   row-size=95B cardinality=556
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypesagg/year=2010/month=1/day=10/100110.txt 0:76263
@@ -368,6 +435,7 @@ PLAN-ROOT SINK
 |  hash predicates: a.int_col = b.int_col, a.id = CAST(b.id AS INT)
 |  other predicates: a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.int_col, RF001 <- CAST(b.id AS INT)
+|  row-size=202B cardinality=11
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
@@ -375,11 +443,14 @@ PLAN-ROOT SINK
 |     start key: 5
 |     stop key: 5\0
 |     predicates: b.tinyint_col = 5
+|     row-size=107B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg a]
+   partition predicates: a.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.int_col, RF001 -> a.id
+   row-size=95B cardinality=556
 ====
 # hbase-hdfs join with scan filtering (bogus)
 select *
@@ -398,13 +469,16 @@ PLAN-ROOT SINK
 |  hash predicates: a.int_col = b.int_col, a.id = CAST(b.id AS INT)
 |  other predicates: a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.int_col, RF001 <- CAST(b.id AS INT)
+|  row-size=202B cardinality=0
 |
 |--01:EMPTYSET
 |
 00:SCAN HDFS [functional.alltypesagg a]
+   partition predicates: a.day >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.int_col, RF001 -> a.id
+   row-size=95B cardinality=556
 ---- SCANRANGELOCATIONS
 NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypesagg/year=2010/month=1/day=10/100110.txt 0:76263
@@ -433,55 +507,69 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=5]
 |  order by: count(x.day) ASC
+|  row-size=9B cardinality=1
 |
 09:AGGREGATE [FINALIZE]
 |  output: count(day)
 |  group by: tinyint_col
+|  row-size=9B cardinality=1
 |
 08:SELECT
 |  predicates: day >= 6
+|  row-size=5B cardinality=1
 |
 07:TOP-N [LIMIT=10]
 |  order by: day ASC, tinyint_col ASC
+|  row-size=5B cardinality=10
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: d.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=29B cardinality=113
 |
 |--05:HASH JOIN [RIGHT OUTER JOIN]
 |  |  hash predicates: b.id = c.id
 |  |  runtime filters: RF002 <- c.id
+|  |  row-size=25B cardinality=106
 |  |
 |  |--02:SCAN HDFS [functional.alltypesnopart c]
 |  |     partitions=1/1 files=0 size=0B
+|  |     row-size=5B cardinality=0
 |  |
 |  04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  |  runtime filters: RF004 <- b.id, RF005 <- b.int_col
+|  |  row-size=20B cardinality=106
 |  |
 |  |--01:SCAN HDFS [functional.alltypessmall b]
 |  |     partitions=4/4 files=4 size=6.32KB
 |  |     runtime filters: RF002 -> b.id
+|  |     row-size=8B cardinality=100
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> a.id, RF004 -> a.id, RF005 -> a.int_col
+|     row-size=12B cardinality=11.00K
 |
 03:SCAN HDFS [functional.alltypesagg d]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> d.id
+   row-size=4B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=5]
 |  order by: count(x.day) ASC
+|  row-size=9B cardinality=1
 |
 09:AGGREGATE [FINALIZE]
 |  output: count(day)
 |  group by: tinyint_col
+|  row-size=9B cardinality=1
 |
 08:SELECT
 |  predicates: day >= 6
+|  row-size=5B cardinality=1
 |
 15:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: day ASC, tinyint_col ASC
@@ -489,41 +577,49 @@ PLAN-ROOT SINK
 |
 07:TOP-N [LIMIT=10]
 |  order by: day ASC, tinyint_col ASC
+|  row-size=5B cardinality=10
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: d.id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=29B cardinality=113
 |
 |--14:EXCHANGE [BROADCAST]
 |  |
 |  05:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  |  hash predicates: b.id = c.id
 |  |  runtime filters: RF002 <- c.id
+|  |  row-size=25B cardinality=106
 |  |
 |  |--13:EXCHANGE [HASH(c.id)]
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypesnopart c]
 |  |     partitions=1/1 files=0 size=0B
+|  |     row-size=5B cardinality=0
 |  |
 |  12:EXCHANGE [HASH(b.id)]
 |  |
 |  04:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  |  runtime filters: RF004 <- b.id, RF005 <- b.int_col
+|  |  row-size=20B cardinality=106
 |  |
 |  |--11:EXCHANGE [BROADCAST]
 |  |  |
 |  |  01:SCAN HDFS [functional.alltypessmall b]
 |  |     partitions=4/4 files=4 size=6.32KB
 |  |     runtime filters: RF002 -> b.id
+|  |     row-size=8B cardinality=100
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> a.id, RF004 -> a.id, RF005 -> a.int_col
+|     row-size=12B cardinality=11.00K
 |
 03:SCAN HDFS [functional.alltypesagg d]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> d.id
+   row-size=4B cardinality=11.00K
 ====
 # join without "other join conjuncts"
 select * from functional.alltypessmall a, functional.alltypessmall b where a.id = b.id limit 1
@@ -534,13 +630,16 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
 |  limit: 1
+|  row-size=178B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -551,17 +650,20 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
 |  limit: 1
+|  row-size=178B cardinality=1
 |
 |--04:EXCHANGE [HASH(b.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 03:EXCHANGE [HASH(a.id)]
 |
 00:SCAN HDFS [functional.alltypessmall a]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=100
 ====
 # join conjunct is derived from equivalence classes
 # (no explicit join conjunct between t1 and t2)
@@ -574,21 +676,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=72B cardinality=0
 |
 |--02:SCAN HDFS [functional.testtbl t3]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF002 <- t2.id
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
 |     runtime filters: RF000 -> t2.id
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    runtime filters: RF000 -> t1.id, RF002 -> t1.id
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -597,25 +704,30 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.id = t3.id
 |  runtime filters: RF000 <- t3.id
+|  row-size=72B cardinality=0
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.testtbl t3]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: t1.id = t2.id
 |  runtime filters: RF002 <- t2.id
+|  row-size=48B cardinality=0
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
 |     runtime filters: RF000 -> t2.id
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    runtime filters: RF000 -> t1.id, RF002 -> t1.id
+   row-size=24B cardinality=0
 ====
 # join involving a table with no table stats (functional.emptytable)
 # tests that the default join strategy is broadcast
@@ -627,13 +739,16 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: b.int_col = a.f2
 |  runtime filters: RF000 <- a.f2
+|  row-size=105B cardinality=7.30K
 |
 |--00:SCAN HDFS [functional.emptytable a]
 |     partitions=0/0 files=0 size=0B
+|     row-size=16B cardinality=0
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.int_col
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -642,15 +757,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: b.int_col = a.f2
 |  runtime filters: RF000 <- a.f2
+|  row-size=105B cardinality=7.30K
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [functional.emptytable a]
 |     partitions=0/0 files=0 size=0B
+|     row-size=16B cardinality=0
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.int_col
+   row-size=89B cardinality=7.30K
 ====
 # cross join
 select *
@@ -659,26 +777,32 @@ from functional.testtbl t1 cross join functional.testtbl
 PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 04:EXCHANGE [UNPARTITIONED]
 |
 02:NESTED LOOP JOIN [CROSS JOIN, BROADCAST]
+|  row-size=48B cardinality=0
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ====
 # cross join with where clause
 select *
@@ -688,12 +812,15 @@ PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: t1.id < t2.id
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -701,14 +828,17 @@ PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [INNER JOIN, BROADCAST]
 |  predicates: t1.id < t2.id
+|  row-size=48B cardinality=0
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
+   row-size=24B cardinality=0
 ====
 # Tests that the partitioned join between b and c exploits the existing
 # data partition of its lhs input.
@@ -723,21 +853,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = c.id, b.int_col = c.int_col
 |  runtime filters: RF000 <- c.id, RF001 <- c.int_col
+|  row-size=267B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  runtime filters: RF004 <- b.id, RF005 <- b.int_col
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id, RF001 -> b.int_col
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id, RF001 -> a.int_col, RF004 -> a.id, RF005 -> a.int_col
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -746,27 +881,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: b.id = c.id, b.int_col = c.int_col
 |  runtime filters: RF000 <- c.id, RF001 <- c.int_col
+|  row-size=267B cardinality=7.30K
 |
 |--07:EXCHANGE [HASH(c.id,c.int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  runtime filters: RF004 <- b.id, RF005 <- b.int_col
+|  row-size=178B cardinality=7.30K
 |
 |--06:EXCHANGE [HASH(b.id,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id, RF001 -> b.int_col
+|     row-size=89B cardinality=7.30K
 |
 05:EXCHANGE [HASH(a.id,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id, RF001 -> a.int_col, RF004 -> a.id, RF005 -> a.int_col
+   row-size=89B cardinality=7.30K
 ====
 # Tests that the partitioned join between a and b exploits the existing
 # data partition of its rhs input.
@@ -781,17 +921,21 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = bool_col, a.int_col = int_col
 |  runtime filters: RF000 <- bool_col, RF001 <- int_col
+|  row-size=102B cardinality=14.60K
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=20
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.bool_col, RF001 -> a.int_col
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -800,25 +944,30 @@ PLAN-ROOT SINK
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.bool_col = bool_col, a.int_col = int_col
 |  runtime filters: RF000 <- bool_col, RF001 <- int_col
+|  row-size=102B cardinality=14.60K
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=20
 |  |
 |  04:EXCHANGE [HASH(int_col,bool_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=20
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=5B cardinality=7.30K
 |
 06:EXCHANGE [HASH(a.int_col,a.bool_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.bool_col, RF001 -> a.int_col
+   row-size=89B cardinality=7.30K
 ====
 # Tests that the partitioned join between b and c exploits the existing
 # data partition of its lhs and rhs inputs.
@@ -835,25 +984,31 @@ PLAN-ROOT SINK
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = b.bool_col, a.int_col = b.int_col
 |  runtime filters: RF000 <- b.bool_col, RF001 <- b.int_col
+|  row-size=191B cardinality=10.66M
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.bool_col = bool_col, a.int_col = int_col
 |  runtime filters: RF004 <- bool_col, RF005 <- int_col
+|  row-size=102B cardinality=14.60K
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=20
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.bool_col, RF001 -> functional.alltypes.int_col
+|     row-size=5B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.bool_col, RF001 -> a.int_col, RF004 -> a.bool_col, RF005 -> a.int_col
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -862,35 +1017,42 @@ PLAN-ROOT SINK
 05:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.bool_col = b.bool_col, a.int_col = b.int_col
 |  runtime filters: RF000 <- b.bool_col, RF001 <- b.int_col
+|  row-size=191B cardinality=10.66M
 |
 |--09:EXCHANGE [HASH(b.int_col,b.bool_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.bool_col = bool_col, a.int_col = int_col
 |  runtime filters: RF004 <- bool_col, RF005 <- int_col
+|  row-size=102B cardinality=14.60K
 |
 |--07:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=20
 |  |
 |  06:EXCHANGE [HASH(int_col,bool_col)]
 |  |
 |  03:AGGREGATE [STREAMING]
 |  |  output: count(*)
 |  |  group by: int_col, bool_col
+|  |  row-size=13B cardinality=20
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.bool_col, RF001 -> functional.alltypes.int_col
+|     row-size=5B cardinality=7.30K
 |
 08:EXCHANGE [HASH(a.int_col,a.bool_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.bool_col, RF001 -> a.int_col, RF004 -> a.bool_col, RF005 -> a.int_col
+   row-size=89B cardinality=7.30K
 ====
 # Tests that all predicates from the On-clause are applied (IMPALA-805)
 # and that slot equivalences are enforced at lowest possible plan node.
@@ -910,15 +1072,18 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: b.id = a.id, b.string_col = a.string_col
 |  runtime filters: RF000 <- a.id, RF001 <- a.string_col
+|  row-size=81B cardinality=73
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: a.id = a.int_col, a.id = a.tinyint_col, a.int_col = a.bigint_col, a.tinyint_col = a.smallint_col
+|     row-size=32B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    predicates: b.id = b.int_col, b.id = b.bigint_col, b.string_col = b.date_string_col
    runtime filters: RF000 -> b.id, RF001 -> b.string_col
+   row-size=49B cardinality=730
 ====
 # Tests that all predicates from the On-clause are applied (IMPALA-805).
 select 1 from functional.alltypes a
@@ -937,14 +1102,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: b.id = a.id, b.int_col = a.id, b.id = a.int_col, b.id = a.bigint_col, b.bigint_col = a.id, b.id = a.smallint_col, b.string_col = a.string_col, b.id = a.tinyint_col, b.date_string_col = a.string_col
 |  runtime filters: RF000 <- a.id, RF001 <- a.id, RF002 <- a.int_col, RF003 <- a.bigint_col, RF004 <- a.id, RF005 <- a.smallint_col, RF006 <- a.string_col, RF007 <- a.tinyint_col, RF008 <- a.string_col
+|  row-size=81B cardinality=730
 |
 |--00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: a.int_col = a.bigint_col, a.tinyint_col = a.smallint_col
+|     row-size=32B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.id, RF001 -> b.int_col, RF002 -> b.id, RF003 -> b.id, RF004 -> b.bigint_col, RF005 -> b.id, RF006 -> b.string_col, RF007 -> b.id, RF008 -> b.date_string_col
+   row-size=49B cardinality=7.30K
 ====
 # Tests elimination of redundant join predicates (IMPALA-912).
 select * from
@@ -961,21 +1129,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: c.id = b.id, c.int_col = b.int_col
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
+|  row-size=267B cardinality=8
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = b.id, a.int_col = b.int_col
 |  |  runtime filters: RF004 <- b.id, RF005 <- b.int_col
+|  |  row-size=178B cardinality=8
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=89B cardinality=8
 |  |
 |  00:SCAN HDFS [functional.alltypes a]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF004 -> a.id, RF005 -> a.int_col
+|     row-size=89B cardinality=7.30K
 |
 02:SCAN HDFS [functional.alltypessmall c]
    partitions=4/4 files=4 size=6.32KB
    runtime filters: RF000 -> c.id, RF001 -> c.int_col
+   row-size=89B cardinality=100
 ====
 # Tests elimination of redundant join predicates (IMPALA-912).
 # This test relies on the desired join order b,a,c which requires
@@ -992,22 +1165,27 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=267B cardinality=5.84K
 |
 |--02:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: c.id = c.int_col
+|     row-size=89B cardinality=10
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: b.int_col = a.id
 |  runtime filters: RF002 <- a.id
+|  row-size=178B cardinality=5.84K
 |
 |--00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> a.id
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypes b]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> b.int_col, RF002 -> b.int_col
+   row-size=89B cardinality=7.30K
 ====
 # Tests elimination of redundant join predicates (IMPALA-912)
 # and that slot equivalences are enforced at the lowest possible plan node.
@@ -1024,23 +1202,28 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=28B cardinality=1.00K
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: int_col, smallint_col
 |  |  having: int_col = count(*)
+|  |  row-size=14B cardinality=10
 |  |
 |  02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: functional.alltypessmall.int_col = functional.alltypessmall.smallint_col
+|     row-size=6B cardinality=10
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
 |  group by: int_col, smallint_col
+|  row-size=14B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.int_col
+   row-size=6B cardinality=7.30K
 ====
 # Test retainment of join predicates referencing outer-joined tuples
 # that are otherwise redundant (equivalence class already covered
@@ -1065,22 +1248,27 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.smallint_col = t3.int_col, t2.bigint_col = t3.int_col
 |  runtime filters: RF000 <- t3.int_col, RF001 <- t3.int_col
+|  row-size=24B cardinality=821
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t3.int_col = t3.bigint_col
+|     row-size=12B cardinality=1
 |
 03:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: t2.smallint_col = t1.smallint_col
 |  runtime filters: RF004 <- t1.smallint_col
+|  row-size=12B cardinality=827.84K
 |
 |--00:SCAN HDFS [functional.alltypes t1]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> t1.smallint_col, RF001 -> t1.smallint_col
+|     row-size=2B cardinality=7.30K
 |
 01:SCAN HDFS [functional.alltypesagg t2]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t2.smallint_col, RF001 -> t2.bigint_col, RF004 -> t2.smallint_col
+   row-size=10B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1089,28 +1277,33 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: t1.smallint_col = t3.int_col, t2.bigint_col = t3.int_col
 |  runtime filters: RF000 <- t3.int_col, RF001 <- t3.int_col
+|  row-size=24B cardinality=821
 |
 |--07:EXCHANGE [HASH(t3.int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
 |     predicates: t3.int_col = t3.bigint_col
+|     row-size=12B cardinality=1
 |
 03:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: t2.smallint_col = t1.smallint_col
 |  runtime filters: RF004 <- t1.smallint_col
+|  row-size=12B cardinality=827.84K
 |
 |--06:EXCHANGE [HASH(t1.smallint_col)]
 |  |
 |  00:SCAN HDFS [functional.alltypes t1]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> t1.smallint_col, RF001 -> t1.smallint_col
+|     row-size=2B cardinality=7.30K
 |
 05:EXCHANGE [HASH(t2.smallint_col)]
 |
 01:SCAN HDFS [functional.alltypesagg t2]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t2.smallint_col, RF001 -> t2.bigint_col, RF004 -> t2.smallint_col
+   row-size=10B cardinality=11.00K
 ====
 # Test correct removal of redundant join predicates (IMPALA-1353):
 # Equivalences among inline-view slots are enforced. The predicates
@@ -1129,14 +1322,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = id + id, a.int_col = int_col * int_col
 |  runtime filters: RF000 <- id + id, RF001 <- int_col * int_col
+|  row-size=25B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: int_col * int_col = bigint_col, id + id = tinyint_col
+|     row-size=17B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=8B cardinality=7.30K
 ====
 # Test correct removal of redundant join predicates (IMPALA-1353):
 # Equivalences among inline-view slots are enforced. The predicates
@@ -1156,19 +1352,24 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = id + id, a.int_col = int_col * int_col
 |  runtime filters: RF000 <- id + id, RF001 <- int_col * int_col
+|  row-size=25B cardinality=7.30K
 |
 |--03:SELECT
 |  |  predicates: int_col * int_col = bigint_col, id + id = tinyint_col
+|  |  row-size=17B cardinality=2
 |  |
 |  02:TOP-N [LIMIT=20]
 |  |  order by: tinyint_col ASC
+|  |  row-size=17B cardinality=20
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=17B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=8B cardinality=7.30K
 ====
 # Test correct removal of redundant join predicates (IMPALA-1353):
 # Equivalences among inline-view slots are enforced. The predicates
@@ -1188,22 +1389,28 @@ PLAN-ROOT SINK
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = id + id, a.int_col = int_col * int_col
 |  runtime filters: RF000 <- id + id, RF001 <- int_col * int_col
+|  row-size=32B cardinality=7.30K
 |
 |--04:SELECT
 |  |  predicates: id + id = sum(tinyint_col), int_col * int_col = count(bigint_col)
+|  |  row-size=24B cardinality=2
 |  |
 |  03:TOP-N [LIMIT=20]
 |  |  order by: sum(tinyint_col) ASC
+|  |  row-size=24B cardinality=20
 |  |
 |  02:ANALYTIC
 |  |  functions: sum(tinyint_col), count(bigint_col)
+|  |  row-size=33B cardinality=100
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=17B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=8B cardinality=7.30K
 ====
 # IMPALA-1353/IMPALA-4916: Test correct removal of redundant join predicates.
 select 1
@@ -1222,20 +1429,25 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = x, a.int_col = y
 |  runtime filters: RF000 <- x, RF001 <- y
+|  row-size=33B cardinality=7.30K
 |
 |--01:UNION
+|  |  row-size=25B cardinality=11
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: id - id = tinyint_col, int_col / int_col = bigint_col
+|  |     row-size=17B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
 |     predicates: int_col * int_col = bigint_col, id + id = tinyint_col
+|     row-size=17B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
+   row-size=8B cardinality=7.30K
 ====
 # Test creation of predicates at a join node for constructing the
 # minimum spanning tree to cover known slot equivalences (IMPALA-1102).
@@ -1251,21 +1463,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.string_col = t2.string_col
 |  runtime filters: RF000 <- t2.string_col
+|  row-size=53B cardinality=33.58K
 |
 |--02:SCAN HDFS [functional.alltypes t2]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=13B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.tinyint_col = t3.smallint_col, t1.string_col = t3.date_string_col
 |  runtime filters: RF002 <- t3.smallint_col, RF003 <- t3.date_string_col
+|  row-size=40B cardinality=46
 |
 |--01:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> t3.date_string_col
+|     row-size=22B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> t1.string_col, RF002 -> t1.tinyint_col, RF003 -> t1.string_col
+   row-size=18B cardinality=11.00K
 ====
 # Regression test for IMPALA-935.
 select 1 from
@@ -1278,18 +1495,23 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: int_col = bigint_col
+|  row-size=12B cardinality=20
 |
 |--03:AGGREGATE [FINALIZE]
 |  |  group by: bigint_col
+|  |  row-size=8B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 01:AGGREGATE [FINALIZE]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=4B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -1297,28 +1519,35 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: int_col = bigint_col
+|  row-size=12B cardinality=20
 |
 |--08:AGGREGATE [FINALIZE]
 |  |  group by: bigint_col
+|  |  row-size=8B cardinality=2
 |  |
 |  07:EXCHANGE [HASH(bigint_col)]
 |  |
 |  03:AGGREGATE [STREAMING]
 |  |  group by: bigint_col
+|  |  row-size=8B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=8B cardinality=8
 |
 06:AGGREGATE [FINALIZE]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 05:EXCHANGE [HASH(int_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col
+|  row-size=4B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=4B cardinality=100
 ====
 # Test joins with union inputs. One side of the join is a union.
 select a.id, b.id, a.string_col, b.string_col
@@ -1342,19 +1571,25 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: b.id = id
 |  runtime filters: RF000 <- id
+|  row-size=34B cardinality=7
 |
 |--06:EXCHANGE [HASH(id)]
 |  |
 |  00:UNION
 |  |  constant-operands=1
+|  |  row-size=17B cardinality=7
 |  |
 |  |--02:SCAN HDFS [functional.alltypessmall]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=1.58KB
 |  |     predicates: functional.alltypessmall.id < 5
+|  |     row-size=17B cardinality=3
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: functional.alltypessmall.id < 5
+|     row-size=17B cardinality=3
 |
 05:EXCHANGE [HASH(b.id)]
 |
@@ -1362,6 +1597,7 @@ PLAN-ROOT SINK
    partitions=4/4 files=4 size=460B
    predicates: b.id < 5
    runtime filters: RF000 -> b.id
+   row-size=17B cardinality=1
 ====
 # Test joins with union inputs. One input is a union.
 select a.id, b.id, a.string_col, b.string_col
@@ -1384,25 +1620,32 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: b.id = id
+|  row-size=34B cardinality=1
 |
 |--06:EXCHANGE [HASH(id)]
 |  |
 |  01:UNION
 |  |  constant-operands=1
+|  |  row-size=17B cardinality=7
 |  |
 |  |--03:SCAN HDFS [functional.alltypessmall]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=1.58KB
 |  |     predicates: functional.alltypessmall.id < 5
+|  |     row-size=17B cardinality=3
 |  |
 |  02:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: functional.alltypessmall.id < 5
+|     row-size=17B cardinality=3
 |
 05:EXCHANGE [HASH(b.id)]
 |
 00:SCAN HDFS [functional.alltypestiny b]
    partitions=4/4 files=4 size=460B
    predicates: b.id < 5
+   row-size=17B cardinality=1
 ====
 # Test joins with union inputs. Both inputs are a union.
 select a.id, b.id, a.string_col, b.string_col
@@ -1432,30 +1675,41 @@ PLAN-ROOT SINK
 06:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: id = id
 |  other predicates: id < 5
+|  row-size=34B cardinality=58
 |
 |--08:EXCHANGE [HASH(id)]
 |  |
 |  03:UNION
 |  |  constant-operands=1
+|  |  row-size=17B cardinality=7
 |  |
 |  |--05:SCAN HDFS [functional.alltypessmall]
+|  |     partition predicates: year = 2009, month = 2
 |  |     partitions=1/4 files=1 size=1.58KB
 |  |     predicates: functional.alltypessmall.id < 5
+|  |     row-size=17B cardinality=3
 |  |
 |  04:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: year = 2009, month = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: functional.alltypessmall.id < 5
+|     row-size=17B cardinality=3
 |
 07:EXCHANGE [HASH(id)]
 |
 00:UNION
 |  constant-operands=1
+|  row-size=17B cardinality=51
 |
 |--02:SCAN HDFS [functional.alltypessmall]
+|     partition predicates: year = 2009, month = 2
 |     partitions=1/4 files=1 size=1.58KB
+|     row-size=17B cardinality=25
 |
 01:SCAN HDFS [functional.alltypessmall]
+   partition predicates: year = 2009, month = 1
    partitions=1/4 files=1 size=1.57KB
+   row-size=17B cardinality=25
 ====
 # Test joins with decimals with different precision and scale
 # Regression test for IMPALA-1121
@@ -1466,17 +1720,21 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.d1 = b.d5
 |  runtime filters: RF000 <- b.d5
+|  row-size=12B cardinality=unavailable
 |
 |--01:SCAN HDFS [functional.decimal_tbl b]
 |     partitions=1/1 files=1 size=195B
+|     row-size=8B cardinality=unavailable
 |
 00:SCAN HDFS [functional.decimal_tbl a]
    partitions=1/1 files=1 size=195B
    runtime filters: RF000 -> a.d1
+   row-size=4B cardinality=unavailable
 ====
 # Test left semi join including correct predicate assignment and propagation
 select j.* from functional.jointbl j left semi join functional.dimtbl d
@@ -1489,15 +1747,18 @@ PLAN-ROOT SINK
 |  hash predicates: j.test_id = d.id
 |  other join predicates: j.test_zip < d.zip
 |  runtime filters: RF000 <- d.id
+|  row-size=33B cardinality=1
 |
 |--01:SCAN HDFS [functional.dimtbl d]
 |     partitions=1/1 files=1 size=171B
 |     predicates: d.id < 10, d.name = 'Name2'
+|     row-size=29B cardinality=1
 |
 00:SCAN HDFS [functional.jointbl j]
    partitions=1/1 files=1 size=433B
    predicates: j.test_id < 10
    runtime filters: RF000 -> j.test_id
+   row-size=33B cardinality=2
 ====
 # Test right semi join including correct predicate assignment and propagation
 select b.* from functional.alltypes a right semi join functional.alltypestiny b
@@ -1510,15 +1771,18 @@ PLAN-ROOT SINK
 |  hash predicates: a.id = b.id
 |  other join predicates: a.int_col < b.int_col
 |  runtime filters: RF000 <- b.id
+|  row-size=89B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
 |     predicates: b.id < 10
+|     row-size=89B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    predicates: a.id < 10, a.bool_col = FALSE
    runtime filters: RF000 -> a.id
+   row-size=9B cardinality=516
 ====
 # Test left anti join including correct predicate assignment and propagation
 select j.* from functional.jointbl j left anti join functional.dimtbl d
@@ -1530,14 +1794,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT ANTI JOIN]
 |  hash predicates: j.test_id = d.id
 |  other join predicates: j.test_zip < d.zip
+|  row-size=33B cardinality=2
 |
 |--01:SCAN HDFS [functional.dimtbl d]
 |     partitions=1/1 files=1 size=171B
 |     predicates: d.id < 10, d.name = 'Name2'
+|     row-size=29B cardinality=1
 |
 00:SCAN HDFS [functional.jointbl j]
    partitions=1/1 files=1 size=433B
    predicates: j.test_id < 10
+   row-size=33B cardinality=2
 ====
 # Test query with anti join and inner join and predicates
 select count(*) from functional.JoinTbl j
@@ -1548,24 +1815,30 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count(*)
+|  row-size=8B cardinality=1
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: k.test_id = j.test_id
 |  runtime filters: RF000 <- j.test_id
+|  row-size=20B cardinality=27
 |
 |--04:HASH JOIN [LEFT ANTI JOIN]
 |  |  hash predicates: j.test_id = d.id
+|  |  row-size=12B cardinality=10
 |  |
 |  |--01:SCAN HDFS [functional.dimtbl d]
 |  |     partitions=1/1 files=1 size=171B
+|  |     row-size=8B cardinality=10
 |  |
 |  00:SCAN HDFS [functional.jointbl j]
 |     partitions=1/1 files=1 size=433B
 |     predicates: j.alltypes_id = 5000
+|     row-size=12B cardinality=10
 |
 02:SCAN HDFS [functional.jointbl k]
    partitions=1/1 files=1 size=433B
    runtime filters: RF000 -> k.test_id
+   row-size=8B cardinality=19
 ====
 # Test legacy-style join hints.
 select straight_join * from functional.alltypestiny a
@@ -1579,27 +1852,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: b.id = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=267B cardinality=1
 |
 |--07:EXCHANGE [HASH(c.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 06:EXCHANGE [HASH(b.id)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF002 <- b.id
+|  row-size=178B cardinality=8
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> a.id, RF002 -> a.id
+   row-size=89B cardinality=8
 ====
 # Test traditional commented join hints.
 select /* +straight_join */ * from functional.alltypestiny a
@@ -1613,27 +1891,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: b.id = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=267B cardinality=1
 |
 |--07:EXCHANGE [HASH(c.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 06:EXCHANGE [HASH(b.id)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF002 <- b.id
+|  row-size=178B cardinality=8
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> a.id, RF002 -> a.id
+   row-size=89B cardinality=8
 ====
 # Test end-of-line commented join hints.
 select
@@ -1653,27 +1936,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: b.id = c.id
 |  runtime filters: RF000 <- c.id
+|  row-size=267B cardinality=1
 |
 |--07:EXCHANGE [HASH(c.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypessmall c]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 06:EXCHANGE [HASH(b.id)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF002 <- b.id
+|  row-size=178B cardinality=8
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> b.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypestiny a]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> a.id, RF002 -> a.id
+   row-size=89B cardinality=8
 ====
 # Regression test for IMPALA-1289. Predicates should be assigned correctly
 # to inverted joins.
@@ -1689,20 +1977,25 @@ PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: c.id != b.id
+|  row-size=267B cardinality=8
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = b.id
 |  |  runtime filters: RF000 <- b.id
+|  |  row-size=178B cardinality=8
 |  |
 |  |--01:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=89B cardinality=8
 |  |
 |  00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> a.id
+|     row-size=89B cardinality=8
 |
 02:SCAN HDFS [functional.alltypes c]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Tests the generation of a distributed plan in which the input fragments
 # of a join node have compatible but not the same number of partitioning exprs with
@@ -1728,32 +2021,39 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: tinyint_col = tinyint_col
 |  runtime filters: RF000 <- tinyint_col
+|  row-size=18B cardinality=40
 |
 |--09:EXCHANGE [HASH(tinyint_col,tinyint_col,tinyint_col)]
 |  |
 |  08:AGGREGATE [FINALIZE]
 |  |  group by: tinyint_col, int_col
+|  |  row-size=5B cardinality=4
 |  |
 |  07:EXCHANGE [HASH(tinyint_col,int_col)]
 |  |
 |  01:AGGREGATE [STREAMING]
 |  |  group by: tinyint_col, int_col
+|  |  row-size=5B cardinality=4
 |  |
 |  00:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=5B cardinality=8
 |
 06:AGGREGATE [FINALIZE]
 |  group by: tinyint_col, int_col, bigint_col
+|  row-size=13B cardinality=10
 |
 05:EXCHANGE [HASH(tinyint_col,int_col,bigint_col)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: tinyint_col, int_col, bigint_col
+|  row-size=13B cardinality=10
 |
 02:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    predicates: functional.alltypessmall.tinyint_col = functional.alltypessmall.bigint_col, functional.alltypessmall.tinyint_col = functional.alltypessmall.int_col
    runtime filters: RF000 -> functional.alltypessmall.tinyint_col
+   row-size=13B cardinality=10
 ====
 # Same as above but with a full outer join.
 # Tests the generation of a distributed plan in which the input fragments
@@ -1779,30 +2079,37 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: tinyint_col = tinyint_col, bigint_col = tinyint_col, int_col = tinyint_col
+|  row-size=18B cardinality=104
 |
 |--09:EXCHANGE [HASH(tinyint_col,tinyint_col,tinyint_col)]
 |  |
 |  08:AGGREGATE [FINALIZE]
 |  |  group by: tinyint_col, int_col
+|  |  row-size=5B cardinality=4
 |  |
 |  07:EXCHANGE [HASH(tinyint_col,int_col)]
 |  |
 |  01:AGGREGATE [STREAMING]
 |  |  group by: tinyint_col, int_col
+|  |  row-size=5B cardinality=4
 |  |
 |  00:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=5B cardinality=8
 |
 06:AGGREGATE [FINALIZE]
 |  group by: tinyint_col, int_col, bigint_col
+|  row-size=13B cardinality=100
 |
 05:EXCHANGE [HASH(tinyint_col,int_col,bigint_col)]
 |
 03:AGGREGATE [STREAMING]
 |  group by: tinyint_col, int_col, bigint_col
+|  row-size=13B cardinality=100
 |
 02:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=13B cardinality=100
 ====
 # Test conservative treatment of partition-compatible input fragments
 # to a partitioned hash join. Making the partitions physically compatible
@@ -1828,31 +2135,38 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: bigint_col = bigint_col, string_col = string_col
 |  runtime filters: RF000 <- bigint_col, RF001 <- string_col
+|  row-size=42B cardinality=4
 |
 |--09:EXCHANGE [HASH(bigint_col,string_col)]
 |  |
 |  08:AGGREGATE [FINALIZE]
 |  |  group by: string_col, bigint_col
+|  |  row-size=21B cardinality=100
 |  |
 |  07:EXCHANGE [HASH(string_col,bigint_col)]
 |  |
 |  03:AGGREGATE [STREAMING]
 |  |  group by: string_col, bigint_col
+|  |  row-size=21B cardinality=100
 |  |
 |  02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=21B cardinality=100
 |
 06:AGGREGATE [FINALIZE]
 |  group by: bigint_col, string_col
+|  row-size=21B cardinality=4
 |
 05:EXCHANGE [HASH(bigint_col,string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: bigint_col, string_col
+|  row-size=21B cardinality=4
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.bigint_col, RF001 -> functional.alltypestiny.string_col
+   row-size=21B cardinality=8
 ====
 # Test that hash exprs are re-ordered as necessary when placing a join into
 # a fragment with a compatible data partition (IMPALA-1324)
@@ -1870,23 +2184,28 @@ PLAN-ROOT SINK
 03:HASH JOIN [LEFT SEMI JOIN, PARTITIONED]
 |  hash predicates: int_col = b.int_col, string_col = b.string_col
 |  runtime filters: RF000 <- b.int_col, RF001 <- b.string_col
+|  row-size=17B cardinality=4
 |
 |--06:EXCHANGE [HASH(b.int_col,b.string_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=17B cardinality=7.30K
 |
 05:AGGREGATE [FINALIZE]
 |  group by: int_col, string_col
+|  row-size=17B cardinality=4
 |
 04:EXCHANGE [HASH(int_col,string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: int_col, string_col
+|  row-size=17B cardinality=4
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF001 -> functional.alltypestiny.string_col
+   row-size=17B cardinality=8
 ====
 # Same as above but with the join inverted.
 select straight_join * from
@@ -1904,23 +2223,28 @@ PLAN-ROOT SINK
 03:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  hash predicates: a.int_col = int_col, a.string_col = string_col
 |  runtime filters: RF000 <- int_col, RF001 <- string_col
+|  row-size=17B cardinality=4
 |
 |--05:AGGREGATE [FINALIZE]
 |  |  group by: int_col, string_col
+|  |  row-size=17B cardinality=4
 |  |
 |  04:EXCHANGE [HASH(int_col,string_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  group by: int_col, string_col
+|  |  row-size=17B cardinality=4
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=17B cardinality=8
 |
 06:EXCHANGE [HASH(a.int_col,a.string_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.int_col, RF001 -> a.string_col
+   row-size=17B cardinality=7.30K
 ====
 # Complex combined regression test for IMPALA-1307 and IMPALA-1324
 select straight_join * from
@@ -1945,52 +2269,63 @@ PLAN-ROOT SINK
 07:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = smallint_col, string_col = string_col
 |  runtime filters: RF000 <- smallint_col, RF001 <- string_col
+|  row-size=67B cardinality=3.65K
 |
 |--15:EXCHANGE [HASH(string_col,smallint_col,smallint_col)]
 |  |
 |  14:AGGREGATE [FINALIZE]
 |  |  group by: string_col, tinyint_col, smallint_col, int_col, bigint_col
+|  |  row-size=28B cardinality=730
 |  |
 |  13:EXCHANGE [HASH(string_col,tinyint_col,smallint_col,int_col,bigint_col)]
 |  |
 |  05:AGGREGATE [STREAMING]
 |  |  group by: string_col, tinyint_col, smallint_col, int_col, bigint_col
+|  |  row-size=28B cardinality=730
 |  |
 |  04:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: bigint_col = smallint_col, smallint_col = tinyint_col
+|     row-size=28B cardinality=730
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col, string_col = string_col
 |  runtime filters: RF004 <- int_col, RF005 <- string_col
+|  row-size=39B cardinality=10
 |
 |--12:EXCHANGE [HASH(string_col,int_col,int_col)]
 |  |
 |  11:AGGREGATE [FINALIZE]
 |  |  group by: tinyint_col, smallint_col, int_col, string_col
+|  |  row-size=20B cardinality=1
 |  |
 |  10:EXCHANGE [HASH(tinyint_col,smallint_col,int_col,string_col)]
 |  |
 |  03:AGGREGATE [STREAMING]
 |  |  group by: tinyint_col, smallint_col, int_col, string_col
+|  |  row-size=20B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     predicates: functional.alltypestiny.int_col = functional.alltypestiny.smallint_col
 |     runtime filters: RF000 -> functional.alltypestiny.smallint_col, RF001 -> functional.alltypestiny.string_col
+|     row-size=20B cardinality=1
 |
 09:AGGREGATE [FINALIZE]
 |  group by: string_col, int_col, smallint_col
+|  row-size=19B cardinality=10
 |
 08:EXCHANGE [HASH(string_col,int_col,smallint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  group by: string_col, int_col, smallint_col
+|  row-size=19B cardinality=10
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
    predicates: functional.alltypessmall.smallint_col = functional.alltypessmall.int_col
    runtime filters: RF000 -> functional.alltypessmall.int_col, RF001 -> functional.alltypessmall.string_col, RF004 -> functional.alltypessmall.int_col, RF005 -> functional.alltypessmall.string_col
+   row-size=19B cardinality=10
 ====
 # Assignment of predicates from the On-clause of an
 # anti join; inner join followed by anti join (IMPALA-1387)
@@ -2010,21 +2345,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT ANTI JOIN]
 |  hash predicates: a.id = c.id
 |  other join predicates: a.tinyint_col = 10, a.int_col = b.int_col
+|  row-size=97B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: c.int_col < 10
+|     row-size=8B cardinality=730
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=97B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=8B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> a.id
+   row-size=89B cardinality=7.30K
 ====
 # Assignment of predicates from the On-clause of an anti join;
 # sequence of inner joins interleaved with an anti join.
@@ -2045,31 +2385,38 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: d.tinyint_col = a.tinyint_col
 |  runtime filters: RF000 <- a.tinyint_col
+|  row-size=273B cardinality=892.22K
 |
 |--07:HASH JOIN [LEFT ANTI JOIN]
 |  |  hash predicates: a.int_col = c.int_col
 |  |  other join predicates: a.float_col = 2.1, a.tinyint_col = b.tinyint_col
+|  |  row-size=178B cardinality=730
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny c]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: c.int_col < 10, c.bool_col = FALSE
+|  |     row-size=5B cardinality=1
 |  |
 |  06:HASH JOIN [INNER JOIN]
 |  |  hash predicates: b.id = a.id
 |  |  other predicates: a.float_col < b.float_col
 |  |  runtime filters: RF002 <- a.id
+|  |  row-size=178B cardinality=730
 |  |
 |  |--00:SCAN HDFS [functional.alltypes a]
 |  |     partitions=24/24 files=24 size=478.45KB
 |  |     predicates: a.int_col < 10
+|  |     row-size=89B cardinality=730
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF002 -> b.id
+|     row-size=89B cardinality=7.30K
 |
 03:SCAN HDFS [functional.alltypesagg d]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> d.tinyint_col
+   row-size=95B cardinality=11.00K
 ====
 # Inner joins with non-equi join predicates
 select *
@@ -2085,27 +2432,34 @@ PLAN-ROOT SINK
 |
 06:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: a.id < b.id, b.tinyint_col > d.int_col OR b.id != d.id
+|  row-size=362B cardinality=79.66K
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 05:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: a.int_col != c.int_col
+|  row-size=273B cardinality=79.66K
 |
 |--00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
 |     predicates: a.int_col = 10
+|     row-size=89B cardinality=4
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: d.bigint_col = c.bigint_col
 |  runtime filters: RF000 <- c.bigint_col
+|  row-size=184B cardinality=79.66K
 |
 |--02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:SCAN HDFS [functional.alltypesagg d]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> d.bigint_col
+   row-size=95B cardinality=11.00K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -2113,35 +2467,42 @@ PLAN-ROOT SINK
 |
 06:NESTED LOOP JOIN [INNER JOIN, BROADCAST]
 |  predicates: a.id < b.id, b.tinyint_col > d.int_col OR b.id != d.id
+|  row-size=362B cardinality=79.66K
 |
 |--10:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=89B cardinality=100
 |
 05:NESTED LOOP JOIN [INNER JOIN, BROADCAST]
 |  predicates: a.int_col != c.int_col
+|  row-size=273B cardinality=79.66K
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [functional.alltypestiny a]
 |     partitions=4/4 files=4 size=460B
 |     predicates: a.int_col = 10
+|     row-size=89B cardinality=4
 |
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: d.bigint_col = c.bigint_col
 |  runtime filters: RF000 <- c.bigint_col
+|  row-size=184B cardinality=79.66K
 |
 |--08:EXCHANGE [HASH(c.bigint_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypes c]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 07:EXCHANGE [HASH(d.bigint_col)]
 |
 03:SCAN HDFS [functional.alltypesagg d]
    partitions=11/11 files=11 size=814.73KB
    runtime filters: RF000 -> d.bigint_col
+   row-size=95B cardinality=11.00K
 ====
 # Inner joins between inline views with non-equi join predicates
 select *
@@ -2160,26 +2521,33 @@ PLAN-ROOT SINK
 |
 06:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: a.bigint_col > c.bigint_col
+|  row-size=192B cardinality=1.22K
 |
 |--02:NESTED LOOP JOIN [INNER JOIN]
 |  |  predicates: a.id <= b.id
+|  |  row-size=93B cardinality=100
 |  |
 |  |--00:SCAN HDFS [functional.alltypestiny a]
 |  |     partitions=4/4 files=4 size=460B
 |  |     predicates: a.tinyint_col < 10
+|  |     row-size=89B cardinality=1
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=4B cardinality=100
 |
 05:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: c.int_col != d.int_col
+|  row-size=99B cardinality=1.22K
 |
 |--04:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=4B cardinality=7.30K
 |
 03:SCAN HDFS [functional.alltypesagg c]
    partitions=11/11 files=11 size=814.73KB
    predicates: c.tinyint_col = 10
+   row-size=95B cardinality=1.22K
 ====
 # Inner, left-outer, left-semi and left-anti joins with non-equi join
 # predicates
@@ -2198,31 +2566,40 @@ PLAN-ROOT SINK
 |
 08:NESTED LOOP JOIN [RIGHT ANTI JOIN]
 |  join predicates: c.string_col != e.string_col
+|  row-size=267B cardinality=100
 |
 |--07:NESTED LOOP JOIN [RIGHT SEMI JOIN]
 |  |  join predicates: b.bigint_col > d.bigint_col
+|  |  row-size=267B cardinality=100
 |  |
 |  |--06:NESTED LOOP JOIN [RIGHT OUTER JOIN]
 |  |  |  join predicates: a.int_col != c.int_col OR a.tinyint_col > c.tinyint_col
+|  |  |  row-size=267B cardinality=100
 |  |  |
 |  |  |--05:NESTED LOOP JOIN [INNER JOIN]
 |  |  |  |  predicates: a.id < b.id
+|  |  |  |  row-size=178B cardinality=100
 |  |  |  |
 |  |  |  |--00:SCAN HDFS [functional.alltypestiny a]
 |  |  |  |     partitions=4/4 files=4 size=460B
 |  |  |  |     predicates: a.id < 10
+|  |  |  |     row-size=89B cardinality=1
 |  |  |  |
 |  |  |  01:SCAN HDFS [functional.alltypessmall b]
 |  |  |     partitions=4/4 files=4 size=6.32KB
+|  |  |     row-size=89B cardinality=100
 |  |  |
 |  |  02:SCAN HDFS [functional.alltypes c]
 |  |     partitions=24/24 files=24 size=478.45KB
+|  |     row-size=89B cardinality=7.30K
 |  |
 |  03:SCAN HDFS [functional.alltypesagg d]
 |     partitions=11/11 files=11 size=814.73KB
+|     row-size=8B cardinality=11.00K
 |
 04:SCAN HDFS [functional.alltypesagg e]
    partitions=11/11 files=11 size=814.73KB
+   row-size=15B cardinality=11.00K
 ====
 # Regression test for IMPALA-2495: Crash: impala::InPredicate::SetLookupPrepare
 select count(id) from functional.alltypestiny t1
@@ -2234,16 +2611,20 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(id)
+|  row-size=8B cardinality=1
 |
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: t1.id = if(TupleIsNull(), NULL, 1)
 |  other predicates: if(TupleIsNull(), NULL, 1) IN (if(TupleIsNull(), NULL, 1), 10)
+|  row-size=4B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=0B cardinality=100
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
+   row-size=4B cardinality=8
 ====
 # Test queries that appear earlier in this file, but substitute "<=>" or "IS DISTINCT
 # FROM" for "=" in the join predicates.
@@ -2256,14 +2637,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id IS NOT DISTINCT FROM t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
    runtime filters: RF000 -> t1.id
+   row-size=24B cardinality=0
 ====
 select *
 from functional.testtbl t1 join functional.testtbl t2
@@ -2274,14 +2658,17 @@ PLAN-ROOT SINK
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id IS NOT DISTINCT FROM t2.id
 |  runtime filters: RF000 <- t2.id
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
    runtime filters: RF000 -> t1.id
+   row-size=24B cardinality=0
 ====
 select *
 from functional.testtbl t1 join functional.testtbl t2
@@ -2291,13 +2678,16 @@ PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: (t1.id IS DISTINCT FROM t2.id)
+|  row-size=48B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl t2]
 |     partitions=1/1 files=0 size=0B
+|     row-size=24B cardinality=0
 |
 00:SCAN HDFS [functional.testtbl t1]
    partitions=1/1 files=0 size=0B
    predicates: t1.zip = 94611
+   row-size=24B cardinality=0
 ====
 # Test that "is not distinct from" plans the same as "=" in the same query above.
 select t1.*
@@ -2309,20 +2699,25 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: coalesce(functional.alltypestiny.id, functional.alltypestiny.id) IS NOT DISTINCT FROM t3.id
+|  row-size=97B cardinality=8
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypestiny.id IS NOT DISTINCT FROM functional.alltypestiny.id
 |  runtime filters: RF000 <- functional.alltypestiny.id
+|  row-size=93B cardinality=8
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=4B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.id
+   row-size=89B cardinality=8
 ====
 select *
 from functional.alltypesagg a
@@ -2342,20 +2737,28 @@ PLAN-ROOT SINK
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: c.id = a.id, c.string_col IS NOT DISTINCT FROM b.string_col
 |  other predicates: a.tinyint_col = 15, b.string_col = '15', a.day >= 6, b.month > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
+|  row-size=279B cardinality=2.00K
 |
 |--03:HASH JOIN [FULL OUTER JOIN]
 |  |  hash predicates: a.id IS NOT DISTINCT FROM b.id, a.int_col = b.int_col
+|  |  row-size=184B cardinality=561
 |  |
 |  |--01:SCAN HDFS [functional.alltypessmall b]
+|  |     partition predicates: b.month > 2
 |  |     partitions=2/4 files=2 size=3.17KB
 |  |     predicates: b.string_col = '15'
+|  |     row-size=89B cardinality=5
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
+|     partition predicates: a.day >= 6
 |     partitions=5/11 files=5 size=372.38KB
 |     predicates: a.tinyint_col = 15
+|     row-size=95B cardinality=556
 |
 02:SCAN HDFS [functional.alltypesaggnonulls c]
+   partition predicates: c.day < 3
    partitions=2/10 files=2 size=148.10KB
+   row-size=95B cardinality=2.00K
 ====
 select t1.d, t2.d from functional.nulltable t1, functional.nulltable t2
 where not(t1.d IS DISTINCT FROM t2.d)
@@ -2364,12 +2767,15 @@ PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: NOT (t1.d IS DISTINCT FROM t2.d)
+|  row-size=8B cardinality=1
 |
 |--01:SCAN HDFS [functional.nulltable t2]
 |     partitions=1/1 files=1 size=18B
+|     row-size=4B cardinality=1
 |
 00:SCAN HDFS [functional.nulltable t1]
    partitions=1/1 files=1 size=18B
+   row-size=4B cardinality=1
 ====
 select t1.d, t2.d
 from functional.nulltable t1, functional.nulltable t2, functional.nulltable t3
@@ -2380,18 +2786,23 @@ PLAN-ROOT SINK
 |
 04:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: t3.a != t2.g
+|  row-size=34B cardinality=1
 |
 |--02:SCAN HDFS [functional.nulltable t3]
 |     partitions=1/1 files=1 size=18B
+|     row-size=13B cardinality=1
 |
 03:NESTED LOOP JOIN [INNER JOIN]
 |  predicates: t1.d IS DISTINCT FROM t2.d
+|  row-size=21B cardinality=1
 |
 |--00:SCAN HDFS [functional.nulltable t1]
 |     partitions=1/1 files=1 size=18B
+|     row-size=4B cardinality=1
 |
 01:SCAN HDFS [functional.nulltable t2]
    partitions=1/1 files=1 size=18B
+   row-size=17B cardinality=1
 ====
 # IMPALA-3450: limits on join nodes are reflected in cardinality estimates. The test for
 # this is embedded in PlannerTestBase.java and is not visible in these plans, as they only
@@ -2402,12 +2813,15 @@ PLAN-ROOT SINK
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
 |  limit: 1
+|  row-size=8B cardinality=1
 |
 |--01:SCAN HDFS [tpch.customer b]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=0B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer a]
    partitions=1/1 files=1 size=23.08MB
+   row-size=8B cardinality=150.00K
 ====
 select a.c_custkey as c_custkey from tpch.customer a left semi join tpch.customer b
 using (c_custkey) limit 1
@@ -2418,13 +2832,16 @@ PLAN-ROOT SINK
 |  hash predicates: a.c_custkey = b.c_custkey
 |  runtime filters: RF000 <- b.c_custkey
 |  limit: 1
+|  row-size=8B cardinality=1
 |
 |--01:SCAN HDFS [tpch.customer b]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer a]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF000 -> a.c_custkey
+   row-size=8B cardinality=150.00K
 ====
 select b.c_custkey as c_custkey from tpch.customer a right semi join tpch.customer b
 using (c_custkey) limit 1
@@ -2435,13 +2852,16 @@ PLAN-ROOT SINK
 |  hash predicates: a.c_custkey = b.c_custkey
 |  runtime filters: RF000 <- b.c_custkey
 |  limit: 1
+|  row-size=8B cardinality=1
 |
 |--01:SCAN HDFS [tpch.customer b]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer a]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF000 -> a.c_custkey
+   row-size=8B cardinality=150.00K
 ====
 select a.c_custkey as c_custkey from tpch.customer a left outer join tpch.customer b
 using (c_custkey) limit 1
@@ -2451,12 +2871,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: a.c_custkey = b.c_custkey
 |  limit: 1
+|  row-size=16B cardinality=1
 |
 |--01:SCAN HDFS [tpch.customer b]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer a]
    partitions=1/1 files=1 size=23.08MB
+   row-size=8B cardinality=150.00K
 ====
 select b.c_custkey as c_custkey from tpch.customer a right outer join tpch.customer b
 using (c_custkey) limit 1
@@ -2467,13 +2890,16 @@ PLAN-ROOT SINK
 |  hash predicates: a.c_custkey = b.c_custkey
 |  runtime filters: RF000 <- b.c_custkey
 |  limit: 1
+|  row-size=16B cardinality=1
 |
 |--01:SCAN HDFS [tpch.customer b]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer a]
    partitions=1/1 files=1 size=23.08MB
    runtime filters: RF000 -> a.c_custkey
+   row-size=8B cardinality=150.00K
 ====
 select a.c_custkey as c_custkey from tpch.customer a full outer join tpch.customer b
 using (c_custkey) limit 1
@@ -2483,12 +2909,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: a.c_custkey = b.c_custkey
 |  limit: 1
+|  row-size=16B cardinality=1
 |
 |--01:SCAN HDFS [tpch.customer b]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer a]
    partitions=1/1 files=1 size=23.08MB
+   row-size=8B cardinality=150.00K
 ====
 select a.c_custkey as c_custkey from tpch.customer a left anti join tpch.customer b
 using (c_custkey) limit 1
@@ -2498,12 +2927,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [LEFT ANTI JOIN]
 |  hash predicates: a.c_custkey = b.c_custkey
 |  limit: 1
+|  row-size=8B cardinality=1
 |
 |--01:SCAN HDFS [tpch.customer b]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer a]
    partitions=1/1 files=1 size=23.08MB
+   row-size=8B cardinality=150.00K
 ====
 select b.c_custkey as c_custkey from tpch.customer a right anti join tpch.customer b
 using (c_custkey) limit 1
@@ -2513,12 +2945,15 @@ PLAN-ROOT SINK
 02:HASH JOIN [RIGHT ANTI JOIN]
 |  hash predicates: a.c_custkey = b.c_custkey
 |  limit: 1
+|  row-size=8B cardinality=1
 |
 |--01:SCAN HDFS [tpch.customer b]
 |     partitions=1/1 files=1 size=23.08MB
+|     row-size=8B cardinality=150.00K
 |
 00:SCAN HDFS [tpch.customer a]
    partitions=1/1 files=1 size=23.08MB
+   row-size=8B cardinality=150.00K
 ====
 with
   t1 as (select cast(0 as decimal(20, 0)) as c1),
@@ -2531,12 +2966,15 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: c1 = c2
+|  row-size=32B cardinality=1
 |
 |--01:UNION
 |     constant-operands=1
+|     row-size=16B cardinality=1
 |
 00:UNION
    constant-operands=1
+   row-size=16B cardinality=1
 ====
 # Cannot create a hash join because decimal types are incompatible due to decimal_v2.
 with
@@ -2559,10 +2997,13 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: c1 = c2
+|  row-size=32B cardinality=1
 |
 |--01:UNION
 |     constant-operands=1
+|     row-size=16B cardinality=1
 |
 00:UNION
    constant-operands=1
+   row-size=16B cardinality=1
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/kudu-delete.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-delete.test b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-delete.test
index 2120c3e..128857e 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-delete.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-delete.test
@@ -3,10 +3,12 @@ delete from functional_kudu.testtbl
 DELETE FROM KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
+   row-size=8B cardinality=0
 ---- DISTRIBUTEDPLAN
 DELETE FROM KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
+   row-size=8B cardinality=0
 ====
 delete from functional_kudu.testtbl where name = 'hallo'
 ---- PLAN
@@ -14,11 +16,13 @@ DELETE FROM KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: name = 'hallo'
+   row-size=8B cardinality=0
 ---- DISTRIBUTEDPLAN
 DELETE FROM KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: name = 'hallo'
+   row-size=8B cardinality=0
 ====
 delete a from functional_kudu.testtbl a, functional.alltypes b
 where a.id = b.id and a.id in (select id from functional.alltypes)
@@ -28,44 +32,54 @@ DELETE FROM KUDU [functional_kudu.testtbl]
 04:HASH JOIN [RIGHT SEMI JOIN]
 |  hash predicates: id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=12B cardinality=0
 |
 |--03:HASH JOIN [INNER JOIN]
 |  |  hash predicates: b.id = a.id
 |  |  runtime filters: RF002 <- a.id
+|  |  row-size=12B cardinality=0
 |  |
 |  |--00:SCAN KUDU [functional_kudu.testtbl a]
+|  |     row-size=8B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF002 -> b.id
+|     row-size=4B cardinality=7.30K
 |
 02:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=4B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 DELETE FROM KUDU [functional_kudu.testtbl]
 |
 04:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  hash predicates: id = a.id
 |  runtime filters: RF000 <- a.id
+|  row-size=12B cardinality=0
 |
 |--07:EXCHANGE [HASH(a.id)]
 |  |
 |  03:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: b.id = a.id
 |  |  runtime filters: RF002 <- a.id
+|  |  row-size=12B cardinality=0
 |  |
 |  |--05:EXCHANGE [BROADCAST]
 |  |  |
 |  |  00:SCAN KUDU [functional_kudu.testtbl a]
+|  |     row-size=8B cardinality=0
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF002 -> b.id
+|     row-size=4B cardinality=7.30K
 |
 06:EXCHANGE [HASH(id)]
 |
 02:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> id
+   row-size=4B cardinality=7.30K
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/kudu-selectivity.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-selectivity.test b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-selectivity.test
index dfe8241..d39ba76 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-selectivity.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-selectivity.test
@@ -77,7 +77,7 @@ Per-Host Resources: mem-estimate=3.75MB mem-reservation=0B thread-reservation=2
   00:SCAN KUDU [functional_kudu.zipcode_incomes]
      kudu predicates: zip > '2', id > '1'
      mem-estimate=3.75MB mem-reservation=0B thread-reservation=1
-     tuple-ids=0 row-size=124B cardinality=3317
+     tuple-ids=0 row-size=124B cardinality=3.32K
      in pipelines: 00(GETNEXT)
 ---- DISTRIBUTEDPLAN
 F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -87,7 +87,7 @@ Per-Host Resources: mem-estimate=517.89KB mem-reservation=0B thread-reservation=
   |
   01:EXCHANGE [UNPARTITIONED]
      mem-estimate=517.89KB mem-reservation=0B thread-reservation=0
-     tuple-ids=0 row-size=124B cardinality=3317
+     tuple-ids=0 row-size=124B cardinality=3.32K
      in pipelines: 00(GETNEXT)
 
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
@@ -97,7 +97,7 @@ Per-Host Resources: mem-estimate=3.75MB mem-reservation=0B thread-reservation=2
   00:SCAN KUDU [functional_kudu.zipcode_incomes]
      kudu predicates: zip > '2', id > '1'
      mem-estimate=3.75MB mem-reservation=0B thread-reservation=1
-     tuple-ids=0 row-size=124B cardinality=3317
+     tuple-ids=0 row-size=124B cardinality=3.32K
      in pipelines: 00(GETNEXT)
 ====
 select * from functional_kudu.zipcode_incomes where id = '1' or id = '2' or zip = '3'

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/kudu-update.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-update.test b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-update.test
index 5fe40ac..2c70157 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-update.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-update.test
@@ -4,11 +4,13 @@ UPDATE KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: zip > 94549
+   row-size=8B cardinality=0
 ---- DISTRIBUTEDPLAN
 UPDATE KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: zip > 94549
+   row-size=8B cardinality=0
 ====
 # Predicate on key
 update functional_kudu.testtbl set name = 'peter' where zip > 94549 and id = 5
@@ -17,11 +19,13 @@ UPDATE KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: id = 5, zip > 94549
+   row-size=8B cardinality=0
 ---- DISTRIBUTEDPLAN
 UPDATE KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: id = 5, zip > 94549
+   row-size=8B cardinality=0
 ====
 # Mixing predicate and value assignment
 update functional_kudu.testtbl set zip = 94546 where zip > 94549
@@ -30,11 +34,13 @@ UPDATE KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: zip > 94549
+   row-size=8B cardinality=0
 ---- DISTRIBUTEDPLAN
 UPDATE KUDU [functional_kudu.testtbl]
 |
 00:SCAN KUDU [functional_kudu.testtbl]
    kudu predicates: zip > 94549
+   row-size=8B cardinality=0
 ====
 update a
 set a.name = b.name
@@ -46,30 +52,36 @@ UPDATE KUDU [functional_kudu.testtbl]
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF001 <- b.id
+|  row-size=28B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl b]
 |     partitions=1/1 files=0 size=0B
 |     predicates: b.id = 10
+|     row-size=20B cardinality=0
 |
 00:SCAN KUDU [functional_kudu.testtbl a]
    kudu predicates: a.id = 10
    runtime filters: RF001 -> a.id
+   row-size=8B cardinality=0
 ---- DISTRIBUTEDPLAN
 UPDATE KUDU [functional_kudu.testtbl]
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF001 <- b.id
+|  row-size=28B cardinality=0
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.testtbl b]
 |     partitions=1/1 files=0 size=0B
 |     predicates: b.id = 10
+|     row-size=20B cardinality=0
 |
 00:SCAN KUDU [functional_kudu.testtbl a]
    kudu predicates: a.id = 10
    runtime filters: RF001 -> a.id
+   row-size=8B cardinality=0
 ====
 update a
 set a.name = 'values'
@@ -79,27 +91,33 @@ UPDATE KUDU [functional_kudu.testtbl]
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: ids = a.id
+|  row-size=9B cardinality=1
 |
 |--00:SCAN KUDU [functional_kudu.testtbl a]
+|     row-size=8B cardinality=0
 |
 01:UNION
    constant-operands=1
+   row-size=1B cardinality=1
 ---- DISTRIBUTEDPLAN
 UPDATE KUDU [functional_kudu.testtbl]
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: a.id = ids
 |  runtime filters: RF001 <- ids
+|  row-size=9B cardinality=1
 |
 |--04:EXCHANGE [HASH(ids)]
 |  |
 |  01:UNION
 |     constant-operands=1
+|     row-size=1B cardinality=1
 |
 03:EXCHANGE [HASH(a.id)]
 |
 00:SCAN KUDU [functional_kudu.testtbl a]
    runtime filters: RF001 -> a.id
+   row-size=8B cardinality=0
 ====
 update a
 set a.name = 'values'
@@ -111,19 +129,23 @@ UPDATE KUDU [functional_kudu.testtbl]
 02:HASH JOIN [LEFT SEMI JOIN]
 |  hash predicates: a.zip = zip
 |  runtime filters: RF001 <- zip
+|  row-size=12B cardinality=0
 |
 |--01:SCAN HDFS [functional.testtbl]
 |     partitions=1/1 files=0 size=0B
 |     limit: 10
+|     row-size=4B cardinality=0
 |
 00:SCAN KUDU [functional_kudu.testtbl a]
    runtime filters: RF001 -> a.zip
+   row-size=12B cardinality=0
 ---- DISTRIBUTEDPLAN
 UPDATE KUDU [functional_kudu.testtbl]
 |
 02:HASH JOIN [LEFT SEMI JOIN, BROADCAST]
 |  hash predicates: a.zip = zip
 |  runtime filters: RF001 <- zip
+|  row-size=12B cardinality=0
 |
 |--04:EXCHANGE [BROADCAST]
 |  |
@@ -133,9 +155,11 @@ UPDATE KUDU [functional_kudu.testtbl]
 |  01:SCAN HDFS [functional.testtbl]
 |     partitions=1/1 files=0 size=0B
 |     limit: 10
+|     row-size=4B cardinality=0
 |
 00:SCAN KUDU [functional_kudu.testtbl a]
    runtime filters: RF001 -> a.zip
+   row-size=12B cardinality=0
 ====
 update functional_kudu.testtbl set zip = 94546 where false
 ---- PLAN


[08/26] impala git commit: IMPALA-8021: Add estimated cardinality to EXPLAIN output

Posted by ta...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/tpcds-all.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/tpcds-all.test b/testdata/workloads/functional-planner/queries/PlannerTest/tpcds-all.test
index da86c5e..d0b103d 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/tpcds-all.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/tpcds-all.test
@@ -29,30 +29,37 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: dt.d_year ASC, sum(ss_ext_sales_price) DESC, item.i_brand_id ASC
+|  row-size=52B cardinality=100
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=3.04K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=72B cardinality=3.04K
 |
 |--00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_moy = 12
+|     row-size=12B cardinality=6.09K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=60B cardinality=3.04K
 |
 |--02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manufact_id = 436
+|     row-size=44B cardinality=19
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=34.81MB Threads=9
 Per-Host Resource Estimates: Memory=235MB
@@ -64,42 +71,50 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: dt.d_year ASC, sum(ss_ext_sales_price) DESC, item.i_brand_id ASC
+|  row-size=52B cardinality=100
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=3.04K
 |
 10:EXCHANGE [HASH(dt.d_year,item.i_brand,item.i_brand_id)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=3.04K
 |
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=72B cardinality=3.04K
 |
 |--09:EXCHANGE [HASH(dt.d_date_sk)]
 |  |
 |  00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_moy = 12
+|     row-size=12B cardinality=6.09K
 |
 08:EXCHANGE [HASH(store_sales.ss_sold_date_sk)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=60B cardinality=3.04K
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manufact_id = 436
+|     row-size=44B cardinality=19
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=69.62MB Threads=11
 Per-Host Resource Estimates: Memory=167MB
@@ -111,20 +126,24 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: dt.d_year ASC, sum(ss_ext_sales_price) DESC, item.i_brand_id ASC
+|  row-size=52B cardinality=100
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=3.04K
 |
 10:EXCHANGE [HASH(dt.d_year,item.i_brand,item.i_brand_id)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=3.04K
 |
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=72B cardinality=3.04K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -135,12 +154,14 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_moy = 12
+|     row-size=12B cardinality=6.09K
 |
 08:EXCHANGE [HASH(store_sales.ss_sold_date_sk)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=60B cardinality=3.04K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -151,10 +172,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manufact_id = 436
+|     row-size=44B cardinality=19
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ====
 # TPCDS-Q7
 select
@@ -192,45 +215,56 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100]
 |  order by: i_item_id ASC
+|  row-size=60B cardinality=100
 |
 09:AGGREGATE [FINALIZE]
 |  output: avg(ss_quantity), avg(ss_list_price), avg(ss_coupon_amt), avg(ss_sales_price)
 |  group by: i_item_id
+|  row-size=60B cardinality=8.85K
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF000 <- i_item_sk
+|  row-size=162B cardinality=261.60K
 |
 |--03:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
+|     row-size=36B cardinality=18.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_promo_sk = p_promo_sk
 |  runtime filters: RF002 <- p_promo_sk
+|  row-size=126B cardinality=261.60K
 |
 |--04:SCAN HDFS [tpcds.promotion]
 |     partitions=1/1 files=1 size=36.36KB
 |     predicates: (p_channel_email = 'N' OR p_channel_event = 'N')
+|     row-size=30B cardinality=300
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_cdemo_sk = cd_demo_sk
 |  runtime filters: RF004 <- cd_demo_sk
+|  row-size=96B cardinality=263.34K
 |
 |--01:SCAN HDFS [tpcds.customer_demographics]
 |     partitions=1/1 files=1 size=76.92MB
 |     predicates: cd_marital_status = 'W', cd_gender = 'F', cd_education_status = 'Primary'
+|     row-size=52B cardinality=97.40K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF006 <- d_date_sk
+|  row-size=44B cardinality=589.03K
 |
 |--02:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=8B cardinality=373
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_item_sk, RF002 -> ss_promo_sk, RF004 -> ss_cdemo_sk, RF006 -> ss_sold_date_sk
+   row-size=36B cardinality=2.88M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=54.31MB Threads=12
 Per-Host Resource Estimates: Memory=460MB
@@ -242,59 +276,71 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100]
 |  order by: i_item_id ASC
+|  row-size=60B cardinality=100
 |
 16:AGGREGATE [FINALIZE]
 |  output: avg:merge(ss_quantity), avg:merge(ss_list_price), avg:merge(ss_coupon_amt), avg:merge(ss_sales_price)
 |  group by: i_item_id
+|  row-size=60B cardinality=8.85K
 |
 15:EXCHANGE [HASH(i_item_id)]
 |
 09:AGGREGATE [STREAMING]
 |  output: avg(ss_quantity), avg(ss_list_price), avg(ss_coupon_amt), avg(ss_sales_price)
 |  group by: i_item_id
+|  row-size=60B cardinality=8.85K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF000 <- i_item_sk
+|  row-size=162B cardinality=261.60K
 |
 |--14:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
+|     row-size=36B cardinality=18.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_promo_sk = p_promo_sk
 |  runtime filters: RF002 <- p_promo_sk
+|  row-size=126B cardinality=261.60K
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
 |  04:SCAN HDFS [tpcds.promotion]
 |     partitions=1/1 files=1 size=36.36KB
 |     predicates: (p_channel_email = 'N' OR p_channel_event = 'N')
+|     row-size=30B cardinality=300
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_cdemo_sk = cd_demo_sk
 |  runtime filters: RF004 <- cd_demo_sk
+|  row-size=96B cardinality=263.34K
 |
 |--12:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpcds.customer_demographics]
 |     partitions=1/1 files=1 size=76.92MB
 |     predicates: cd_marital_status = 'W', cd_gender = 'F', cd_education_status = 'Primary'
+|     row-size=52B cardinality=97.40K
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF006 <- d_date_sk
+|  row-size=44B cardinality=589.03K
 |
 |--11:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=8B cardinality=373
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_item_sk, RF002 -> ss_promo_sk, RF004 -> ss_cdemo_sk, RF006 -> ss_sold_date_sk
+   row-size=36B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=108.62MB Threads=13
 Per-Host Resource Estimates: Memory=398MB
@@ -306,20 +352,24 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100]
 |  order by: i_item_id ASC
+|  row-size=60B cardinality=100
 |
 16:AGGREGATE [FINALIZE]
 |  output: avg:merge(ss_quantity), avg:merge(ss_list_price), avg:merge(ss_coupon_amt), avg:merge(ss_sales_price)
 |  group by: i_item_id
+|  row-size=60B cardinality=8.85K
 |
 15:EXCHANGE [HASH(i_item_id)]
 |
 09:AGGREGATE [STREAMING]
 |  output: avg(ss_quantity), avg(ss_list_price), avg(ss_coupon_amt), avg(ss_sales_price)
 |  group by: i_item_id
+|  row-size=60B cardinality=8.85K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF000 <- i_item_sk
+|  row-size=162B cardinality=261.60K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -329,10 +379,12 @@ PLAN-ROOT SINK
 |  |
 |  03:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
+|     row-size=36B cardinality=18.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_promo_sk = p_promo_sk
 |  runtime filters: RF002 <- p_promo_sk
+|  row-size=126B cardinality=261.60K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -343,10 +395,12 @@ PLAN-ROOT SINK
 |  04:SCAN HDFS [tpcds.promotion]
 |     partitions=1/1 files=1 size=36.36KB
 |     predicates: (p_channel_email = 'N' OR p_channel_event = 'N')
+|     row-size=30B cardinality=300
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_cdemo_sk = cd_demo_sk
 |  runtime filters: RF004 <- cd_demo_sk
+|  row-size=96B cardinality=263.34K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -357,10 +411,12 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [tpcds.customer_demographics]
 |     partitions=1/1 files=1 size=76.92MB
 |     predicates: cd_marital_status = 'W', cd_gender = 'F', cd_education_status = 'Primary'
+|     row-size=52B cardinality=97.40K
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF006 <- d_date_sk
+|  row-size=44B cardinality=589.03K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -371,10 +427,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=8B cardinality=373
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_item_sk, RF002 -> ss_promo_sk, RF004 -> ss_cdemo_sk, RF006 -> ss_sold_date_sk
+   row-size=36B cardinality=2.88M
 ====
 # TPCDS-Q8
 select
@@ -452,52 +510,65 @@ PLAN-ROOT SINK
 |
 11:TOP-N [LIMIT=100]
 |  order by: s_store_name ASC
+|  row-size=32B cardinality=8
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum(ss_net_profit)
 |  group by: s_store_name
+|  row-size=32B cardinality=8
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: substr(store.s_zip, 1, 2) = substr(substr(ca_zip, 1, 5), 1, 2)
 |  runtime filters: RF000 <- substr(substr(ca_zip, 1, 5), 1, 2)
+|  row-size=65B cardinality=84.40K
 |
 |--07:HASH JOIN [RIGHT SEMI JOIN]
 |  |  hash predicates: substr(ca_zip, 1, 5) = substr(ca_zip, 1, 5)
 |  |  runtime filters: RF004 <- substr(ca_zip, 1, 5)
+|  |  row-size=20B cardinality=396
 |  |
 |  |--05:AGGREGATE [FINALIZE]
 |  |  |  output: count(*)
 |  |  |  group by: substr(ca_zip, 1, 5)
 |  |  |  having: count(*) > 10
+|  |  |  row-size=20B cardinality=396
 |  |  |
 |  |  04:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: customer_address.ca_address_sk = customer.c_current_addr_sk
 |  |  |  runtime filters: RF006 <- customer.c_current_addr_sk
+|  |  |  row-size=38B cardinality=51.30K
 |  |  |
 |  |  |--03:SCAN HDFS [tpcds.customer]
 |  |  |     partitions=1/1 files=1 size=12.60MB
 |  |  |     predicates: c_preferred_cust_flag = 'Y'
+|  |  |     row-size=17B cardinality=50.00K
 |  |  |
 |  |  02:SCAN HDFS [tpcds.customer_address]
 |  |     partitions=1/1 files=1 size=5.25MB
 |  |     runtime filters: RF006 -> customer_address.ca_address_sk
+|  |     row-size=21B cardinality=50.00K
 |  |
 |  06:SCAN HDFS [tpcds.customer_address]
 |     partitions=1/1 files=1 size=5.25MB
 |     predicates: substr(ca_zip, 1, 5) IN ('89436', '30868', '65085', '22977', '83927', '77557', '58429', '40697', '80614', '10502', '32779', '91137', '61265', '98294', '17921', '18427', '21203', '59362', '87291', '84093', '21505', '17184', '10866', '67898', '25797', '28055', '18377', '80332', '74535', '21757', '29742', '90885', '29898', '17819', '40811', '25990', '47513', '89531', '91068', '10391', '18846', '99223', '82637', '41368', '83658', '86199', '81625', '26696', '89338', '88425', '32200', '81427', '19053', '77471', '36610', '99823', '43276', '41249', '48584', '83550', '82276', '18842', '78890', '14090', '38123', '40936', '34425', '19850', '43286', '80072', '79188', '54191', '11395', '50497', '84861', '90733', '21068', '57666', '37119', '25004', '57835', '70067', '62878', '95806', '19303', '18840', '19124', '29785', '16737', '16022', '49613', '89977', '68310', '60069', '98360', '48649', '39050', '41793', '25002', '27413', '39736', '47208', '16515', '94808', '57648', '15009', 
 '80015', '42961', '63982', '21744', '71853', '81087', '67468', '34175', '64008', '20261', '11201', '51799', '48043', '45645', '61163', '48375', '36447', '57042', '21218', '41100', '89951', '22745', '35851', '83326', '61125', '78298', '80752', '49858', '52940', '96976', '63792', '11376', '53582', '18717', '90226', '50530', '94203', '99447', '27670', '96577', '57856', '56372', '16165', '23427', '54561', '28806', '44439', '22926', '30123', '61451', '92397', '56979', '92309', '70873', '13355', '21801', '46346', '37562', '56458', '28286', '47306', '99555', '69399', '26234', '47546', '49661', '88601', '35943', '39936', '25632', '24611', '44166', '56648', '30379', '59785', '11110', '14329', '93815', '52226', '71381', '13842', '25612', '63294', '14664', '21077', '82626', '18799', '60915', '81020', '56447', '76619', '11433', '13414', '42548', '92713', '70467', '30884', '47484', '16072', '38936', '13036', '88376', '45539', '35901', '19506', '65690', '73957', '71850', '49231', '14276', '20005'
 , '18384', '76615', '11635', '38177', '55607', '41369', '95447', '58581', '58149', '91946', '33790', '76232', '75692', '95464', '22246', '51061', '56692', '53121', '77209', '15482', '10688', '14868', '45907', '73520', '72666', '25734', '17959', '24677', '66446', '94627', '53535', '15560', '41967', '69297', '11929', '59403', '33283', '52232', '57350', '43933', '40921', '36635', '10827', '71286', '19736', '80619', '25251', '95042', '15526', '36496', '55854', '49124', '81980', '35375', '49157', '63512', '28944', '14946', '36503', '54010', '18767', '23969', '43905', '66979', '33113', '21286', '58471', '59080', '13395', '79144', '70373', '67031', '38360', '26705', '50906', '52406', '26066', '73146', '15884', '31897', '30045', '61068', '45550', '92454', '13376', '14354', '19770', '22928', '97790', '50723', '46081', '30202', '14410', '20223', '88500', '67298', '13261', '14172', '81410', '93578', '83583', '46047', '94167', '82564', '21156', '15799', '86709', '37931', '74703', '83103', '2305
 4', '70470', '72008', '49247', '91911', '69998', '20961', '70070', '63197', '54853', '88191', '91830', '49521', '19454', '81450', '89091', '62378', '25683', '61869', '51744', '36580', '85778', '36871', '48121', '28810', '83712', '45486', '67393', '26935', '42393', '20132', '55349', '86057', '21309', '80218', '10094', '11357', '48819', '39734', '40758', '30432', '21204', '29467', '30214', '61024', '55307', '74621', '11622', '68908', '33032', '52868', '99194', '99900', '84936', '69036', '99149', '45013', '32895', '59004', '32322', '14933', '32936', '33562', '72550', '27385', '58049', '58200', '16808', '21360', '32961', '18586', '79307', '15492')
 |     runtime filters: RF004 -> substr(ca_zip, 1, 5)
+|     row-size=17B cardinality=5.00K
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  runtime filters: RF002 <- store.s_store_sk
+|  row-size=45B cardinality=84.40K
 |
 |--01:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     runtime filters: RF000 -> substr(store.s_zip, 1, 2)
+|     row-size=37B cardinality=12
 |
 00:SCAN HDFS [tpcds.store_sales]
+   partition predicates: ss_sold_date_sk >= 2452276, ss_sold_date_sk <= 2452366
    partitions=90/1824 files=90 size=10.32MB
    runtime filters: RF002 -> store_sales.ss_store_sk
+   row-size=8B cardinality=84.40K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=46.13MB Threads=13
 Per-Host Resource Estimates: Memory=327MB
@@ -509,51 +580,61 @@ PLAN-ROOT SINK
 |
 11:TOP-N [LIMIT=100]
 |  order by: s_store_name ASC
+|  row-size=32B cardinality=8
 |
 19:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_net_profit)
 |  group by: s_store_name
+|  row-size=32B cardinality=8
 |
 18:EXCHANGE [HASH(s_store_name)]
 |
 10:AGGREGATE [STREAMING]
 |  output: sum(ss_net_profit)
 |  group by: s_store_name
+|  row-size=32B cardinality=8
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: substr(store.s_zip, 1, 2) = substr(substr(ca_zip, 1, 5), 1, 2)
 |  runtime filters: RF000 <- substr(substr(ca_zip, 1, 5), 1, 2)
+|  row-size=65B cardinality=84.40K
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  07:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  |  hash predicates: substr(ca_zip, 1, 5) = substr(ca_zip, 1, 5)
 |  |  runtime filters: RF004 <- substr(ca_zip, 1, 5)
+|  |  row-size=20B cardinality=396
 |  |
 |  |--15:AGGREGATE [FINALIZE]
 |  |  |  output: count:merge(*)
 |  |  |  group by: substr(ca_zip, 1, 5)
 |  |  |  having: count(*) > 10
+|  |  |  row-size=20B cardinality=396
 |  |  |
 |  |  14:EXCHANGE [HASH(substr(ca_zip, 1, 5))]
 |  |  |
 |  |  05:AGGREGATE [STREAMING]
 |  |  |  output: count(*)
 |  |  |  group by: substr(ca_zip, 1, 5)
+|  |  |  row-size=20B cardinality=3.96K
 |  |  |
 |  |  04:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  |  hash predicates: customer_address.ca_address_sk = customer.c_current_addr_sk
 |  |  |  runtime filters: RF006 <- customer.c_current_addr_sk
+|  |  |  row-size=38B cardinality=51.30K
 |  |  |
 |  |  |--13:EXCHANGE [BROADCAST]
 |  |  |  |
 |  |  |  03:SCAN HDFS [tpcds.customer]
 |  |  |     partitions=1/1 files=1 size=12.60MB
 |  |  |     predicates: c_preferred_cust_flag = 'Y'
+|  |  |     row-size=17B cardinality=50.00K
 |  |  |
 |  |  02:SCAN HDFS [tpcds.customer_address]
 |  |     partitions=1/1 files=1 size=5.25MB
 |  |     runtime filters: RF006 -> customer_address.ca_address_sk
+|  |     row-size=21B cardinality=50.00K
 |  |
 |  16:EXCHANGE [HASH(substr(ca_zip, 1, 5))]
 |  |
@@ -561,20 +642,25 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=5.25MB
 |     predicates: substr(ca_zip, 1, 5) IN ('89436', '30868', '65085', '22977', '83927', '77557', '58429', '40697', '80614', '10502', '32779', '91137', '61265', '98294', '17921', '18427', '21203', '59362', '87291', '84093', '21505', '17184', '10866', '67898', '25797', '28055', '18377', '80332', '74535', '21757', '29742', '90885', '29898', '17819', '40811', '25990', '47513', '89531', '91068', '10391', '18846', '99223', '82637', '41368', '83658', '86199', '81625', '26696', '89338', '88425', '32200', '81427', '19053', '77471', '36610', '99823', '43276', '41249', '48584', '83550', '82276', '18842', '78890', '14090', '38123', '40936', '34425', '19850', '43286', '80072', '79188', '54191', '11395', '50497', '84861', '90733', '21068', '57666', '37119', '25004', '57835', '70067', '62878', '95806', '19303', '18840', '19124', '29785', '16737', '16022', '49613', '89977', '68310', '60069', '98360', '48649', '39050', '41793', '25002', '27413', '39736', '47208', '16515', '94808', '57648', '15009', 
 '80015', '42961', '63982', '21744', '71853', '81087', '67468', '34175', '64008', '20261', '11201', '51799', '48043', '45645', '61163', '48375', '36447', '57042', '21218', '41100', '89951', '22745', '35851', '83326', '61125', '78298', '80752', '49858', '52940', '96976', '63792', '11376', '53582', '18717', '90226', '50530', '94203', '99447', '27670', '96577', '57856', '56372', '16165', '23427', '54561', '28806', '44439', '22926', '30123', '61451', '92397', '56979', '92309', '70873', '13355', '21801', '46346', '37562', '56458', '28286', '47306', '99555', '69399', '26234', '47546', '49661', '88601', '35943', '39936', '25632', '24611', '44166', '56648', '30379', '59785', '11110', '14329', '93815', '52226', '71381', '13842', '25612', '63294', '14664', '21077', '82626', '18799', '60915', '81020', '56447', '76619', '11433', '13414', '42548', '92713', '70467', '30884', '47484', '16072', '38936', '13036', '88376', '45539', '35901', '19506', '65690', '73957', '71850', '49231', '14276', '20005'
 , '18384', '76615', '11635', '38177', '55607', '41369', '95447', '58581', '58149', '91946', '33790', '76232', '75692', '95464', '22246', '51061', '56692', '53121', '77209', '15482', '10688', '14868', '45907', '73520', '72666', '25734', '17959', '24677', '66446', '94627', '53535', '15560', '41967', '69297', '11929', '59403', '33283', '52232', '57350', '43933', '40921', '36635', '10827', '71286', '19736', '80619', '25251', '95042', '15526', '36496', '55854', '49124', '81980', '35375', '49157', '63512', '28944', '14946', '36503', '54010', '18767', '23969', '43905', '66979', '33113', '21286', '58471', '59080', '13395', '79144', '70373', '67031', '38360', '26705', '50906', '52406', '26066', '73146', '15884', '31897', '30045', '61068', '45550', '92454', '13376', '14354', '19770', '22928', '97790', '50723', '46081', '30202', '14410', '20223', '88500', '67298', '13261', '14172', '81410', '93578', '83583', '46047', '94167', '82564', '21156', '15799', '86709', '37931', '74703', '83103', '2305
 4', '70470', '72008', '49247', '91911', '69998', '20961', '70070', '63197', '54853', '88191', '91830', '49521', '19454', '81450', '89091', '62378', '25683', '61869', '51744', '36580', '85778', '36871', '48121', '28810', '83712', '45486', '67393', '26935', '42393', '20132', '55349', '86057', '21309', '80218', '10094', '11357', '48819', '39734', '40758', '30432', '21204', '29467', '30214', '61024', '55307', '74621', '11622', '68908', '33032', '52868', '99194', '99900', '84936', '69036', '99149', '45013', '32895', '59004', '32322', '14933', '32936', '33562', '72550', '27385', '58049', '58200', '16808', '21360', '32961', '18586', '79307', '15492')
 |     runtime filters: RF004 -> substr(ca_zip, 1, 5)
+|     row-size=17B cardinality=5.00K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  runtime filters: RF002 <- store.s_store_sk
+|  row-size=45B cardinality=84.40K
 |
 |--12:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     runtime filters: RF000 -> substr(store.s_zip, 1, 2)
+|     row-size=37B cardinality=12
 |
 00:SCAN HDFS [tpcds.store_sales]
+   partition predicates: ss_sold_date_sk >= 2452276, ss_sold_date_sk <= 2452366
    partitions=90/1824 files=90 size=10.32MB
    runtime filters: RF002 -> store_sales.ss_store_sk
+   row-size=8B cardinality=84.40K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=92.27MB Threads=15
 Per-Host Resource Estimates: Memory=284MB
@@ -586,20 +672,24 @@ PLAN-ROOT SINK
 |
 11:TOP-N [LIMIT=100]
 |  order by: s_store_name ASC
+|  row-size=32B cardinality=8
 |
 19:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_net_profit)
 |  group by: s_store_name
+|  row-size=32B cardinality=8
 |
 18:EXCHANGE [HASH(s_store_name)]
 |
 10:AGGREGATE [STREAMING]
 |  output: sum(ss_net_profit)
 |  group by: s_store_name
+|  row-size=32B cardinality=8
 |
 09:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: substr(store.s_zip, 1, 2) = substr(substr(ca_zip, 1, 5), 1, 2)
 |  runtime filters: RF000 <- substr(substr(ca_zip, 1, 5), 1, 2)
+|  row-size=65B cardinality=84.40K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -610,6 +700,7 @@ PLAN-ROOT SINK
 |  07:HASH JOIN [RIGHT SEMI JOIN, PARTITIONED]
 |  |  hash predicates: substr(ca_zip, 1, 5) = substr(ca_zip, 1, 5)
 |  |  runtime filters: RF004 <- substr(ca_zip, 1, 5)
+|  |  row-size=20B cardinality=396
 |  |
 |  |--JOIN BUILD
 |  |  |  join-table-id=01 plan-id=02 cohort-id=02
@@ -619,16 +710,19 @@ PLAN-ROOT SINK
 |  |  |  output: count:merge(*)
 |  |  |  group by: substr(ca_zip, 1, 5)
 |  |  |  having: count(*) > 10
+|  |  |  row-size=20B cardinality=396
 |  |  |
 |  |  14:EXCHANGE [HASH(substr(ca_zip, 1, 5))]
 |  |  |
 |  |  05:AGGREGATE [STREAMING]
 |  |  |  output: count(*)
 |  |  |  group by: substr(ca_zip, 1, 5)
+|  |  |  row-size=20B cardinality=3.96K
 |  |  |
 |  |  04:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  |  hash predicates: customer_address.ca_address_sk = customer.c_current_addr_sk
 |  |  |  runtime filters: RF006 <- customer.c_current_addr_sk
+|  |  |  row-size=38B cardinality=51.30K
 |  |  |
 |  |  |--JOIN BUILD
 |  |  |  |  join-table-id=02 plan-id=03 cohort-id=03
@@ -639,10 +733,12 @@ PLAN-ROOT SINK
 |  |  |  03:SCAN HDFS [tpcds.customer]
 |  |  |     partitions=1/1 files=1 size=12.60MB
 |  |  |     predicates: c_preferred_cust_flag = 'Y'
+|  |  |     row-size=17B cardinality=50.00K
 |  |  |
 |  |  02:SCAN HDFS [tpcds.customer_address]
 |  |     partitions=1/1 files=1 size=5.25MB
 |  |     runtime filters: RF006 -> customer_address.ca_address_sk
+|  |     row-size=21B cardinality=50.00K
 |  |
 |  16:EXCHANGE [HASH(substr(ca_zip, 1, 5))]
 |  |
@@ -650,10 +746,12 @@ PLAN-ROOT SINK
 |     partitions=1/1 files=1 size=5.25MB
 |     predicates: substr(ca_zip, 1, 5) IN ('89436', '30868', '65085', '22977', '83927', '77557', '58429', '40697', '80614', '10502', '32779', '91137', '61265', '98294', '17921', '18427', '21203', '59362', '87291', '84093', '21505', '17184', '10866', '67898', '25797', '28055', '18377', '80332', '74535', '21757', '29742', '90885', '29898', '17819', '40811', '25990', '47513', '89531', '91068', '10391', '18846', '99223', '82637', '41368', '83658', '86199', '81625', '26696', '89338', '88425', '32200', '81427', '19053', '77471', '36610', '99823', '43276', '41249', '48584', '83550', '82276', '18842', '78890', '14090', '38123', '40936', '34425', '19850', '43286', '80072', '79188', '54191', '11395', '50497', '84861', '90733', '21068', '57666', '37119', '25004', '57835', '70067', '62878', '95806', '19303', '18840', '19124', '29785', '16737', '16022', '49613', '89977', '68310', '60069', '98360', '48649', '39050', '41793', '25002', '27413', '39736', '47208', '16515', '94808', '57648', '15009', 
 '80015', '42961', '63982', '21744', '71853', '81087', '67468', '34175', '64008', '20261', '11201', '51799', '48043', '45645', '61163', '48375', '36447', '57042', '21218', '41100', '89951', '22745', '35851', '83326', '61125', '78298', '80752', '49858', '52940', '96976', '63792', '11376', '53582', '18717', '90226', '50530', '94203', '99447', '27670', '96577', '57856', '56372', '16165', '23427', '54561', '28806', '44439', '22926', '30123', '61451', '92397', '56979', '92309', '70873', '13355', '21801', '46346', '37562', '56458', '28286', '47306', '99555', '69399', '26234', '47546', '49661', '88601', '35943', '39936', '25632', '24611', '44166', '56648', '30379', '59785', '11110', '14329', '93815', '52226', '71381', '13842', '25612', '63294', '14664', '21077', '82626', '18799', '60915', '81020', '56447', '76619', '11433', '13414', '42548', '92713', '70467', '30884', '47484', '16072', '38936', '13036', '88376', '45539', '35901', '19506', '65690', '73957', '71850', '49231', '14276', '20005'
 , '18384', '76615', '11635', '38177', '55607', '41369', '95447', '58581', '58149', '91946', '33790', '76232', '75692', '95464', '22246', '51061', '56692', '53121', '77209', '15482', '10688', '14868', '45907', '73520', '72666', '25734', '17959', '24677', '66446', '94627', '53535', '15560', '41967', '69297', '11929', '59403', '33283', '52232', '57350', '43933', '40921', '36635', '10827', '71286', '19736', '80619', '25251', '95042', '15526', '36496', '55854', '49124', '81980', '35375', '49157', '63512', '28944', '14946', '36503', '54010', '18767', '23969', '43905', '66979', '33113', '21286', '58471', '59080', '13395', '79144', '70373', '67031', '38360', '26705', '50906', '52406', '26066', '73146', '15884', '31897', '30045', '61068', '45550', '92454', '13376', '14354', '19770', '22928', '97790', '50723', '46081', '30202', '14410', '20223', '88500', '67298', '13261', '14172', '81410', '93578', '83583', '46047', '94167', '82564', '21156', '15799', '86709', '37931', '74703', '83103', '2305
 4', '70470', '72008', '49247', '91911', '69998', '20961', '70070', '63197', '54853', '88191', '91830', '49521', '19454', '81450', '89091', '62378', '25683', '61869', '51744', '36580', '85778', '36871', '48121', '28810', '83712', '45486', '67393', '26935', '42393', '20132', '55349', '86057', '21309', '80218', '10094', '11357', '48819', '39734', '40758', '30432', '21204', '29467', '30214', '61024', '55307', '74621', '11622', '68908', '33032', '52868', '99194', '99900', '84936', '69036', '99149', '45013', '32895', '59004', '32322', '14933', '32936', '33562', '72550', '27385', '58049', '58200', '16808', '21360', '32961', '18586', '79307', '15492')
 |     runtime filters: RF004 -> substr(ca_zip, 1, 5)
+|     row-size=17B cardinality=5.00K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  runtime filters: RF002 <- store.s_store_sk
+|  row-size=45B cardinality=84.40K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -664,10 +762,13 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     runtime filters: RF000 -> substr(store.s_zip, 1, 2)
+|     row-size=37B cardinality=12
 |
 00:SCAN HDFS [tpcds.store_sales]
+   partition predicates: ss_sold_date_sk >= 2452276, ss_sold_date_sk <= 2452366
    partitions=90/1824 files=90 size=10.32MB
    runtime filters: RF002 -> store_sales.ss_store_sk
+   row-size=8B cardinality=84.40K
 ====
 # TPCDS-Q19
 select
@@ -713,54 +814,68 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=100]
 |  order by: sum(ss_ext_sales_price) DESC, i_brand ASC, i_brand_id ASC, i_manufact_id ASC, i_manufact ASC
+|  row-size=76B cardinality=24
 |
 11:AGGREGATE [FINALIZE]
 |  output: sum(ss_ext_sales_price)
 |  group by: i_brand, i_brand_id, i_manufact_id, i_manufact
+|  row-size=76B cardinality=24
 |
 10:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_store_sk = s_store_sk
 |  other predicates: substr(ca_zip, 1, 5) != substr(s_zip, 1, 5)
 |  runtime filters: RF000 <- s_store_sk
+|  row-size=158B cardinality=24
 |
 |--05:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
+|     row-size=21B cardinality=12
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: ca_address_sk = c_current_addr_sk
 |  runtime filters: RF002 <- c_current_addr_sk
+|  row-size=137B cardinality=24
 |
 |--08:HASH JOIN [INNER JOIN]
 |  |  hash predicates: c_customer_sk = ss_customer_sk
 |  |  runtime filters: RF004 <- ss_customer_sk
+|  |  row-size=116B cardinality=24
 |  |
 |  |--07:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: ss_sold_date_sk = d_date_sk
 |  |  |  runtime filters: RF006 <- d_date_sk
+|  |  |  row-size=108B cardinality=24
 |  |  |
 |  |  |--00:SCAN HDFS [tpcds.date_dim]
 |  |  |     partitions=1/1 files=1 size=9.84MB
 |  |  |     predicates: d_year = 1999, d_moy = 11, tpcds.date_dim.d_date_sk <= 2451513, tpcds.date_dim.d_date_sk >= 2451484
+|  |  |     row-size=12B cardinality=50
 |  |  |
 |  |  06:HASH JOIN [INNER JOIN]
 |  |  |  hash predicates: ss_item_sk = i_item_sk
 |  |  |  runtime filters: RF008 <- i_item_sk
+|  |  |  row-size=96B cardinality=873
 |  |  |
 |  |  |--02:SCAN HDFS [tpcds.item]
 |  |  |     partitions=1/1 files=1 size=4.82MB
 |  |  |     predicates: i_manager_id = 7
+|  |  |     row-size=72B cardinality=182
 |  |  |
 |  |  01:SCAN HDFS [tpcds.store_sales]
+|  |     partition predicates: ss_sold_date_sk >= 2451484, ss_sold_date_sk <= 2451513
 |  |     partitions=30/1824 files=30 size=10.55MB
 |  |     runtime filters: RF000 -> ss_store_sk, RF006 -> ss_sold_date_sk, RF008 -> ss_item_sk
+|  |     row-size=24B cardinality=86.28K
 |  |
 |  03:SCAN HDFS [tpcds.customer]
 |     partitions=1/1 files=1 size=12.60MB
 |     runtime filters: RF004 -> c_customer_sk
+|     row-size=8B cardinality=100.00K
 |
 04:SCAN HDFS [tpcds.customer_address]
    partitions=1/1 files=1 size=5.25MB
    runtime filters: RF002 -> ca_address_sk
+   row-size=21B cardinality=50.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=54.13MB Threads=16
 Per-Host Resource Estimates: Memory=360MB
@@ -772,73 +887,88 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=100]
 |  order by: sum(ss_ext_sales_price) DESC, i_brand ASC, i_brand_id ASC, i_manufact_id ASC, i_manufact ASC
+|  row-size=76B cardinality=24
 |
 21:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_ext_sales_price)
 |  group by: i_brand, i_brand_id, i_manufact_id, i_manufact
+|  row-size=76B cardinality=24
 |
 20:EXCHANGE [HASH(i_brand,i_brand_id,i_manufact_id,i_manufact)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(ss_ext_sales_price)
 |  group by: i_brand, i_brand_id, i_manufact_id, i_manufact
+|  row-size=76B cardinality=24
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_store_sk = s_store_sk
 |  other predicates: substr(ca_zip, 1, 5) != substr(s_zip, 1, 5)
 |  runtime filters: RF000 <- s_store_sk
+|  row-size=158B cardinality=24
 |
 |--19:EXCHANGE [BROADCAST]
 |  |
 |  05:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
+|     row-size=21B cardinality=12
 |
 09:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: c_current_addr_sk = ca_address_sk
 |  runtime filters: RF002 <- ca_address_sk
+|  row-size=137B cardinality=24
 |
 |--18:EXCHANGE [HASH(ca_address_sk)]
 |  |
 |  04:SCAN HDFS [tpcds.customer_address]
 |     partitions=1/1 files=1 size=5.25MB
+|     row-size=21B cardinality=50.00K
 |
 17:EXCHANGE [HASH(c_current_addr_sk)]
 |
 08:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: ss_customer_sk = c_customer_sk
 |  runtime filters: RF004 <- c_customer_sk
+|  row-size=116B cardinality=24
 |
 |--16:EXCHANGE [HASH(c_customer_sk)]
 |  |
 |  03:SCAN HDFS [tpcds.customer]
 |     partitions=1/1 files=1 size=12.60MB
 |     runtime filters: RF002 -> c_current_addr_sk
+|     row-size=8B cardinality=100.00K
 |
 15:EXCHANGE [HASH(ss_customer_sk)]
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF006 <- d_date_sk
+|  row-size=108B cardinality=24
 |
 |--14:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1999, d_moy = 11, tpcds.date_dim.d_date_sk <= 2451513, tpcds.date_dim.d_date_sk >= 2451484
+|     row-size=12B cardinality=50
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF008 <- i_item_sk
+|  row-size=96B cardinality=873
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: i_manager_id = 7
+|     row-size=72B cardinality=182
 |
 01:SCAN HDFS [tpcds.store_sales]
+   partition predicates: ss_sold_date_sk >= 2451484, ss_sold_date_sk <= 2451513
    partitions=30/1824 files=30 size=10.55MB
    runtime filters: RF000 -> ss_store_sk, RF004 -> ss_customer_sk, RF006 -> ss_sold_date_sk, RF008 -> ss_item_sk
+   row-size=24B cardinality=86.28K
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=108.27MB Threads=19
 Per-Host Resource Estimates: Memory=303MB
@@ -850,21 +980,25 @@ PLAN-ROOT SINK
 |
 12:TOP-N [LIMIT=100]
 |  order by: sum(ss_ext_sales_price) DESC, i_brand ASC, i_brand_id ASC, i_manufact_id ASC, i_manufact ASC
+|  row-size=76B cardinality=24
 |
 21:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_ext_sales_price)
 |  group by: i_brand, i_brand_id, i_manufact_id, i_manufact
+|  row-size=76B cardinality=24
 |
 20:EXCHANGE [HASH(i_brand,i_brand_id,i_manufact_id,i_manufact)]
 |
 11:AGGREGATE [STREAMING]
 |  output: sum(ss_ext_sales_price)
 |  group by: i_brand, i_brand_id, i_manufact_id, i_manufact
+|  row-size=76B cardinality=24
 |
 10:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_store_sk = s_store_sk
 |  other predicates: substr(ca_zip, 1, 5) != substr(s_zip, 1, 5)
 |  runtime filters: RF000 <- s_store_sk
+|  row-size=158B cardinality=24
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -874,10 +1008,12 @@ PLAN-ROOT SINK
 |  |
 |  05:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
+|     row-size=21B cardinality=12
 |
 09:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: c_current_addr_sk = ca_address_sk
 |  runtime filters: RF002 <- ca_address_sk
+|  row-size=137B cardinality=24
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -887,12 +1023,14 @@ PLAN-ROOT SINK
 |  |
 |  04:SCAN HDFS [tpcds.customer_address]
 |     partitions=1/1 files=1 size=5.25MB
+|     row-size=21B cardinality=50.00K
 |
 17:EXCHANGE [HASH(c_current_addr_sk)]
 |
 08:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: ss_customer_sk = c_customer_sk
 |  runtime filters: RF004 <- c_customer_sk
+|  row-size=116B cardinality=24
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -903,12 +1041,14 @@ PLAN-ROOT SINK
 |  03:SCAN HDFS [tpcds.customer]
 |     partitions=1/1 files=1 size=12.60MB
 |     runtime filters: RF002 -> c_current_addr_sk
+|     row-size=8B cardinality=100.00K
 |
 15:EXCHANGE [HASH(ss_customer_sk)]
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF006 <- d_date_sk
+|  row-size=108B cardinality=24
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -919,10 +1059,12 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1999, d_moy = 11, tpcds.date_dim.d_date_sk <= 2451513, tpcds.date_dim.d_date_sk >= 2451484
+|     row-size=12B cardinality=50
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF008 <- i_item_sk
+|  row-size=96B cardinality=873
 |
 |--JOIN BUILD
 |  |  join-table-id=04 plan-id=05 cohort-id=01
@@ -933,10 +1075,13 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: i_manager_id = 7
+|     row-size=72B cardinality=182
 |
 01:SCAN HDFS [tpcds.store_sales]
+   partition predicates: ss_sold_date_sk >= 2451484, ss_sold_date_sk <= 2451513
    partitions=30/1824 files=30 size=10.55MB
    runtime filters: RF000 -> ss_store_sk, RF004 -> ss_customer_sk, RF006 -> ss_sold_date_sk, RF008 -> ss_item_sk
+   row-size=24B cardinality=86.28K
 ====
 # TPCDS-Q27
 select
@@ -976,45 +1121,56 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100]
 |  order by: i_item_id ASC, s_state ASC
+|  row-size=74B cardinality=100
 |
 09:AGGREGATE [FINALIZE]
 |  output: avg(ss_quantity), avg(ss_list_price), avg(ss_coupon_amt), avg(ss_sales_price)
 |  group by: i_item_id, s_state
+|  row-size=74B cardinality=8.85K
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF000 <- i_item_sk
+|  row-size=150B cardinality=263.34K
 |
 |--04:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
+|     row-size=36B cardinality=18.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_store_sk = s_store_sk
 |  runtime filters: RF002 <- s_store_sk
+|  row-size=114B cardinality=263.34K
 |
 |--03:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: s_state IN ('WI', 'CA', 'TX', 'FL', 'WA', 'TN')
+|     row-size=18B cardinality=12
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_cdemo_sk = cd_demo_sk
 |  runtime filters: RF004 <- cd_demo_sk
+|  row-size=96B cardinality=263.34K
 |
 |--01:SCAN HDFS [tpcds.customer_demographics]
 |     partitions=1/1 files=1 size=76.92MB
 |     predicates: cd_marital_status = 'W', cd_gender = 'F', cd_education_status = 'Primary'
+|     row-size=52B cardinality=97.40K
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF006 <- d_date_sk
+|  row-size=44B cardinality=589.03K
 |
 |--02:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=8B cardinality=373
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_item_sk, RF002 -> ss_store_sk, RF004 -> ss_cdemo_sk, RF006 -> ss_sold_date_sk
+   row-size=36B cardinality=2.88M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=54.26MB Threads=12
 Per-Host Resource Estimates: Memory=460MB
@@ -1026,59 +1182,71 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100]
 |  order by: i_item_id ASC, s_state ASC
+|  row-size=74B cardinality=100
 |
 16:AGGREGATE [FINALIZE]
 |  output: avg:merge(ss_quantity), avg:merge(ss_list_price), avg:merge(ss_coupon_amt), avg:merge(ss_sales_price)
 |  group by: i_item_id, s_state
+|  row-size=74B cardinality=8.85K
 |
 15:EXCHANGE [HASH(i_item_id,s_state)]
 |
 09:AGGREGATE [STREAMING]
 |  output: avg(ss_quantity), avg(ss_list_price), avg(ss_coupon_amt), avg(ss_sales_price)
 |  group by: i_item_id, s_state
+|  row-size=74B cardinality=8.85K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF000 <- i_item_sk
+|  row-size=150B cardinality=263.34K
 |
 |--14:EXCHANGE [BROADCAST]
 |  |
 |  04:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
+|     row-size=36B cardinality=18.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_store_sk = s_store_sk
 |  runtime filters: RF002 <- s_store_sk
+|  row-size=114B cardinality=263.34K
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: s_state IN ('WI', 'CA', 'TX', 'FL', 'WA', 'TN')
+|     row-size=18B cardinality=12
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_cdemo_sk = cd_demo_sk
 |  runtime filters: RF004 <- cd_demo_sk
+|  row-size=96B cardinality=263.34K
 |
 |--12:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpcds.customer_demographics]
 |     partitions=1/1 files=1 size=76.92MB
 |     predicates: cd_marital_status = 'W', cd_gender = 'F', cd_education_status = 'Primary'
+|     row-size=52B cardinality=97.40K
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF006 <- d_date_sk
+|  row-size=44B cardinality=589.03K
 |
 |--11:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=8B cardinality=373
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_item_sk, RF002 -> ss_store_sk, RF004 -> ss_cdemo_sk, RF006 -> ss_sold_date_sk
+   row-size=36B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=108.52MB Threads=13
 Per-Host Resource Estimates: Memory=398MB
@@ -1090,20 +1258,24 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100]
 |  order by: i_item_id ASC, s_state ASC
+|  row-size=74B cardinality=100
 |
 16:AGGREGATE [FINALIZE]
 |  output: avg:merge(ss_quantity), avg:merge(ss_list_price), avg:merge(ss_coupon_amt), avg:merge(ss_sales_price)
 |  group by: i_item_id, s_state
+|  row-size=74B cardinality=8.85K
 |
 15:EXCHANGE [HASH(i_item_id,s_state)]
 |
 09:AGGREGATE [STREAMING]
 |  output: avg(ss_quantity), avg(ss_list_price), avg(ss_coupon_amt), avg(ss_sales_price)
 |  group by: i_item_id, s_state
+|  row-size=74B cardinality=8.85K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF000 <- i_item_sk
+|  row-size=150B cardinality=263.34K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -1113,10 +1285,12 @@ PLAN-ROOT SINK
 |  |
 |  04:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
+|     row-size=36B cardinality=18.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_store_sk = s_store_sk
 |  runtime filters: RF002 <- s_store_sk
+|  row-size=114B cardinality=263.34K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -1127,10 +1301,12 @@ PLAN-ROOT SINK
 |  03:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: s_state IN ('WI', 'CA', 'TX', 'FL', 'WA', 'TN')
+|     row-size=18B cardinality=12
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_cdemo_sk = cd_demo_sk
 |  runtime filters: RF004 <- cd_demo_sk
+|  row-size=96B cardinality=263.34K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -1141,10 +1317,12 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [tpcds.customer_demographics]
 |     partitions=1/1 files=1 size=76.92MB
 |     predicates: cd_marital_status = 'W', cd_gender = 'F', cd_education_status = 'Primary'
+|     row-size=52B cardinality=97.40K
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF006 <- d_date_sk
+|  row-size=44B cardinality=589.03K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -1155,10 +1333,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=8B cardinality=373
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_item_sk, RF002 -> ss_store_sk, RF004 -> ss_cdemo_sk, RF006 -> ss_sold_date_sk
+   row-size=36B cardinality=2.88M
 ====
 # TPCDS-Q34
 select
@@ -1211,47 +1391,58 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100000]
 |  order by: c_last_name ASC, c_first_name ASC, c_salutation ASC, c_preferred_cust_flag DESC
+|  row-size=80B cardinality=31.53K
 |
 09:HASH JOIN [INNER JOIN]
 |  hash predicates: c_customer_sk = ss_customer_sk
 |  runtime filters: RF000 <- ss_customer_sk
+|  row-size=88B cardinality=31.53K
 |
 |--07:AGGREGATE [FINALIZE]
 |  |  output: count(*)
 |  |  group by: ss_ticket_number, ss_customer_sk
 |  |  having: count(*) <= 20, count(*) >= 15
+|  |  row-size=20B cardinality=31.53K
 |  |
 |  06:HASH JOIN [INNER JOIN]
 |  |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  |  runtime filters: RF002 <- store.s_store_sk
+|  |  row-size=101B cardinality=31.53K
 |  |
 |  |--02:SCAN HDFS [tpcds.store]
 |  |     partitions=1/1 files=1 size=3.08KB
 |  |     predicates: store.s_county IN ('Saginaw County', 'Sumner County', 'Appanoose County', 'Daviess County', 'Fairfield County', 'Raleigh County', 'Ziebach County', 'Williamson County')
+|  |     row-size=33B cardinality=12
 |  |
 |  05:HASH JOIN [INNER JOIN]
 |  |  hash predicates: store_sales.ss_sold_date_sk = date_dim.d_date_sk
 |  |  runtime filters: RF004 <- date_dim.d_date_sk
+|  |  row-size=68B cardinality=31.53K
 |  |
 |  |--01:SCAN HDFS [tpcds.date_dim]
 |  |     partitions=1/1 files=1 size=9.84MB
 |  |     predicates: date_dim.d_year IN (1998, 1999, 2000), (date_dim.d_dom >= 1 AND date_dim.d_dom <= 3 OR date_dim.d_dom >= 25 AND date_dim.d_dom <= 28)
+|  |     row-size=12B cardinality=354
 |  |
 |  04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
 |  |  runtime filters: RF006 <- household_demographics.hd_demo_sk
+|  |  row-size=56B cardinality=162.45K
 |  |
 |  |--03:SCAN HDFS [tpcds.household_demographics]
 |  |     partitions=1/1 files=1 size=148.10KB
 |  |     predicates: household_demographics.hd_vehicle_count > 0, household_demographics.hd_buy_potential IN ('>10000', 'unknown'), (CASE WHEN household_demographics.hd_vehicle_count > 0 THEN household_demographics.hd_dep_count / household_demographics.hd_vehicle_count ELSE NULL END) > 1.2
+|  |     row-size=32B cardinality=416
 |  |
 |  00:SCAN HDFS [tpcds.store_sales]
 |     partitions=1824/1824 files=1824 size=346.60MB
 |     runtime filters: RF002 -> store_sales.ss_store_sk, RF004 -> store_sales.ss_sold_date_sk, RF006 -> store_sales.ss_hdemo_sk
+|     row-size=24B cardinality=2.88M
 |
 08:SCAN HDFS [tpcds.customer]
    partitions=1/1 files=1 size=12.60MB
    runtime filters: RF000 -> c_customer_sk
+   row-size=68B cardinality=100.00K
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=47.51MB Threads=13
 Per-Host Resource Estimates: Memory=337MB
@@ -1263,15 +1454,18 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100000]
 |  order by: c_last_name ASC, c_first_name ASC, c_salutation ASC, c_preferred_cust_flag DESC
+|  row-size=80B cardinality=31.53K
 |
 09:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: ss_customer_sk = c_customer_sk
 |  runtime filters: RF000 <- c_customer_sk
+|  row-size=88B cardinality=31.53K
 |
 |--17:EXCHANGE [HASH(c_customer_sk)]
 |  |
 |  08:SCAN HDFS [tpcds.customer]
 |     partitions=1/1 files=1 size=12.60MB
+|     row-size=68B cardinality=100.00K
 |
 16:EXCHANGE [HASH(ss_customer_sk)]
 |
@@ -1279,46 +1473,55 @@ PLAN-ROOT SINK
 |  output: count:merge(*)
 |  group by: ss_ticket_number, ss_customer_sk
 |  having: count(*) <= 20, count(*) >= 15
+|  row-size=20B cardinality=31.53K
 |
 14:EXCHANGE [HASH(ss_ticket_number,ss_customer_sk)]
 |
 07:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: ss_ticket_number, ss_customer_sk
+|  row-size=20B cardinality=31.53K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  runtime filters: RF002 <- store.s_store_sk
+|  row-size=101B cardinality=31.53K
 |
 |--13:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: store.s_county IN ('Saginaw County', 'Sumner County', 'Appanoose County', 'Daviess County', 'Fairfield County', 'Raleigh County', 'Ziebach County', 'Williamson County')
+|     row-size=33B cardinality=12
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_sold_date_sk = date_dim.d_date_sk
 |  runtime filters: RF004 <- date_dim.d_date_sk
+|  row-size=68B cardinality=31.53K
 |
 |--12:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: date_dim.d_year IN (1998, 1999, 2000), (date_dim.d_dom >= 1 AND date_dim.d_dom <= 3 OR date_dim.d_dom >= 25 AND date_dim.d_dom <= 28)
+|     row-size=12B cardinality=354
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
 |  runtime filters: RF006 <- household_demographics.hd_demo_sk
+|  row-size=56B cardinality=162.45K
 |
 |--11:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpcds.household_demographics]
 |     partitions=1/1 files=1 size=148.10KB
 |     predicates: household_demographics.hd_vehicle_count > 0, household_demographics.hd_buy_potential IN ('>10000', 'unknown'), (CASE WHEN household_demographics.hd_vehicle_count > 0 THEN household_demographics.hd_dep_count / household_demographics.hd_vehicle_count ELSE NULL END) > 1.2
+|     row-size=32B cardinality=416
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> tpcds.store_sales.ss_customer_sk, RF002 -> store_sales.ss_store_sk, RF004 -> store_sales.ss_sold_date_sk, RF006 -> store_sales.ss_hdemo_sk
+   row-size=24B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=87.52MB Threads=15
 Per-Host Resource Estimates: Memory=280MB
@@ -1330,10 +1533,12 @@ PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=100000]
 |  order by: c_last_name ASC, c_first_name ASC, c_salutation ASC, c_preferred_cust_flag DESC
+|  row-size=80B cardinality=31.53K
 |
 09:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: ss_customer_sk = c_customer_sk
 |  runtime filters: RF000 <- c_customer_sk
+|  row-size=88B cardinality=31.53K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -1343,6 +1548,7 @@ PLAN-ROOT SINK
 |  |
 |  08:SCAN HDFS [tpcds.customer]
 |     partitions=1/1 files=1 size=12.60MB
+|     row-size=68B cardinality=100.00K
 |
 16:EXCHANGE [HASH(ss_customer_sk)]
 |
@@ -1350,16 +1556,19 @@ PLAN-ROOT SINK
 |  output: count:merge(*)
 |  group by: ss_ticket_number, ss_customer_sk
 |  having: count(*) <= 20, count(*) >= 15
+|  row-size=20B cardinality=31.53K
 |
 14:EXCHANGE [HASH(ss_ticket_number,ss_customer_sk)]
 |
 07:AGGREGATE [STREAMING]
 |  output: count(*)
 |  group by: ss_ticket_number, ss_customer_sk
+|  row-size=20B cardinality=31.53K
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  runtime filters: RF002 <- store.s_store_sk
+|  row-size=101B cardinality=31.53K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -1370,10 +1579,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: store.s_county IN ('Saginaw County', 'Sumner County', 'Appanoose County', 'Daviess County', 'Fairfield County', 'Raleigh County', 'Ziebach County', 'Williamson County')
+|     row-size=33B cardinality=12
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_sold_date_sk = date_dim.d_date_sk
 |  runtime filters: RF004 <- date_dim.d_date_sk
+|  row-size=68B cardinality=31.53K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -1384,10 +1595,12 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: date_dim.d_year IN (1998, 1999, 2000), (date_dim.d_dom >= 1 AND date_dim.d_dom <= 3 OR date_dim.d_dom >= 25 AND date_dim.d_dom <= 28)
+|     row-size=12B cardinality=354
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
 |  runtime filters: RF006 <- household_demographics.hd_demo_sk
+|  row-size=56B cardinality=162.45K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -1398,10 +1611,12 @@ PLAN-ROOT SINK
 |  03:SCAN HDFS [tpcds.household_demographics]
 |     partitions=1/1 files=1 size=148.10KB
 |     predicates: household_demographics.hd_vehicle_count > 0, household_demographics.hd_buy_potential IN ('>10000', 'unknown'), (CASE WHEN household_demographics.hd_vehicle_count > 0 THEN household_demographics.hd_dep_count / household_demographics.hd_vehicle_count ELSE NULL END) > 1.2
+|     row-size=32B cardinality=416
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> tpcds.store_sales.ss_customer_sk, RF002 -> store_sales.ss_store_sk, RF004 -> store_sales.ss_sold_date_sk, RF006 -> store_sales.ss_hdemo_sk
+   row-size=24B cardinality=2.88M
 ====
 # TPCDS-Q42
 select
@@ -1436,30 +1651,37 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: sum(ss_ext_sales_price) DESC, dt.d_year ASC, item.i_category_id ASC, item.i_category ASC
+|  row-size=42B cardinality=100
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_category_id, item.i_category
+|  row-size=42B cardinality=1.73K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=62B cardinality=1.73K
 |
 |--00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_year = 1998, dt.d_moy = 12
+|     row-size=12B cardinality=108
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=50B cardinality=29.12K
 |
 |--02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manager_id = 1
+|     row-size=34B cardinality=182
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=33.81MB Threads=8
 Per-Host Resource Estimates: Memory=234MB
@@ -1471,40 +1693,48 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: sum(ss_ext_sales_price) DESC, dt.d_year ASC, item.i_category_id ASC, item.i_category ASC
+|  row-size=42B cardinality=100
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_category_id, item.i_category
+|  row-size=42B cardinality=1.73K
 |
 09:EXCHANGE [HASH(dt.d_year,item.i_category_id,item.i_category)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_category_id, item.i_category
+|  row-size=42B cardinality=1.73K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=62B cardinality=1.73K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_year = 1998, dt.d_moy = 12
+|     row-size=12B cardinality=108
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=50B cardinality=29.12K
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manager_id = 1
+|     row-size=34B cardinality=182
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=67.62MB Threads=9
 Per-Host Resource Estimates: Memory=164MB
@@ -1516,20 +1746,24 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: sum(ss_ext_sales_price) DESC, dt.d_year ASC, item.i_category_id ASC, item.i_category ASC
+|  row-size=42B cardinality=100
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_category_id, item.i_category
+|  row-size=42B cardinality=1.73K
 |
 09:EXCHANGE [HASH(dt.d_year,item.i_category_id,item.i_category)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_category_id, item.i_category
+|  row-size=42B cardinality=1.73K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=62B cardinality=1.73K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -1540,10 +1774,12 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_year = 1998, dt.d_moy = 12
+|     row-size=12B cardinality=108
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=50B cardinality=29.12K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -1554,10 +1790,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manager_id = 1
+|     row-size=34B cardinality=182
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ====
 # TPCDS-Q43
 select
@@ -1600,30 +1838,37 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: s_store_name ASC, s_store_id ASC, sum(CASE WHEN (d_day_name = 'Sunday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Monday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Tuesday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Wednesday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Thursday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Friday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Saturday') THEN ss_sales_price ELSE NULL END) ASC
+|  row-size=156B cardinality=48
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(CASE WHEN (d_day_name = 'Sunday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Monday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Tuesday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Wednesday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Thursday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Friday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Saturday') THEN ss_sales_price ELSE NULL END)
 |  group by: s_store_name, s_store_id
+|  row-size=156B cardinality=48
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_store_sk = s_store_sk
 |  runtime filters: RF000 <- s_store_sk
+|  row-size=91B cardinality=589.03K
 |
 |--02:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: s_gmt_offset = -5
+|     row-size=52B cardinality=12
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF002 <- d_date_sk
+|  row-size=39B cardinality=589.03K
 |
 |--00:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=27B cardinality=373
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_store_sk, RF002 -> ss_sold_date_sk
+   row-size=12B cardinality=2.88M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=25.82MB Threads=8
 Per-Host Resource Estimates: Memory=234MB
@@ -1635,40 +1880,48 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: s_store_name ASC, s_store_id ASC, sum(CASE WHEN (d_day_name = 'Sunday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Monday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Tuesday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Wednesday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Thursday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Friday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Saturday') THEN ss_sales_price ELSE NULL END) ASC
+|  row-size=156B cardinality=48
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN (d_day_name = 'Sunday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Monday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Tuesday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Wednesday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Thursday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Friday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Saturday') THEN ss_sales_price ELSE NULL END)
 |  group by: s_store_name, s_store_id
+|  row-size=156B cardinality=48
 |
 09:EXCHANGE [HASH(s_store_name,s_store_id)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(CASE WHEN (d_day_name = 'Sunday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Monday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Tuesday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Wednesday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Thursday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Friday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Saturday') THEN ss_sales_price ELSE NULL END)
 |  group by: s_store_name, s_store_id
+|  row-size=156B cardinality=48
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_store_sk = s_store_sk
 |  runtime filters: RF000 <- s_store_sk
+|  row-size=91B cardinality=589.03K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: s_gmt_offset = -5
+|     row-size=52B cardinality=12
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF002 <- d_date_sk
+|  row-size=39B cardinality=589.03K
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=27B cardinality=373
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_store_sk, RF002 -> ss_sold_date_sk
+   row-size=12B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=51.64MB Threads=9
 Per-Host Resource Estimates: Memory=164MB
@@ -1680,20 +1933,24 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: s_store_name ASC, s_store_id ASC, sum(CASE WHEN (d_day_name = 'Sunday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Monday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Tuesday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Wednesday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Thursday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Friday') THEN ss_sales_price ELSE NULL END) ASC, sum(CASE WHEN (d_day_name = 'Saturday') THEN ss_sales_price ELSE NULL END) ASC
+|  row-size=156B cardinality=48
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(CASE WHEN (d_day_name = 'Sunday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Monday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Tuesday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Wednesday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Thursday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Friday') THEN ss_sales_price ELSE NULL END), sum:merge(CASE WHEN (d_day_name = 'Saturday') THEN ss_sales_price ELSE NULL END)
 |  group by: s_store_name, s_store_id
+|  row-size=156B cardinality=48
 |
 09:EXCHANGE [HASH(s_store_name,s_store_id)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(CASE WHEN (d_day_name = 'Sunday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Monday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Tuesday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Wednesday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Thursday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Friday') THEN ss_sales_price ELSE NULL END), sum(CASE WHEN (d_day_name = 'Saturday') THEN ss_sales_price ELSE NULL END)
 |  group by: s_store_name, s_store_id
+|  row-size=156B cardinality=48
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_store_sk = s_store_sk
 |  runtime filters: RF000 <- s_store_sk
+|  row-size=91B cardinality=589.03K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -1704,10 +1961,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: s_gmt_offset = -5
+|     row-size=52B cardinality=12
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF002 <- d_date_sk
+|  row-size=39B cardinality=589.03K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -1718,10 +1977,12 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_year = 1998
+|     row-size=27B cardinality=373
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_store_sk, RF002 -> ss_sold_date_sk
+   row-size=12B cardinality=2.88M
 ====
 # TPCDS-Q46
 select
@@ -1781,61 +2042,76 @@ PLAN-ROOT SINK
 |
 14:TOP-N [LIMIT=100]
 |  order by: c_last_name ASC, c_first_name ASC, ca_city ASC, bought_city ASC, ss_ticket_number ASC
+|  row-size=118B cardinality=100
 |
 13:HASH JOIN [INNER JOIN]
 |  hash predicates: customer.c_current_addr_sk = current_addr.ca_address_sk
 |  other predicates: current_addr.ca_city != ca_city
 |  runtime filters: RF000 <- current_addr.ca_address_sk
+|  row-size=138B cardinality=230.45K
 |
 |--11:SCAN HDFS [tpcds.customer_address current_addr]
 |     partitions=1/1 files=1 size=5.25MB
+|     row-size=25B cardinality=50.00K
 |
 12:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_customer_sk = c_customer_sk
 |  runtime filters: RF002 <- c_customer_sk
+|  row-size=113B cardinality=230.45K
 |
 |--10:SCAN HDFS [tpcds.customer]
 |     partitions=1/1 files=1 size=12.60MB
 |     runtime filters: RF000 -> customer.c_current_addr_sk
+|     row-size=44B cardinality=100.00K
 |
 09:AGGREGATE [FINALIZE]
 |  output: sum(ss_coupon_amt), sum(ss_net_profit)
 |  group by: ss_ticket_number, ss_customer_sk, ss_addr_sk, ca_city
+|  row-size=69B cardinality=230.45K
 |
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_addr_sk = customer_address.ca_address_sk
 |  runtime filters: RF004 <- customer_address.ca_address_sk
+|  row-size=107B cardinality=230.45K
 |
 |--04:SCAN HDFS [tpcds.customer_address]
 |     partitions=1/1 files=1 size=5.25MB
+|     row-size=25B cardinality=50.00K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  runtime filters: RF006 <- store.s_store_sk
+|  row-size=83B cardinality=230.45K
 |
 |--02:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: store.s_city IN ('Midway', 'Concord', 'Spring Hill', 'Brownsville', 'Greenville')
+|     row-size=23B cardinality=12
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_sold_date_sk = date_dim.d_date_sk
 |  runtime filters: RF008 <- date_dim.d_date_sk
+|  row-size=60B cardinality=230.45K
 |
 |--01:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: date_dim.d_dow IN (6, 0), date_dim.d_year IN (1999, 2000, 2001)
+|     row-size=12B cardinality=598
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
 |  runtime filters: RF010 <- household_demographics.hd_demo_sk
+|  row-size=48B cardinality=702.92K
 |
 |--03:SCAN HDFS [tpcds.household_demographics]
 |     partitions=1/1 files=1 size=148.10KB
 |     predicates: (household_demographics.hd_dep_count = 5 OR household_demographics.hd_vehicle_count = 3)
+|     row-size=12B cardinality=1.80K
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF002 -> tpcds.store_sales.ss_customer_sk, RF004 -> store_sales.ss_addr_sk, RF006 -> store_sales.ss_store_sk, RF008 -> store_sales.ss_sold_date_sk, RF010 -> store_sales.ss_hdemo_sk
+   row-size=36B cardinality=2.88M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=80.20MB Threads=17
 Per-Host Resource Estimates: Memory=412MB
@@ -1847,81 +2123,97 @@ PLAN-ROOT SINK
 |
 14:TOP-N [LIMIT=100]
 |  order by: c_last_name ASC, c_first_name ASC, ca_city ASC, bought_city ASC, ss_ticket_number ASC
+|  row-size=118B cardinality=100
 |
 13:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: customer.c_current_addr_sk = current_addr.ca_address_sk
 |  other predicates: current_addr.ca_city != ca_city
 |  runtime filters: RF000 <- current_addr.ca_address_sk
+|  row-size=138B cardinality=230.45K
 |
 |--23:EXCHANGE [BROADCAST]
 |  |
 |  11:SCAN HDFS [tpcds.customer_address current_addr]
 |     partitions=1/1 files=1 size=5.25MB
+|     row-size=25B cardinality=50.00K
 |
 12:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: ss_customer_sk = c_customer_sk
 |  runtime filters: RF002 <- c_customer_sk
+|  row-size=113B cardinality=230.45K
 |
 |--22:EXCHANGE [HASH(c_customer_sk)]
 |  |
 |  10:SCAN HDFS [tpcds.customer]
 |     partitions=1/1 files=1 size=12.60MB
 |     runtime filters: RF000 -> customer.c_current_addr_sk
+|     row-size=44B cardinality=100.00K
 |
 21:EXCHANGE [HASH(ss_customer_sk)]
 |
 20:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_coupon_amt), sum:merge(ss_net_profit)
 |  group by: ss_ticket_number, ss_customer_sk, ss_addr_sk, ca_city
+|  row-size=69B cardinality=230.45K
 |
 19:EXCHANGE [HASH(ss_ticket_number,ss_customer_sk,ss_addr_sk,ca_city)]
 |
 09:AGGREGATE [STREAMING]
 |  output: sum(ss_coupon_amt), sum(ss_net_profit)
 |  group by: ss_ticket_number, ss_customer_sk, ss_addr_sk, ca_city
+|  row-size=69B cardinality=230.45K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_addr_sk = customer_address.ca_address_sk
 |  runtime filters: RF004 <- customer_address.ca_address_sk
+|  row-size=107B cardinality=230.45K
 |
 |--18:EXCHANGE [BROADCAST]
 |  |
 |  04:SCAN HDFS [tpcds.customer_address]
 |     partitions=1/1 files=1 size=5.25MB
+|     row-size=25B cardinality=50.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  runtime filters: RF006 <- store.s_store_sk
+|  row-size=83B cardinality=230.45K
 |
 |--17:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: store.s_city IN ('Midway', 'Concord', 'Spring Hill', 'Brownsville', 'Greenville')
+|     row-size=23B cardinality=12
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_sold_date_sk = date_dim.d_date_sk
 |  runtime filters: RF008 <- date_dim.d_date_sk
+|  row-size=60B cardinality=230.45K
 |
 |--16:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: date_dim.d_dow IN (6, 0), date_dim.d_year IN (1999, 2000, 2001)
+|     row-size=12B cardinality=598
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
 |  runtime filters: RF010 <- household_demographics.hd_demo_sk
+|  row-size=48B cardinality=702.92K
 |
 |--15:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpcds.household_demographics]
 |     partitions=1/1 files=1 size=148.10KB
 |     predicates: (household_demographics.hd_dep_count = 5 OR household_demographics.hd_vehicle_count = 3)
+|     row-size=12B cardinality=1.80K
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF002 -> tpcds.store_sales.ss_customer_sk, RF004 -> store_sales.ss_addr_sk, RF006 -> store_sales.ss_store_sk, RF008 -> store_sales.ss_sold_date_sk, RF010 -> store_sales.ss_hdemo_sk
+   row-size=36B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=143.02MB Threads=19
 Per-Host Resource Estimates: Memory=361MB
@@ -1933,11 +2225,13 @@ PLAN-ROOT SINK
 |
 14:TOP-N [LIMIT=100]
 |  order by: c_last_name ASC, c_first_name ASC, ca_city ASC, bought_city ASC, ss_ticket_number ASC
+|  row-size=118B cardinality=100
 |
 13:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: customer.c_current_addr_sk = current_addr.ca_address_sk
 |  other predicates: current_addr.ca_city != ca_city
 |  runtime filters: RF000 <- current_addr.ca_address_sk
+|  row-size=138B cardinality=230.45K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -1947,10 +2241,12 @@ PLAN-ROOT SINK
 |  |
 |  11:SCAN HDFS [tpcds.customer_address current_addr]
 |     partitions=1/1 files=1 size=5.25MB
+|     row-size=25B cardinality=50.00K
 |
 12:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: ss_customer_sk = c_customer_sk
 |  runtime filters: RF002 <- c_customer_sk
+|  row-size=113B cardinality=230.45K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -1961,22 +2257,26 @@ PLAN-ROOT SINK
 |  10:SCAN HDFS [tpcds.customer]
 |     partitions=1/1 files=1 size=12.60MB
 |     runtime filters: RF000 -> customer.c_current_addr_sk
+|     row-size=44B cardinality=100.00K
 |
 21:EXCHANGE [HASH(ss_customer_sk)]
 |
 20:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_coupon_amt), sum:merge(ss_net_profit)
 |  group by: ss_ticket_number, ss_customer_sk, ss_addr_sk, ca_city
+|  row-size=69B cardinality=230.45K
 |
 19:EXCHANGE [HASH(ss_ticket_number,ss_customer_sk,ss_addr_sk,ca_city)]
 |
 09:AGGREGATE [STREAMING]
 |  output: sum(ss_coupon_amt), sum(ss_net_profit)
 |  group by: ss_ticket_number, ss_customer_sk, ss_addr_sk, ca_city
+|  row-size=69B cardinality=230.45K
 |
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_addr_sk = customer_address.ca_address_sk
 |  runtime filters: RF004 <- customer_address.ca_address_sk
+|  row-size=107B cardinality=230.45K
 |
 |--JOIN BUILD
 |  |  join-table-id=02 plan-id=03 cohort-id=01
@@ -1986,10 +2286,12 @@ PLAN-ROOT SINK
 |  |
 |  04:SCAN HDFS [tpcds.customer_address]
 |     partitions=1/1 files=1 size=5.25MB
+|     row-size=25B cardinality=50.00K
 |
 07:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_store_sk = store.s_store_sk
 |  runtime filters: RF006 <- store.s_store_sk
+|  row-size=83B cardinality=230.45K
 |
 |--JOIN BUILD
 |  |  join-table-id=03 plan-id=04 cohort-id=01
@@ -2000,10 +2302,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
 |     predicates: store.s_city IN ('Midway', 'Concord', 'Spring Hill', 'Brownsville', 'Greenville')
+|     row-size=23B cardinality=12
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_sold_date_sk = date_dim.d_date_sk
 |  runtime filters: RF008 <- date_dim.d_date_sk
+|  row-size=60B cardinality=230.45K
 |
 |--JOIN BUILD
 |  |  join-table-id=04 plan-id=05 cohort-id=01
@@ -2014,10 +2318,12 @@ PLAN-ROOT SINK
 |  01:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: date_dim.d_dow IN (6, 0), date_dim.d_year IN (1999, 2000, 2001)
+|     row-size=12B cardinality=598
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
 |  runtime filters: RF010 <- household_demographics.hd_demo_sk
+|  row-size=48B cardinality=702.92K
 |
 |--JOIN BUILD
 |  |  join-table-id=05 plan-id=06 cohort-id=01
@@ -2028,10 +2334,12 @@ PLAN-ROOT SINK
 |  03:SCAN HDFS [tpcds.household_demographics]
 |     partitions=1/1 files=1 size=148.10KB
 |     predicates: (household_demographics.hd_dep_count = 5 OR household_demographics.hd_vehicle_count = 3)
+|     row-size=12B cardinality=1.80K
 |
 00:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF002 -> tpcds.store_sales.ss_customer_sk, RF004 -> store_sales.ss_addr_sk, RF006 -> store_sales.ss_store_sk, RF008 -> store_sales.ss_sold_date_sk, RF010 -> store_sales.ss_hdemo_sk
+   row-size=36B cardinality=2.88M
 ====
 # TPCDS-Q52
 select
@@ -2065,30 +2373,37 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: dt.d_year ASC, sum(ss_ext_sales_price) DESC, item.i_brand_id ASC
+|  row-size=52B cardinality=100
 |
 05:AGGREGATE [FINALIZE]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=1.73K
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=72B cardinality=1.73K
 |
 |--00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_year = 1998, dt.d_moy = 12
+|     row-size=12B cardinality=108
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=60B cardinality=29.12K
 |
 |--02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manager_id = 1
+|     row-size=44B cardinality=182
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=33.81MB Threads=8
 Per-Host Resource Estimates: Memory=234MB
@@ -2100,40 +2415,48 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: dt.d_year ASC, sum(ss_ext_sales_price) DESC, item.i_brand_id ASC
+|  row-size=52B cardinality=100
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=1.73K
 |
 09:EXCHANGE [HASH(dt.d_year,item.i_brand,item.i_brand_id)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=1.73K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=72B cardinality=1.73K
 |
 |--08:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_year = 1998, dt.d_moy = 12
+|     row-size=12B cardinality=108
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=60B cardinality=29.12K
 |
 |--07:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manager_id = 1
+|     row-size=44B cardinality=182
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=67.62MB Threads=9
 Per-Host Resource Estimates: Memory=164MB
@@ -2145,20 +2468,24 @@ PLAN-ROOT SINK
 |
 06:TOP-N [LIMIT=100]
 |  order by: dt.d_year ASC, sum(ss_ext_sales_price) DESC, item.i_brand_id ASC
+|  row-size=52B cardinality=100
 |
 10:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=1.73K
 |
 09:EXCHANGE [HASH(dt.d_year,item.i_brand,item.i_brand_id)]
 |
 05:AGGREGATE [STREAMING]
 |  output: sum(ss_ext_sales_price)
 |  group by: dt.d_year, item.i_brand, item.i_brand_id
+|  row-size=52B cardinality=1.73K
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_sold_date_sk = dt.d_date_sk
 |  runtime filters: RF000 <- dt.d_date_sk
+|  row-size=72B cardinality=1.73K
 |
 |--JOIN BUILD
 |  |  join-table-id=00 plan-id=01 cohort-id=01
@@ -2169,10 +2496,12 @@ PLAN-ROOT SINK
 |  00:SCAN HDFS [tpcds.date_dim dt]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: dt.d_year = 1998, dt.d_moy = 12
+|     row-size=12B cardinality=108
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: store_sales.ss_item_sk = item.i_item_sk
 |  runtime filters: RF002 <- item.i_item_sk
+|  row-size=60B cardinality=29.12K
 |
 |--JOIN BUILD
 |  |  join-table-id=01 plan-id=02 cohort-id=01
@@ -2183,10 +2512,12 @@ PLAN-ROOT SINK
 |  02:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: item.i_manager_id = 1
+|     row-size=44B cardinality=182
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> store_sales.ss_sold_date_sk, RF002 -> store_sales.ss_item_sk
+   row-size=16B cardinality=2.88M
 ====
 # TPCDS-Q53
 select
@@ -2226,37 +2557,46 @@ PLAN-ROOT SINK
 |
 08:TOP-N [LIMIT=100]
 |  order by: sum_sales ASC, i_manufact_id ASC
+|  row-size=20B cardinality=96
 |
 07:AGGREGATE [FINALIZE]
 |  output: sum(ss_sales_price)
 |  group by: i_manufact_id, d_qoy
+|  row-size=24B cardinality=96
 |
 06:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_store_sk = s_store_sk
 |  runtime filters: RF000 <- s_store_sk
+|  row-size=114B cardinality=96
 |
 |--03:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
+|     row-size=4B cardinality=12
 |
 05:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF002 <- d_date_sk
+|  row-size=110B cardinality=96
 |
 |--02:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_month_seq IN (1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223)
+|     row-size=12B cardinality=362
 |
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF004 <- i_item_sk
+|  row-size=98B cardinality=481
 |
 |--00:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: ((i_category IN ('Books', 'Children', 'Electronics') AND i_class IN ('personal', 'portable', 'reference', 'self-help') AND i_brand IN ('scholaramalgamalg #14', 'scholaramalgamalg #7', 'exportiunivamalg #9', 'scholaramalgamalg #9')) OR (i_category IN ('Women', 'Music', 'Men') AND i_class IN ('accessories', 'classical', 'fragrances', 'pants') AND i_brand IN ('amalgimporto #1', 'edu packscholar #1', 'exportiimporto #1', 'importoamalg #1')))
+|     row-size=78B cardinality=3
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_store_sk, RF002 -> ss_sold_date_sk, RF004 -> ss_item_sk
+   row-size=20B cardinality=2.88M
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=36.76MB Threads=10
 Per-Host Resource Estimates: Memory=269MB
@@ -2268,49 +2608,59 @@ PLAN-ROOT SINK
 |
 08:TOP-N [LIMIT=100]
 |  order by: sum_sales ASC, i_manufact_id ASC
+|  row-size=20B cardinality=96
 |
 13:AGGREGATE [FINALIZE]
 |  output: sum:merge(ss_sales_price)
 |  group by: i_manufact_id, d_qoy
+|  row-size=24B cardinality=96
 |
 12:EXCHANGE [HASH(i_manufact_id,d_qoy)]
 |
 07:AGGREGATE [STREAMING]
 |  output: sum(ss_sales_price)
 |  group by: i_manufact_id, d_qoy
+|  row-size=24B cardinality=96
 |
 06:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_store_sk = s_store_sk
 |  runtime filters: RF000 <- s_store_sk
+|  row-size=114B cardinality=96
 |
 |--11:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [tpcds.store]
 |     partitions=1/1 files=1 size=3.08KB
+|     row-size=4B cardinality=12
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_sold_date_sk = d_date_sk
 |  runtime filters: RF002 <- d_date_sk
+|  row-size=110B cardinality=96
 |
 |--10:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [tpcds.date_dim]
 |     partitions=1/1 files=1 size=9.84MB
 |     predicates: d_month_seq IN (1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223)
+|     row-size=12B cardinality=362
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: ss_item_sk = i_item_sk
 |  runtime filters: RF004 <- i_item_sk
+|  row-size=98B cardinality=481
 |
 |--09:EXCHANGE [BROADCAST]
 |  |
 |  00:SCAN HDFS [tpcds.item]
 |     partitions=1/1 files=1 size=4.82MB
 |     predicates: ((i_category IN ('Books', 'Children', 'Electronics') AND i_class IN ('personal', 'portable', 'reference', 'self-help') AND i_brand IN ('scholaramalgamalg #14', 'scholaramalgamalg #7', 'exportiunivamalg #9', 'scholaramalgamalg #9')) OR (i_category IN ('Women', 'Music', 'Men') AND i_class IN ('accessories', 'classical', 'fragrances', 'pants') AND i_brand IN ('amalgimporto #1', 'edu packscholar #1', 'exportiimporto #1', 'importoamalg #1')))
+|     row-size=78B cardinality=3
 |
 01:SCAN HDFS [tpcds.store_sales]
    partitions=1824/1824 files=1824 size=346.60MB
    runtime filters: RF000 -> ss_store_sk, RF002 -> ss_sold_date_sk, RF004 -> ss_item_sk
+   row-size=20B cardinality=2.88M
 ---- PARALLELPLANS
 Max Per-Host Resource Reservation: Memory=73.52MB Threads=11
 Per-Host Resource Estimates: Memory=202MB
@@ -2322,20 +2672,24 @@ PLAN-ROOT SINK
 |
 08:TOP-N [LIMIT=100]
 |  order by: sum_sales ASC, i_manufact_id ASC
+|  row-size=20B cardinality=96
 |
 13:AGGREGATE [FINALIZE]
 |  output: sum:merge(s

<TRUNCATED>