Posted to commits@impala.apache.org by mi...@apache.org on 2019/01/24 17:58:11 UTC

[impala] branch master updated (6c0ec34 -> bfb9ccc)

This is an automated email from the ASF dual-hosted git repository.

mikeb pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git.


    from 6c0ec34  IMPALA-8091: incremental improvements to NTP sync
     new 365e35a  Using 'master' branch of Impala-lzo and allowing test-with-docker to configure it.
     new 81d0bcb  Support centos:7 for test-with-docker.
     new fe47b23  IMPALA-7841 (Part 1): Refactor SelectStmt for easier debugging
     new 85a8b34  IMPALA-7905: Hive keywords not quoted for identifiers
     new bfb9ccc  IMPALA-7832: Support for IF NOT EXISTS in ALTER TABLE ADD COLUMN(S)

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 bin/bootstrap_system.sh                            |  80 +++-
 common/thrift/JniCatalog.thrift                    |  22 +-
 docker/entrypoint.sh                               |  32 +-
 docker/test-with-docker.py                         |  14 +-
 fe/src/main/cup/sql-parser.cup                     |  24 +-
 ...aceColsStmt.java => AlterTableAddColsStmt.java} |  75 ++-
 ...olsStmt.java => AlterTableReplaceColsStmt.java} |  81 +---
 .../java/org/apache/impala/analysis/Analyzer.java  |   8 +-
 .../apache/impala/analysis/CollectionTableRef.java |   2 +-
 .../org/apache/impala/analysis/InlineViewRef.java  |   2 +-
 .../org/apache/impala/analysis/InsertStmt.java     |   9 +-
 .../java/org/apache/impala/analysis/QueryStmt.java |   2 +-
 .../org/apache/impala/analysis/SelectStmt.java     | 176 ++++---
 .../java/org/apache/impala/analysis/Subquery.java  |   2 +-
 .../org/apache/impala/analysis/ToSqlUtils.java     | 144 ++++--
 .../org/apache/impala/analysis/WithClause.java     |   2 +-
 .../java/org/apache/impala/common/PrintUtils.java  |   1 +
 .../java/org/apache/impala/planner/PlanNode.java   |   4 +-
 .../apache/impala/service/CatalogOpExecutor.java   | 106 +++--
 .../org/apache/impala/analysis/AnalyzeDDLTest.java | 187 +++++++-
 .../apache/impala/analysis/AnalyzeExprsTest.java   |   3 +-
 .../apache/impala/analysis/AnalyzeStmtsTest.java   |   2 +-
 .../impala/analysis/AuthorizationStmtTest.java     |   1 +
 .../impala/analysis/ExprRewriteRulesTest.java      |   6 +-
 .../apache/impala/analysis/ExprRewriterTest.java   |  10 +-
 .../org/apache/impala/analysis/ParserTest.java     |  20 +-
 .../java/org/apache/impala/analysis/ToSqlTest.java |  55 ++-
 .../org/apache/impala/analysis/ToSqlUtilsTest.java | 174 +++++++
 .../org/apache/impala/planner/PlannerTest.java     |   4 +-
 .../org/apache/impala/util/PrintUtilsTest.java     |  14 +-
 .../queries/PlannerTest/aggregation.test           |  44 +-
 .../queries/PlannerTest/analytic-fns.test          |  16 +-
 .../queries/PlannerTest/constant-folding.test      |   4 +-
 .../queries/PlannerTest/ddl.test                   |   8 +-
 .../queries/PlannerTest/hbase.test                 |  12 +-
 .../queries/PlannerTest/hdfs.test                  | 186 ++++----
 .../queries/PlannerTest/implicit-joins.test        |   2 +-
 .../queries/PlannerTest/inline-view.test           |  32 +-
 .../queries/PlannerTest/insert-sort-by.test        |  22 +-
 .../queries/PlannerTest/insert.test                |  88 ++--
 .../queries/PlannerTest/join-order.test            |   8 +-
 .../queries/PlannerTest/joins.test                 |  62 +--
 .../queries/PlannerTest/kudu-upsert.test           |   4 +-
 .../queries/PlannerTest/lineage.test               |  88 ++--
 .../queries/PlannerTest/nested-collections.test    |  38 +-
 .../queries/PlannerTest/order.test                 |  72 +--
 .../PlannerTest/parquet-filtering-disabled.test    |   4 +-
 .../queries/PlannerTest/parquet-filtering.test     |   8 +-
 .../queries/PlannerTest/parquet-stats-agg.test     |  20 +-
 .../queries/PlannerTest/partition-key-scans.test   |  66 +--
 .../queries/PlannerTest/predicate-propagation.test | 186 ++++----
 .../queries/PlannerTest/resource-requirements.test |  26 +-
 .../PlannerTest/runtime-filter-propagation.test    | 166 +++----
 .../PlannerTest/runtime-filter-query-options.test  | 128 +++---
 .../PlannerTest/shuffle-by-distinct-exprs.test     |  92 ++--
 .../queries/PlannerTest/small-query-opt.test       |   6 +-
 .../queries/PlannerTest/subquery-rewrite.test      |  24 +-
 .../queries/PlannerTest/tablesample.test           |   4 +-
 .../queries/PlannerTest/union.test                 | 508 ++++++++++-----------
 .../queries/PlannerTest/views.test                 |   4 +-
 .../queries/QueryTest/alter-table.test             |  64 +++
 .../queries/QueryTest/kudu_alter.test              |   4 +-
 .../queries/QueryTest/kudu_insert.test             |   2 +-
 .../queries/QueryTest/stats-extrapolation.test     |  12 +-
 64 files changed, 1913 insertions(+), 1359 deletions(-)
 copy fe/src/main/java/org/apache/impala/analysis/{AlterTableAddReplaceColsStmt.java => AlterTableAddColsStmt.java} (74%)
 rename fe/src/main/java/org/apache/impala/analysis/{AlterTableAddReplaceColsStmt.java => AlterTableReplaceColsStmt.java} (60%)


[impala] 01/05: Using 'master' branch of Impala-lzo and allowing test-with-docker to configure it.

Posted by mi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mikeb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 365e35a36f142532044d8a34cbb87646bf9cb240
Author: Philip Zeyliger <ph...@cloudera.com>
AuthorDate: Tue Jan 22 11:58:34 2019 -0800

    Using 'master' branch of Impala-lzo and allowing test-with-docker to configure it.
    
    This updates bootstrap_system.sh to check out the 'master' branch of
    Impala-lzo. (I've separately updated the 'master' branch to
    be identical to today's cdh5-trunk branch; it had grown a few
    years stale.) I've also added support for passing this configuration
    through test-with-docker.
    
    This allows for Impala 2.x and 3.x to diverge here, and it allows
    for testing changes to Impala-lzo.
    
    Change-Id: Ieba45fc18d9e490f75d16c477cdc1cce26f41ce9
    Reviewed-on: http://gerrit.cloudera.org:8080/12259
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 bin/bootstrap_system.sh    |  2 +-
 docker/entrypoint.sh       |  8 ++++++++
 docker/test-with-docker.py | 14 +++++++++++++-
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/bin/bootstrap_system.sh b/bin/bootstrap_system.sh
index 28df210..9b2d070 100755
--- a/bin/bootstrap_system.sh
+++ b/bin/bootstrap_system.sh
@@ -318,7 +318,7 @@ echo ">>> Checking out Impala-lzo"
 : ${IMPALA_LZO_HOME:="${IMPALA_HOME}/../Impala-lzo"}
 if ! [[ -d "$IMPALA_LZO_HOME" ]]
 then
-  git clone https://github.com/cloudera/impala-lzo.git "$IMPALA_LZO_HOME"
+  git clone --branch master https://github.com/cloudera/impala-lzo.git "$IMPALA_LZO_HOME"
 fi
 
 echo ">>> Checking out and building hadoop-lzo"
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 58eb5ef..8e78dfb 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -197,6 +197,14 @@ function build_impdev() {
   git fetch /git_common_dir --no-tags "$GIT_HEAD_REV"
   git checkout -b test-with-docker FETCH_HEAD
 
+  # Checkout impala-lzo too
+  mkdir /home/impdev/Impala-lzo
+  pushd /home/impdev/Impala-lzo
+  git init
+  git fetch $IMPALA_LZO_REPO --no-tags "$IMPALA_LZO_REF"
+  git checkout -b test-with-docker FETCH_HEAD
+  popd
+
   # Link in logs. Logs are on the host since that's the most important thing to
   # look at after the tests are run.
   ln -sf /logs logs
diff --git a/docker/test-with-docker.py b/docker/test-with-docker.py
index c9d688e..8bb35b0 100755
--- a/docker/test-with-docker.py
+++ b/docker/test-with-docker.py
@@ -184,6 +184,11 @@ def main():
                       default=os.path.expanduser("~/.ccache"))
   parser.add_argument('--tail', action="store_true",
       help="Run tail on all container log files.")
+  parser.add_argument('--impala-lzo-repo',
+      default="https://github.com/cloudera/impala-lzo.git",
+      help="Git repo for Impala-lzo repo")
+  parser.add_argument('--impala-lzo-ref', default='master',
+      help="Branch name for Impala-lzo repo.")
   parser.add_argument('--env', metavar='K=V', default=[], action='append',
       help="""Passes given environment variables (expressed as KEY=VALUE)
            through containers.
@@ -205,6 +210,8 @@ def main():
       suite_concurrency=args.suite_concurrency,
       impalad_mem_limit_bytes=args.impalad_mem_limit_bytes,
       tail=args.tail,
+      impala_lzo_repo=args.impala_lzo_repo,
+      impala_lzo_ref=args.impala_lzo_ref,
       env=args.env, base_image=args.base_image)
 
   fh = logging.FileHandler(os.path.join(_make_dir_if_not_exist(t.log_dir), "log.txt"))
@@ -438,7 +445,8 @@ class TestWithDocker(object):
   def __init__(self, build_image, suite_names, name, cleanup_containers,
                cleanup_image, ccache_dir, test_mode,
                suite_concurrency, parallel_test_concurrency,
-               impalad_mem_limit_bytes, tail, env, base_image):
+               impalad_mem_limit_bytes, tail,
+               impala_lzo_repo, impala_lzo_ref, env, base_image):
     self.build_image = build_image
     self.name = name
     self.containers = []
@@ -474,6 +482,8 @@ class TestWithDocker(object):
     self.parallel_test_concurrency = parallel_test_concurrency
     self.impalad_mem_limit_bytes = impalad_mem_limit_bytes
     self.tail = tail
+    self.impala_lzo_repo = impala_lzo_repo
+    self.impala_lzo_ref = impala_lzo_ref
     self.env = env
     self.base_image = base_image
 
@@ -558,6 +568,8 @@ class TestWithDocker(object):
           "-v", self.git_root + ":/repo:ro",
           "-v", self.git_common_dir + ":/git_common_dir:ro",
           "-e", "GIT_HEAD_REV=" + self.git_head_rev,
+          "-e", "IMPALA_LZO_REPO=" + self.impala_lzo_repo,
+          "-e", "IMPALA_LZO_REF=" + self.impala_lzo_ref,
           # Share timezone between host and container
           "-e", "LOCALTIME_LINK_TARGET=" + localtime_link_target,
           "-v", self.ccache_dir + ":/ccache",

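With the two new flags in place, a test-with-docker run can point at any fork
or branch of Impala-lzo. A minimal invocation sketch (the values shown are
just the new argparse defaults; any repo URL and ref should work the same
way):

    ./docker/test-with-docker.py \
        --impala-lzo-repo=https://github.com/cloudera/impala-lzo.git \
        --impala-lzo-ref=master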

[impala] 02/05: Support centos:7 for test-with-docker.

Posted by mi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mikeb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 81d0bcb3c967bf76b6858b221416e6fcb863b187
Author: Philip Zeyliger <ph...@cloudera.com>
AuthorDate: Wed Dec 26 13:21:20 2018 -0800

    Support centos:7 for test-with-docker.
    
    As a follow-on to IMPALA-7698, adds various incantations
    so that centos:7 can build under test-with-docker.
    
    The core issue is that the centos:7 image doesn't let you start sshd
    (necessary for the HBase startup scripts, and probably could be worked
    around) or postgresql (harder to work around) with systemctl, because
    systemd isn't "running." To avoid this, we start them manually
    with /usr/sbin/sshd and pg_ctl.
    
    Change-Id: I7577949b6eaaa2239bcf0fadf64e1490c2106b08
    Reviewed-on: http://gerrit.cloudera.org:8080/12139
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 bin/bootstrap_system.sh | 78 +++++++++++++++++++++++++++++++++++++++++++------
 docker/entrypoint.sh    | 24 +++++++++++++--
 2 files changed, 90 insertions(+), 12 deletions(-)

diff --git a/bin/bootstrap_system.sh b/bin/bootstrap_system.sh
index 9b2d070..cb869ff 100755
--- a/bin/bootstrap_system.sh
+++ b/bin/bootstrap_system.sh
@@ -37,6 +37,10 @@
 #      adduser --disabled-password --gecos '' impdev
 #      echo 'impdev ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
 #   3. Run this script as that user: su - impdev -c /bootstrap_development.sh
+#
+# This script has some specializations for CentOS/Redhat 6/7 and Ubuntu.
+# Of note, inside of Docker, Redhat 7 doesn't allow you to start daemons
+# with systemctl, so sshd and postgresql are started manually in those cases.
 
 set -eu -o pipefail
 
@@ -67,9 +71,21 @@ set -x
 
 # Determine whether we're running on redhat or ubuntu
 REDHAT=
+REDHAT6=
+REDHAT7=
 UBUNTU=
+IN_DOCKER=
 if [[ -f /etc/redhat-release ]]; then
   REDHAT=true
+  echo "Identified redhat system."
+  if grep 'release 7\.' /etc/redhat-release; then
+    REDHAT7=true
+    echo "Identified redhat7 system."
+  fi
+  if grep 'release 6\.' /etc/redhat-release; then
+    REDHAT6=true
+    echo "Identified redhat6 system."
+  fi
   # TODO: restrict redhat versions
 else
   source /etc/lsb-release
@@ -86,6 +102,10 @@ else
   fi
   UBUNTU=true
 fi
+if grep docker /proc/1/cgroup; then
+  IN_DOCKER=true
+  echo "Identified we are running inside of Docker."
+fi
 
 # Helper function to execute following command only on Ubuntu
 function ubuntu {
@@ -101,6 +121,31 @@ function redhat {
   fi
 }
 
+# Helper function to execute following command only on RedHat6
+function redhat6 {
+  if [[ "$REDHAT6" == true ]]; then
+    "$@"
+  fi
+}
+# Helper function to execute following command only on RedHat7
+function redhat7 {
+  if [[ "$REDHAT7" == true ]]; then
+    "$@"
+  fi
+}
+# Helper function to execute following command only in docker
+function indocker {
+  if [[ "$IN_DOCKER" == true ]]; then
+    "$@"
+  fi
+}
+# Helper function to execute following command only outside of docker
+function notindocker {
+  if [[ "$IN_DOCKER" != true ]]; then
+    "$@"
+  fi
+}
+
 # Note that yum has its own retries; see yum.conf(5).
 REAL_APT_GET=$(ubuntu which apt-get)
 function apt-get {
@@ -177,7 +222,10 @@ if ! { service --status-all | grep -E '^ \[ \+ \]  ssh$'; }
 then
   ubuntu sudo service ssh start
   # TODO: CentOS/RH 7 uses systemd, and this doesn't work.
-  redhat sudo service sshd start
+  redhat6 sudo service sshd start
+  redhat7 notindocker sudo service sshd start
+  redhat7 indocker sudo /usr/bin/ssh-keygen -A
+  redhat7 indocker sudo /usr/sbin/sshd
 fi
 
 # TODO: config ccache to give it plenty of space
@@ -187,7 +235,8 @@ fi
 echo ">>> Configuring system"
 
 ubuntu sudo service ntp stop
-redhat sudo service ntpd stop
+redhat6 sudo service ntpd stop
+redhat7 notindocker sudo service ntpd stop
 sudo ntpdate us.pool.ntp.org
 # If on EC2, use Amazon's ntp servers
 if which dmidecode && { sudo dmidecode -s bios-version | grep amazon; }
@@ -201,7 +250,8 @@ fi
 # is strictly needed by Kudu.
 # TODO: Make privileged docker start ntpd
 ubuntu sudo service ntp start || grep docker /proc/1/cgroup
-redhat sudo service ntpd start || grep docker /proc/1/cgroup
+redhat6 sudo service ntpd start || grep docker /proc/1/cgroup
+notindocker redhat7 sudo service ntpd start
 
 # IMPALA-3932, IMPALA-3926
 if [[ $UBUNTU = true && $DISTRIB_RELEASE = 16.04 ]]
@@ -211,8 +261,12 @@ then
   eval "$SET_LD_LIBRARY_PATH"
 fi
 
-redhat sudo service postgresql initdb
-sudo service postgresql stop
+redhat6 sudo service postgresql initdb
+redhat6 sudo service postgresql stop
+redhat7 notindocker sudo service postgresql initdb
+redhat7 notindocker sudo service postgresql stop
+redhat7 indocker sudo -u postgres PGDATA=/var/lib/pgsql/data pg_ctl init
+ubuntu sudo service postgresql stop
 
 # These configurations expose connecting to PostgreSQL via md5-hashed
 # passwords over TCP to localhost, and the local socket is trusted
@@ -224,7 +278,13 @@ redhat sudo sed -ri 's/local +all +all +ident/local all all trust/g' \
 # Accept md5 passwords from localhost
 redhat sudo sed -i -e 's,\(host.*\)ident,\1md5,' /var/lib/pgsql/data/pg_hba.conf
 
-sudo service postgresql start
+ubuntu sudo service postgresql start
+redhat6 sudo service postgresql start
+redhat7 notindocker service postgresql start
+# Important to redirect pg_ctl to a logfile, lest it keep the stdout
+# file descriptor open, preventing the shell from exiting.
+redhat7 indocker sudo -u postgres PGDATA=/var/lib/pgsql/data bash -c \
+  "pg_ctl start -w --timeout=120 >> /var/lib/pgsql/pg.log 2>&1"
 
 # Set up postgres for HMS
 if ! [[ 1 = $(sudo -u postgres psql -At -c "SELECT count(*) FROM pg_roles WHERE rolname = 'hiveuser';") ]]
@@ -281,10 +341,10 @@ sudo chown $(whoami) /var/lib/hadoop-hdfs/
 # TODO: restrict this to only the users it is needed for
 echo "* - nofile 1048576" | sudo tee -a /etc/security/limits.conf
 
-# Default on CentOS limits a user to 1024 processes (threads) , which isn't
+# Default on CentOS limits a user to 1024 or 4096 processes (threads), which isn't
 # enough for minicluster with all of its friends.
-redhat sudo sed -i 's,\*\s*soft\s*nproc\s*1024,* soft nproc unlimited,' \
-  /etc/security/limits.d/90-nproc.conf
+redhat sudo sed -i 's,\*\s*soft\s*nproc\s*[0-9]*$,* soft nproc unlimited,' \
+  /etc/security/limits.d/*-nproc.conf
 
 echo ">>> Checking out Impala"
 
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 8e78dfb..c4a1243 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -42,6 +42,19 @@
 #      where <suite> is one of: BE_TEST JDBC_TEST CLUSTER_TEST
 #                               EE_TEST_SERIAL EE_TEST_PARALLEL
 
+# Starts or stops postgres
+# The centos:7 Docker image doesn't allow systemctl to start postgresql,
+# so we start it explicitly with pg_ctl.
+function _pg_ctl() {
+  if [ -f /etc/redhat-release ]; then
+    if which systemctl; then
+      sudo -u postgres PGDATA=/var/lib/pgsql/data bash -c "pg_ctl $1 -w --timeout=120 >> /var/lib/pgsql/pg.log 2>&1"
+      return
+    fi
+  fi
+  sudo service postgresql $1
+}
+
 # Bootstraps the container by creating a user and adding basic tools like Python and git.
 # Takes a uid as an argument for the user to be created.
 function build() {
@@ -129,11 +142,16 @@ function start_minicluster {
   pushd /home/impdev/Impala
 
   # Required for metastore
-  sudo service postgresql start
+  _pg_ctl start
 
   # Required for starting HBase
   if [ -f /etc/redhat-release ]; then
-    sudo service sshd start
+    if which systemctl; then
+      # centos:7 doesn't support using systemd inside of docker to start daemons
+      sudo /usr/sbin/sshd
+    else
+      sudo service sshd start
+    fi
   else
     sudo service ssh start
   fi
@@ -235,7 +253,7 @@ function build_impdev() {
   testdata/bin/kill-all.sh
 
  # Shutting down PostgreSQL nicely speeds up its start time for new containers.
-  sudo service postgresql stop
+  _pg_ctl stop
 
   # Clean up things we don't need to reduce image size
   find be -name '*.o' -execdir rm '{}' + # ~1.6GB
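
The redhat6/redhat7/indocker/notindocker helpers compose as command prefixes,
so a single line can be gated on both the distro and the Docker check. The
pattern, shown with two real call sites from the bootstrap_system.sh hunks
above:

    # Runs only on RedHat 7 hosts outside of Docker:
    redhat7 notindocker sudo service postgresql initdb
    # Runs only on RedHat 7 inside Docker, where systemctl is unavailable:
    redhat7 indocker sudo -u postgres PGDATA=/var/lib/pgsql/data pg_ctl init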


[impala] 05/05: IMPALA-7832: Support for IF NOT EXISTS in ALTER TABLE ADD COLUMN(S)

Posted by mi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mikeb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit bfb9ccc8e02be20fb8b57bae4d55e4094ab7ea3f
Author: Fredy Wijaya <fw...@cloudera.com>
AuthorDate: Tue Jan 8 12:45:59 2019 -0800

    IMPALA-7832: Support for IF NOT EXISTS in ALTER TABLE ADD COLUMN(S)
    
    This patch adds IF NOT EXISTS support in ALTER TABLE ADD COLUMN and
    ALTER TABLE ADD COLUMNS. If IF NOT EXISTS is specified and a column
    with the same name already exists, no error is thrown. If IF NOT EXISTS
    is specified for multiple columns and some of them already exist, no
    error is thrown and only the columns that do not yet exist are added.
    
    Syntax:
    ALTER TABLE tbl ADD COLUMN [IF NOT EXISTS] i int
    ALTER TABLE tbl ADD [IF NOT EXISTS] COLUMNS (i int, j int)
    
    Testing:
    - Added new FE tests
    - Ran all FE tests
    - Updated E2E DDL tests
    - Ran all E2E DDL tests
    
    Change-Id: I60ed22c8a8eefa10e94ad3dedf32fe67c16642d9
    Reviewed-on: http://gerrit.cloudera.org:8080/12181
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 common/thrift/JniCatalog.thrift                    |  22 ++-
 fe/src/main/cup/sql-parser.cup                     |  24 +--
 ...aceColsStmt.java => AlterTableAddColsStmt.java} |  75 ++++-----
 ...olsStmt.java => AlterTableReplaceColsStmt.java} |  81 +++-------
 .../apache/impala/service/CatalogOpExecutor.java   | 106 +++++++-----
 .../org/apache/impala/analysis/AnalyzeDDLTest.java | 179 ++++++++++++++++++---
 .../impala/analysis/AuthorizationStmtTest.java     |   1 +
 .../org/apache/impala/analysis/ParserTest.java     |  20 ++-
 .../queries/QueryTest/alter-table.test             |  64 ++++++++
 .../queries/QueryTest/kudu_alter.test              |   4 +-
 .../queries/QueryTest/kudu_insert.test             |   2 +-
 11 files changed, 397 insertions(+), 181 deletions(-)

diff --git a/common/thrift/JniCatalog.thrift b/common/thrift/JniCatalog.thrift
index b936773..2d97f43 100644
--- a/common/thrift/JniCatalog.thrift
+++ b/common/thrift/JniCatalog.thrift
@@ -92,7 +92,8 @@ struct TAlterDbParams {
 
 // Types of ALTER TABLE commands supported.
 enum TAlterTableType {
-  ADD_REPLACE_COLUMNS,
+  ADD_COLUMNS,
+  REPLACE_COLUMNS,
   ADD_PARTITION,
   ADD_DROP_RANGE_PARTITION,
   ALTER_COLUMN,
@@ -207,13 +208,19 @@ struct TAlterTableOrViewRenameParams {
   1: required CatalogObjects.TTableName new_table_name
 }
 
-// Parameters for ALTER TABLE ADD|REPLACE COLUMNS commands.
-struct TAlterTableAddReplaceColsParams {
+// Parameters for ALTER TABLE ADD COLUMNS commands.
+struct TAlterTableAddColsParams {
   // List of columns to add to the table
   1: required list<CatalogObjects.TColumn> columns
 
-  // If true, replace all existing columns. If false add (append) columns to the table.
-  2: required bool replace_existing_cols
+  // If true, no error is raised when a column already exists.
+  2: required bool if_not_exists
+}
+
+// Parameters for ALTER TABLE REPLACE COLUMNS commands.
+struct TAlterTableReplaceColsParams {
+  // List of columns to replace to the table
+  1: required list<CatalogObjects.TColumn> columns
 }
 
 // Parameters for specifying a single partition in ALTER TABLE ADD PARTITION
@@ -385,7 +392,7 @@ struct TAlterTableParams {
   3: optional TAlterTableOrViewRenameParams rename_params
 
   // Parameters for ALTER TABLE ADD COLUMNS
-  4: optional TAlterTableAddReplaceColsParams add_replace_cols_params
+  4: optional TAlterTableAddColsParams add_cols_params
 
   // Parameters for ALTER TABLE ADD PARTITION
   5: optional TAlterTableAddPartitionParams add_partition_params
@@ -422,6 +429,9 @@ struct TAlterTableParams {
 
   // Parameters for ALTER TABLE/VIEW SET OWNER
   16: optional TAlterTableOrViewSetOwnerParams set_owner_params
+
+  // Parameters for ALTER TABLE REPLACE COLUMNS
+  17: optional TAlterTableReplaceColsParams replace_cols_params
 }
 
 // Parameters of CREATE TABLE LIKE commands
diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup
index bbd5ccf..051bb48 100644
--- a/fe/src/main/cup/sql-parser.cup
+++ b/fe/src/main/cup/sql-parser.cup
@@ -480,7 +480,6 @@ nonterminal THdfsFileFormat file_format_create_table_val;
 nonterminal Boolean if_exists_val;
 nonterminal Boolean if_not_exists_val;
 nonterminal Boolean is_primary_key_val;
-nonterminal Boolean replace_existing_cols_val;
 nonterminal HdfsUri location_val;
 nonterminal RowFormat row_format_val, opt_row_format_val;
 nonterminal String field_terminator_val;
@@ -1095,9 +1094,19 @@ alter_db_stmt ::=
 // a partition clause does not make sense for this stmt. If a partition
 // is given, manually throw a parse error.
 alter_tbl_stmt ::=
-  KW_ALTER KW_TABLE table_name:table replace_existing_cols_val:replace KW_COLUMNS
-  LPAREN column_def_list:col_defs RPAREN
-  {: RESULT = new AlterTableAddReplaceColsStmt(table, col_defs, replace); :}
+  KW_ALTER KW_TABLE table_name:table KW_ADD KW_COLUMN if_not_exists_val:if_not_exists
+  column_def:col_def
+  {:
+    List<ColumnDef> list = new ArrayList<>();
+    list.add(col_def);
+    RESULT = new AlterTableAddColsStmt(table, if_not_exists, list);
+  :}
+  | KW_ALTER KW_TABLE table_name:table KW_ADD if_not_exists_val:if_not_exists KW_COLUMNS
+    LPAREN column_def_list:col_defs RPAREN
+  {: RESULT = new AlterTableAddColsStmt(table, if_not_exists, col_defs); :}
+  | KW_ALTER KW_TABLE table_name:table KW_REPLACE KW_COLUMNS
+    LPAREN column_def_list:col_defs RPAREN
+  {: RESULT = new AlterTableReplaceColsStmt(table, col_defs); :}
   | KW_ALTER KW_TABLE table_name:table KW_ADD if_not_exists_val:if_not_exists
     partition_def_list:partitions
   {: RESULT = new AlterTableAddPartitionStmt(table, if_not_exists, partitions); :}
@@ -1199,13 +1208,6 @@ opt_kw_column ::=
   | /* empty */
   ;
 
-replace_existing_cols_val ::=
-  KW_REPLACE
-  {: RESULT = true; :}
-  | KW_ADD
-  {: RESULT = false; :}
-  ;
-
 create_db_stmt ::=
   KW_CREATE db_or_schema_kw if_not_exists_val:if_not_exists ident_or_default:db_name
   opt_comment_val:comment location_val:location
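
The reworked grammar yields three distinct statement shapes, one per
production above (per the syntax in the commit message; table and column
names are illustrative):

    ALTER TABLE tbl ADD COLUMN IF NOT EXISTS i INT;
    ALTER TABLE tbl ADD IF NOT EXISTS COLUMNS (i INT, j INT);
    ALTER TABLE tbl REPLACE COLUMNS (i INT, j INT);
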
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddColsStmt.java
similarity index 74%
copy from fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
copy to fe/src/main/java/org/apache/impala/analysis/AlterTableAddColsStmt.java
index 1a506a1..f849537 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddColsStmt.java
@@ -17,56 +17,38 @@
 
 package org.apache.impala.analysis;
 
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.FeHBaseTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
-import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
+import org.apache.impala.thrift.TAlterTableAddColsParams;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 
 /**
- * Represents an ALTER TABLE ADD|REPLACE COLUMNS (colDef1, colDef2, ...) statement.
+ * Represents
+ * - ALTER TABLE ADD [IF NOT EXISTS] COLUMNS (colDef1, colDef2, ...)
+ * - ALTER TABLE ADD COLUMN [IF NOT EXISTS] colDef
+ * statements.
  */
-public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
+public class AlterTableAddColsStmt extends AlterTableStmt {
+  private final boolean ifNotExists_;
   private final List<ColumnDef> columnDefs_;
-  private final boolean replaceExistingCols_;
 
-  public AlterTableAddReplaceColsStmt(TableName tableName, List<ColumnDef> columnDefs,
-      boolean replaceExistingCols) {
+  public AlterTableAddColsStmt(TableName tableName, boolean ifNotExists,
+      List<ColumnDef> columnDefs) {
     super(tableName);
+    ifNotExists_ = ifNotExists;
     Preconditions.checkState(columnDefs != null && columnDefs.size() > 0);
     columnDefs_ = Lists.newArrayList(columnDefs);
-    replaceExistingCols_ = replaceExistingCols;
-  }
-
-  public List<ColumnDef> getColumnDescs() { return columnDefs_; }
-
-  // Replace columns instead of appending new columns.
-  public boolean getReplaceExistingCols() {
-    return replaceExistingCols_;
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.ADD_REPLACE_COLUMNS);
-    TAlterTableAddReplaceColsParams colParams = new TAlterTableAddReplaceColsParams();
-    for (ColumnDef col: getColumnDescs()) {
-      colParams.addToColumns(col.toThrift());
-    }
-    colParams.setReplace_existing_cols(replaceExistingCols_);
-    params.setAdd_replace_cols_params(colParams);
-    return params;
   }
 
   @Override
@@ -76,16 +58,10 @@ public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
     // TODO: Support column-level DDL on HBase tables. Requires updating the column
     // mappings along with the table columns.
     if (t instanceof FeHBaseTable) {
-      throw new AnalysisException("ALTER TABLE ADD|REPLACE COLUMNS not currently " +
+      throw new AnalysisException("ALTER TABLE ADD COLUMNS not currently " +
           "supported on HBase tables.");
     }
 
-    boolean isKuduTable = t instanceof FeKuduTable;
-    if (isKuduTable && replaceExistingCols_) {
-      throw new AnalysisException("ALTER TABLE REPLACE COLUMNS is not " +
-          "supported on Kudu tables.");
-    }
-
     // Build a set of the partition keys for the table.
     Set<String> existingPartitionKeys = new HashSet<>();
     for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
@@ -105,13 +81,13 @@ public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
       }
 
       Column col = t.getColumn(colName);
-      if (col != null && !replaceExistingCols_) {
+      if (col != null && !ifNotExists_) {
         throw new AnalysisException("Column already exists: " + colName);
       } else if (!colNames.add(colName)) {
         throw new AnalysisException("Duplicate column name: " + colName);
       }
 
-      if (isKuduTable) {
+      if (t instanceof FeKuduTable) {
         if (c.getType().isComplexType()) {
           throw new AnalysisException("Kudu tables do not support complex types: " +
               c.toString());
@@ -130,4 +106,17 @@ public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
       }
     }
   }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.ADD_COLUMNS);
+    TAlterTableAddColsParams colParams = new TAlterTableAddColsParams();
+    for (ColumnDef col: columnDefs_) {
+      colParams.addToColumns(col.toThrift());
+    }
+    colParams.setIf_not_exists(ifNotExists_);
+    params.setAdd_cols_params(colParams);
+    return params;
+  }
 }
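
At analysis time, ifNotExists_ suppresses only the "Column already exists"
error; duplicate names within the statement itself, partition-column
conflicts, and the Kudu restrictions still fail. For example (mirroring the
AnalyzeDDLTest cases further below):

    -- Analyzes OK: int_col exists, but IF NOT EXISTS was given.
    ALTER TABLE functional.alltypes ADD COLUMN IF NOT EXISTS int_col INT;
    -- Error: Duplicate column name: c1
    ALTER TABLE functional.alltypes ADD IF NOT EXISTS COLUMNS (c1 INT, c1 INT);
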
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableReplaceColsStmt.java
similarity index 60%
rename from fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
rename to fe/src/main/java/org/apache/impala/analysis/AlterTableReplaceColsStmt.java
index 1a506a1..c9c4dd9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableReplaceColsStmt.java
@@ -17,56 +17,32 @@
 
 package org.apache.impala.analysis;
 
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.FeHBaseTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
-import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
 import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableReplaceColsParams;
 import org.apache.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 
 /**
- * Represents an ALTER TABLE ADD|REPLACE COLUMNS (colDef1, colDef2, ...) statement.
+ * Represents an ALTER TABLE REPLACE COLUMNS (colDef1, colDef2, ...) statement.
  */
-public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
+public class AlterTableReplaceColsStmt extends AlterTableStmt {
   private final List<ColumnDef> columnDefs_;
-  private final boolean replaceExistingCols_;
 
-  public AlterTableAddReplaceColsStmt(TableName tableName, List<ColumnDef> columnDefs,
-      boolean replaceExistingCols) {
+  public AlterTableReplaceColsStmt(TableName tableName, List<ColumnDef> columnDefs) {
     super(tableName);
     Preconditions.checkState(columnDefs != null && columnDefs.size() > 0);
     columnDefs_ = Lists.newArrayList(columnDefs);
-    replaceExistingCols_ = replaceExistingCols;
-  }
-
-  public List<ColumnDef> getColumnDescs() { return columnDefs_; }
-
-  // Replace columns instead of appending new columns.
-  public boolean getReplaceExistingCols() {
-    return replaceExistingCols_;
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.ADD_REPLACE_COLUMNS);
-    TAlterTableAddReplaceColsParams colParams = new TAlterTableAddReplaceColsParams();
-    for (ColumnDef col: getColumnDescs()) {
-      colParams.addToColumns(col.toThrift());
-    }
-    colParams.setReplace_existing_cols(replaceExistingCols_);
-    params.setAdd_replace_cols_params(colParams);
-    return params;
   }
 
   @Override
@@ -76,12 +52,12 @@ public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
     // TODO: Support column-level DDL on HBase tables. Requires updating the column
     // mappings along with the table columns.
     if (t instanceof FeHBaseTable) {
-      throw new AnalysisException("ALTER TABLE ADD|REPLACE COLUMNS not currently " +
+      throw new AnalysisException("ALTER TABLE REPLACE COLUMNS not currently " +
           "supported on HBase tables.");
     }
 
     boolean isKuduTable = t instanceof FeKuduTable;
-    if (isKuduTable && replaceExistingCols_) {
+    if (isKuduTable) {
       throw new AnalysisException("ALTER TABLE REPLACE COLUMNS is not " +
           "supported on Kudu tables.");
     }
@@ -104,30 +80,21 @@ public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
             "Column name conflicts with existing partition column: " + colName);
       }
 
-      Column col = t.getColumn(colName);
-      if (col != null && !replaceExistingCols_) {
-        throw new AnalysisException("Column already exists: " + colName);
-      } else if (!colNames.add(colName)) {
+      if (!colNames.add(colName)) {
         throw new AnalysisException("Duplicate column name: " + colName);
       }
+    }
+  }
 
-      if (isKuduTable) {
-        if (c.getType().isComplexType()) {
-          throw new AnalysisException("Kudu tables do not support complex types: " +
-              c.toString());
-        }
-        if (c.isPrimaryKey()) {
-          throw new AnalysisException("Cannot add a primary key using an ALTER TABLE " +
-              "ADD COLUMNS statement: " + c.toString());
-        }
-        if (c.isExplicitNotNullable() && !c.hasDefaultValue()) {
-          throw new AnalysisException("A new non-null column must have a default " +
-              "value: " + c.toString());
-        }
-      } else if (c.hasKuduOptions()) {
-        throw new AnalysisException("The specified column options are only supported " +
-            "in Kudu tables: " + c.toString());
-      }
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.REPLACE_COLUMNS);
+    TAlterTableReplaceColsParams colParams = new TAlterTableReplaceColsParams();
+    for (ColumnDef col: columnDefs_) {
+      colParams.addToColumns(col.toThrift());
     }
+    params.setReplace_cols_params(colParams);
+    return params;
   }
 }
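
Note that AlterTableReplaceColsStmt drops the per-column Kudu checks because
REPLACE COLUMNS is now rejected outright on Kudu tables, while on HMS-backed
tables replacing an existing column name is fine. For example (mirroring the
AnalyzeDDLTest cases further below):

    -- Analyzes OK: the new column list wholesale replaces the schema.
    ALTER TABLE functional.alltypes REPLACE COLUMNS (int_col INT);
    -- Error: ALTER TABLE REPLACE COLUMNS is not supported on Kudu tables.
    ALTER TABLE functional_kudu.alltypes REPLACE COLUMNS (i INT);
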
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 04ec23d..66f27cd 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -90,14 +90,15 @@ import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.thrift.JniCatalogConstants;
 import org.apache.impala.thrift.TAlterDbParams;
 import org.apache.impala.thrift.TAlterDbSetOwnerParams;
+import org.apache.impala.thrift.TAlterTableAddColsParams;
 import org.apache.impala.thrift.TAlterTableAddDropRangePartitionParams;
 import org.apache.impala.thrift.TAlterTableAddPartitionParams;
-import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
 import org.apache.impala.thrift.TAlterTableAlterColParams;
 import org.apache.impala.thrift.TAlterTableDropColParams;
 import org.apache.impala.thrift.TAlterTableDropPartitionParams;
 import org.apache.impala.thrift.TAlterTableOrViewSetOwnerParams;
 import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableReplaceColsParams;
 import org.apache.impala.thrift.TAlterTableSetCachedParams;
 import org.apache.impala.thrift.TAlterTableSetFileFormatParams;
 import org.apache.impala.thrift.TAlterTableSetLocationParams;
@@ -420,18 +421,23 @@ public class CatalogOpExecutor {
         return;
       }
       switch (params.getAlter_type()) {
-        case ADD_REPLACE_COLUMNS:
-          TAlterTableAddReplaceColsParams addReplaceColParams =
-              params.getAdd_replace_cols_params();
-          alterTableAddReplaceCols(tbl, addReplaceColParams.getColumns(),
-              addReplaceColParams.isReplace_existing_cols());
+        case ADD_COLUMNS:
+          TAlterTableAddColsParams addColParams = params.getAdd_cols_params();
+          boolean added = alterTableAddCols(tbl, addColParams.getColumns(),
+              addColParams.isIf_not_exists());
           reloadTableSchema = true;
-          if (addReplaceColParams.isReplace_existing_cols()) {
-            addSummary(response, "Table columns have been replaced.");
-          } else {
+          if (added) {
             addSummary(response, "New column(s) have been added to the table.");
+          } else {
+            addSummary(response, "No new column(s) have been added to the table.");
           }
           break;
+        case REPLACE_COLUMNS:
+          TAlterTableReplaceColsParams replaceColParams = params.getReplace_cols_params();
+          alterTableReplaceCols(tbl, replaceColParams.getColumns());
+          reloadTableSchema = true;
+          addSummary(response, "Table columns have been replaced.");
+          break;
         case ADD_PARTITION:
           // Create and add HdfsPartition objects to the corresponding HdfsTable and load
           // their block metadata. Get the new table object with an updated catalog
@@ -585,7 +591,8 @@ public class CatalogOpExecutor {
    * Kudu in addition to the HMS table.
    */
   private boolean altersKuduTable(TAlterTableType type) {
-    return type == TAlterTableType.ADD_REPLACE_COLUMNS
+    return type == TAlterTableType.ADD_COLUMNS
+        || type == TAlterTableType.REPLACE_COLUMNS
         || type == TAlterTableType.DROP_COLUMN
         || type == TAlterTableType.ALTER_COLUMN
         || type == TAlterTableType.ADD_DROP_RANGE_PARTITION;
@@ -598,29 +605,31 @@ public class CatalogOpExecutor {
       KuduTable tbl, long newCatalogVersion) throws ImpalaException {
     Preconditions.checkState(tbl.getLock().isHeldByCurrentThread());
     switch (params.getAlter_type()) {
-      case ADD_REPLACE_COLUMNS:
-        TAlterTableAddReplaceColsParams addReplaceColParams =
-            params.getAdd_replace_cols_params();
-        KuduCatalogOpExecutor.addColumn((KuduTable) tbl,
-            addReplaceColParams.getColumns());
-        addSummary(response, "Column has been added/replaced.");
+      case ADD_COLUMNS:
+        TAlterTableAddColsParams addColParams = params.getAdd_cols_params();
+        KuduCatalogOpExecutor.addColumn(tbl, addColParams.getColumns());
+        addSummary(response, "Column(s) have been added.");
+        break;
+      case REPLACE_COLUMNS:
+        TAlterTableReplaceColsParams replaceColParams = params.getReplace_cols_params();
+        KuduCatalogOpExecutor.addColumn(tbl, replaceColParams.getColumns());
+        addSummary(response, "Column(s) have been replaced.");
         break;
       case DROP_COLUMN:
         TAlterTableDropColParams dropColParams = params.getDrop_col_params();
-        KuduCatalogOpExecutor.dropColumn((KuduTable) tbl,
-            dropColParams.getCol_name());
+        KuduCatalogOpExecutor.dropColumn(tbl, dropColParams.getCol_name());
         addSummary(response, "Column has been dropped.");
         break;
       case ALTER_COLUMN:
         TAlterTableAlterColParams alterColParams = params.getAlter_col_params();
-        KuduCatalogOpExecutor.alterColumn((KuduTable) tbl, alterColParams.getCol_name(),
+        KuduCatalogOpExecutor.alterColumn(tbl, alterColParams.getCol_name(),
             alterColParams.getNew_col_def());
         addSummary(response, "Column has been altered.");
         break;
       case ADD_DROP_RANGE_PARTITION:
         TAlterTableAddDropRangePartitionParams partParams =
             params.getAdd_drop_range_partition_params();
-        KuduCatalogOpExecutor.addDropRangePartition((KuduTable) tbl, partParams);
+        KuduCatalogOpExecutor.addDropRangePartition(tbl, partParams);
         addSummary(response, "Range partition has been " +
             (partParams.type == TRangePartitionOperationType.ADD ?
             "added." : "dropped."));
@@ -2027,28 +2036,49 @@ public class CatalogOpExecutor {
   }
 
   /**
-   * Appends one or more columns to the given table, optionally replacing all existing
-   * columns.
+   * Appends one or more columns to the given table. Returns true if a column was
+   * added; false otherwise.
    */
-  private void alterTableAddReplaceCols(Table tbl, List<TColumn> columns,
-      boolean replaceExistingCols) throws ImpalaException {
+  private boolean alterTableAddCols(Table tbl, List<TColumn> columns, boolean ifNotExists)
+      throws ImpalaException {
     Preconditions.checkState(tbl.getLock().isHeldByCurrentThread());
     org.apache.hadoop.hive.metastore.api.Table msTbl = tbl.getMetaStoreTable().deepCopy();
-    List<FieldSchema> newColumns = buildFieldSchemaList(columns);
-    if (replaceExistingCols) {
-      msTbl.getSd().setCols(newColumns);
-      String sortByKey = AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS;
-      if (msTbl.getParameters().containsKey(sortByKey)) {
-        String oldColumns = msTbl.getParameters().get(sortByKey);
-        String alteredColumns = MetaStoreUtil.intersectCsvListWithColumNames(oldColumns,
-            columns);
-        msTbl.getParameters().put(sortByKey, alteredColumns);
-      }
-    } else {
+    List<TColumn> colsToAdd = new ArrayList<>();
+    for (TColumn column: columns) {
+      Column col = tbl.getColumn(column.getColumnName());
+      if (ifNotExists && col != null) continue;
+      if (col != null) {
+        throw new CatalogException(
+            String.format("Column '%s' in table '%s' already exists.",
+            col.getName(), tbl.getName()));
+      }
+      colsToAdd.add(column);
+    }
+    // Only add columns that do not exist.
+    if (!colsToAdd.isEmpty()) {
       // Append the new column to the existing list of columns.
-      for (FieldSchema fs: buildFieldSchemaList(columns)) {
-        msTbl.getSd().addToCols(fs);
-      }
+      msTbl.getSd().getCols().addAll(buildFieldSchemaList(colsToAdd));
+      applyAlterTable(msTbl, true);
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Replaces all existing columns of the given table.
+   */
+  private void alterTableReplaceCols(Table tbl, List<TColumn> columns)
+      throws ImpalaException {
+    Preconditions.checkState(tbl.getLock().isHeldByCurrentThread());
+    org.apache.hadoop.hive.metastore.api.Table msTbl = tbl.getMetaStoreTable().deepCopy();
+    List<FieldSchema> newColumns = buildFieldSchemaList(columns);
+    msTbl.getSd().setCols(newColumns);
+    String sortByKey = AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS;
+    if (msTbl.getParameters().containsKey(sortByKey)) {
+      String oldColumns = msTbl.getParameters().get(sortByKey);
+      String alteredColumns = MetaStoreUtil.intersectCsvListWithColumNames(oldColumns,
+          columns);
+      msTbl.getParameters().put(sortByKey, alteredColumns);
     }
     applyAlterTable(msTbl, true);
   }
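
The boolean returned by alterTableAddCols() drives the DDL summary: with IF
NOT EXISTS, a statement whose columns all exist already becomes a no-op
rather than an error. A sketch against a hypothetical table t1 (mirroring
the alter-table.test changes below):

    ALTER TABLE t1 ADD IF NOT EXISTS COLUMNS (t TINYINT, s STRING);
    -- summary: 'No new column(s) have been added to the table.'
    ALTER TABLE t1 ADD IF NOT EXISTS COLUMNS (t2 TINYINT, s2 STRING);
    -- summary: 'New column(s) have been added to the table.'
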
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index b7949ba..7ffa719 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -321,39 +321,104 @@ public class AnalyzeDDLTest extends FrontendTestBase {
   }
 
   @Test
-  public void TestAlterTableAddReplaceColumns() throws AnalysisException {
+  public void TestAlterTableAddColumn() {
+    AnalyzesOk("alter table functional.alltypes add column new_col int");
+    AnalyzesOk("alter table functional.alltypes add column NEW_COL int");
+    AnalyzesOk("alter table functional.alltypes add column if not exists int_col int");
+    AnalyzesOk("alter table functional.alltypes add column if not exists INT_COL int");
+
+    // Column name must be unique for add.
+    AnalysisError("alter table functional.alltypes add column int_col int",
+        "Column already exists: int_col");
+    AnalysisError("alter table functional.alltypes add column INT_COL int",
+        "Column already exists: int_col");
+    // Add a column with same name as a partition column.
+    AnalysisError("alter table functional.alltypes add column year int",
+        "Column name conflicts with existing partition column: year");
+    AnalysisError("alter table functional.alltypes add column if not exists year int",
+        "Column name conflicts with existing partition column: year");
+    AnalysisError("alter table functional.alltypes add column YEAR int",
+        "Column name conflicts with existing partition column: year");
+    AnalysisError("alter table functional.alltypes add column if not exists YEAR int",
+        "Column name conflicts with existing partition column: year");
+    // Invalid column name.
+    AnalysisError("alter table functional.alltypes add column `???` int",
+        "Invalid column/field name: ???");
+
+    // Table/Db does not exist.
+    AnalysisError("alter table db_does_not_exist.alltypes add column i int",
+        "Could not resolve table reference: 'db_does_not_exist.alltypes'");
+    AnalysisError("alter table functional.table_does_not_exist add column i int",
+        "Could not resolve table reference: 'functional.table_does_not_exist'");
+
+    // Cannot ALTER TABLE a view.
+    AnalysisError("alter table functional.alltypes_view add column c1 string",
+        "ALTER TABLE not allowed on a view: functional.alltypes_view");
+    // Cannot ALTER TABLE a nested collection.
+    AnalysisError("alter table allcomplextypes.int_array_col add column c1 string",
+        createAnalysisCtx("functional"),
+        "ALTER TABLE not allowed on a nested collection: allcomplextypes.int_array_col");
+    // Cannot ALTER TABLE produced by a data source.
+    AnalysisError("alter table functional.alltypes_datasource add column c1 string",
+        "ALTER TABLE not allowed on a table produced by a data source: " +
+        "functional.alltypes_datasource");
+
+    // Cannot ALTER TABLE ADD COLUMNS on an HBase table.
+    AnalysisError("alter table functional_hbase.alltypes add column i int",
+        "ALTER TABLE ADD COLUMNS not currently supported on HBase tables.");
+
+    // Cannot ALTER ADD COLUMN primary key on Kudu table.
+    AnalysisError("alter table functional_kudu.alltypes add column " +
+        "new_col int primary key",
+        "Cannot add a primary key using an ALTER TABLE ADD COLUMNS statement: " +
+        "new_col INT PRIMARY KEY");
+
+    // A non-null column must have a default value on a Kudu table.
+    AnalysisError("alter table functional_kudu.alltypes add column new_col int not null",
+        "A new non-null column must have a default value: new_col INT NOT NULL");
+
+    // Cannot ALTER ADD COLUMN complex type on Kudu table.
+    AnalysisError("alter table functional_kudu.alltypes add column c struct<f1:int>",
+        "Kudu tables do not support complex types: c STRUCT<f1:INT>");
+
+    // A NOT NULL constraint is a Kudu-only option.
+    AnalysisError("alter table functional.alltypes add column new_col int not null",
+        "The specified column options are only supported in Kudu tables: " +
+        "new_col INT NOT NULL");
+  }
+
+  @Test
+  public void TestAlterTableAddColumns() {
     AnalyzesOk("alter table functional.alltypes add columns (new_col int)");
+    AnalyzesOk("alter table functional.alltypes add columns (NEW_COL int)");
     AnalyzesOk("alter table functional.alltypes add columns (c1 string comment 'hi')");
     AnalyzesOk("alter table functional.alltypes add columns (c struct<f1:int>)");
-    AnalyzesOk(
-        "alter table functional.alltypes replace columns (c1 int comment 'c', c2 int)");
-    AnalyzesOk("alter table functional.alltypes replace columns (c array<string>)");
+    AnalyzesOk("alter table functional.alltypes add if not exists columns (int_col int)");
+    AnalyzesOk("alter table functional.alltypes add if not exists columns (INT_COL int)");
 
-    // Column name must be unique for add
+    // Column name must be unique for add.
     AnalysisError("alter table functional.alltypes add columns (int_col int)",
         "Column already exists: int_col");
-    // Add a column with same name as a partition column
+    // Add a column with same name as a partition column.
     AnalysisError("alter table functional.alltypes add columns (year int)",
         "Column name conflicts with existing partition column: year");
+    AnalysisError("alter table functional.alltypes add if not exists columns (year int)",
+        "Column name conflicts with existing partition column: year");
     // Invalid column name.
     AnalysisError("alter table functional.alltypes add columns (`???` int)",
         "Invalid column/field name: ???");
-    AnalysisError("alter table functional.alltypes replace columns (`???` int)",
-        "Invalid column/field name: ???");
 
-    // Replace should not throw an error if the column already exists
-    AnalyzesOk("alter table functional.alltypes replace columns (int_col int)");
-    // It is not possible to replace a partition column
-    AnalysisError("alter table functional.alltypes replace columns (Year int)",
-        "Column name conflicts with existing partition column: year");
-
-    // Duplicate column names
+    // Duplicate column names.
     AnalysisError("alter table functional.alltypes add columns (c1 int, c1 int)",
         "Duplicate column name: c1");
-    AnalysisError("alter table functional.alltypes replace columns (c1 int, C1 int)",
+    AnalysisError("alter table functional.alltypes add columns (c1 int, C1 int)",
         "Duplicate column name: c1");
+    AnalysisError("alter table functional.alltypes add if not exists columns " +
+        "(c1 int, c1 int)", "Duplicate column name: c1");
+    AnalysisError("alter table functional.alltypes add if not exists columns " +
+        "(c1 int, C1 int)", "Duplicate column name: c1");
 
-    // Table/Db does not exist
+    // Table/Db does not exist.
     AnalysisError("alter table db_does_not_exist.alltypes add columns (i int)",
         "Could not resolve table reference: 'db_does_not_exist.alltypes'");
     AnalysisError("alter table functional.table_does_not_exist add columns (i int)",
@@ -374,9 +439,85 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "ALTER TABLE not allowed on a table produced by a data source: " +
         "functional.alltypes_datasource");
 
-    // Cannot ALTER TABLE ADD/REPLACE COLUMNS on an HBase table.
+    // Cannot ALTER TABLE ADD COLUMNS on an HBase table.
     AnalysisError("alter table functional_hbase.alltypes add columns (i int)",
-        "ALTER TABLE ADD|REPLACE COLUMNS not currently supported on HBase tables.");
+        "ALTER TABLE ADD COLUMNS not currently supported on HBase tables.");
+
+    // Cannot ALTER ADD COLUMNS primary key on Kudu table.
+    AnalysisError("alter table functional_kudu.alltypes add columns " +
+        "(new_col int primary key)",
+        "Cannot add a primary key using an ALTER TABLE ADD COLUMNS statement: " +
+        "new_col INT PRIMARY KEY");
+
+    // A non-null column must have a default value on a Kudu table.
+    AnalysisError("alter table functional_kudu.alltypes add columns" +
+        "(new_col int not null)",
+        "A new non-null column must have a default value: new_col INT NOT NULL");
+
+    // Cannot ALTER ADD COLUMN complex type on Kudu table.
+    AnalysisError("alter table functional_kudu.alltypes add columns (c struct<f1:int>)",
+        "Kudu tables do not support complex types: c STRUCT<f1:INT>");
+
+    // A NOT NULL constraint is a Kudu-only option.
+    AnalysisError("alter table functional.alltypes add columns(new_col int not null)",
+        "The specified column options are only supported in Kudu tables: " +
+        "new_col INT NOT NULL");
+  }
+
+  @Test
+  public void TestAlterTableReplaceColumns() {
+    AnalyzesOk("alter table functional.alltypes replace columns " +
+        "(c1 int comment 'c', c2 int)");
+    AnalyzesOk("alter table functional.alltypes replace columns " +
+        "(C1 int comment 'c', C2 int)");
+    AnalyzesOk("alter table functional.alltypes replace columns (c array<string>)");
+    // Invalid column name.
+    AnalysisError("alter table functional.alltypes replace columns (`???` int)",
+        "Invalid column/field name: ???");
+
+    // Replace should not throw an error if the column already exists.
+    AnalyzesOk("alter table functional.alltypes replace columns (int_col int)");
+    AnalyzesOk("alter table functional.alltypes replace columns (INT_COL int)");
+    // It is not possible to replace a partition column.
+    AnalysisError("alter table functional.alltypes replace columns (year int)",
+        "Column name conflicts with existing partition column: year");
+    AnalysisError("alter table functional.alltypes replace columns (Year int)",
+        "Column name conflicts with existing partition column: year");
+
+    // Duplicate column names.
+    AnalysisError("alter table functional.alltypes replace columns (c1 int, c1 int)",
+        "Duplicate column name: c1");
+    AnalysisError("alter table functional.alltypes replace columns (c1 int, C1 int)",
+        "Duplicate column name: c1");
+
+    // Table/Db does not exist
+    AnalysisError("alter table db_does_not_exist.alltypes replace columns (i int)",
+        "Could not resolve table reference: 'db_does_not_exist.alltypes'");
+    AnalysisError("alter table functional.table_does_not_exist replace columns (i int)",
+        "Could not resolve table reference: 'functional.table_does_not_exist'");
+
+    // Cannot ALTER TABLE a view.
+    AnalysisError("alter table functional.alltypes_view " +
+            "replace columns (c1 string comment 'hi')",
+        "ALTER TABLE not allowed on a view: functional.alltypes_view");
+    // Cannot ALTER TABLE a nested collection.
+    AnalysisError("alter table allcomplextypes.int_array_col " +
+            "replace columns (c1 string comment 'hi')",
+        createAnalysisCtx("functional"),
+        "ALTER TABLE not allowed on a nested collection: allcomplextypes.int_array_col");
+    // Cannot ALTER TABLE produced by a data source.
+    AnalysisError("alter table functional.alltypes_datasource " +
+            "replace columns (c1 string comment 'hi')",
+        "ALTER TABLE not allowed on a table produced by a data source: " +
+            "functional.alltypes_datasource");
+
+    // Cannot ALTER TABLE REPLACE COLUMNS on an HBase table.
+    AnalysisError("alter table functional_hbase.alltypes replace columns (i int)",
+        "ALTER TABLE REPLACE COLUMNS not currently supported on HBase tables.");
+
+    // Cannot ALTER TABLE REPLACE COLUMNS on a Kudu table.
+    AnalysisError("alter table functional_kudu.alltypes replace columns (i int)",
+        "ALTER TABLE REPLACE COLUMNS is not supported on Kudu tables.");
   }
 
   @Test
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java
index 2d7ada8..24086cc 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java
@@ -1939,6 +1939,7 @@ public class AuthorizationStmtTest extends FrontendTestBase {
   @Test
   public void testAlterTable() throws ImpalaException {
     for (AuthzTest test: new AuthzTest[]{
+        authorize("alter table functional.alltypes add column c1 int"),
         authorize("alter table functional.alltypes add columns(c1 int)"),
         authorize("alter table functional.alltypes replace columns(c1 int)"),
         authorize("alter table functional.alltypes change int_col c1 int"),
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index 0704188..2810890 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -2123,8 +2123,23 @@ public class ParserTest extends FrontendTestBase {
   }
 
   @Test
+  public void TestAlterTableAddColumn() {
+    for (String keyword: new String[]{"", "IF NOT EXISTS"}) {
+      ParsesOk(String.format("ALTER TABLE Foo ADD COLUMN %s i int", keyword));
+      ParsesOk(String.format("ALTER TABLE TestDb.Foo ADD COLUMN %s i int", keyword));
+
+      ParserError(String.format("ALTER TestDb.Foo ADD COLUMN %s", keyword));
+      ParserError(String.format("ALTER Foo ADD COLUMN %s", keyword));
+      ParserError(String.format("ALTER TABLE TestDb.Foo ADD COLUMN %s (i int)", keyword));
+      ParserError(String.format("ALTER TABLE Foo ADD COLUMN %s (i int)", keyword));
+      ParserError(String.format("ALTER Foo %s ADD COLUMN i int", keyword));
+      ParserError(String.format("ALTER TestDb.Foo %s ADD COLUMN i int", keyword));
+    }
+  }
+
+  @Test
   public void TestAlterTableAddReplaceColumns() {
-    String[] addReplaceKw = {"ADD", "REPLACE"};
+    String[] addReplaceKw = {"ADD", "ADD IF NOT EXISTS", "REPLACE"};
     for (String addReplace: addReplaceKw) {
       ParsesOk(String.format(
           "ALTER TABLE Foo %s COLUMNS (i int, s string)", addReplace));
@@ -2151,9 +2166,6 @@ public class ParserTest extends FrontendTestBase {
       ParserError(String.format("ALTER TestDb.Foo %s COLUMNS ()", addReplace));
       ParserError(String.format("ALTER Foo %s COLUMNS (i int, s string)", addReplace));
       ParserError(String.format("ALTER TABLE %s COLUMNS (i int, s string)", addReplace));
-      // Don't yet support ALTER TABLE ADD COLUMN syntax
-      ParserError(String.format("ALTER TABLE Foo %s COLUMN i int", addReplace));
-      ParserError(String.format("ALTER TABLE Foo %s COLUMN (i int)", addReplace));
     }
   }
 
diff --git a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
index e6332d6..5d9e6cf 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
@@ -22,11 +22,65 @@ alter table t1 add columns (t tinyint, s string comment 'Str Col')
 string
 ====
 ---- QUERY
+# Add columns that already exist with "if not exists" clause.
+alter table t1 add if not exists columns (t tinyint, s string comment 'Str Col')
+---- RESULTS
+'No new column(s) have been added to the table.'
+---- TYPES
+string
+====
+---- QUERY
+# Add columns that do not exist with "if not exists" clause.
+alter table t1 add if not exists columns (t2 tinyint, s2 string comment 'Str Col')
+---- RESULTS
+'New column(s) have been added to the table.'
+---- TYPES
+string
+====
+---- QUERY
+# Add a column that already exists and a new column that does not exist with
+# "if not exists" clause.
+alter table t1 add if not exists columns (t3 tinyint, s2 string comment 'Str Col')
+---- RESULTS
+'New column(s) have been added to the table.'
+---- TYPES
+string
+====
+---- QUERY
+# Add a new column that does not exist.
+alter table t1 add column t4 tinyint
+---- RESULTS
+'New column(s) have been added to the table.'
+---- TYPES
+string
+====
+---- QUERY
+# Add a new column that does not exist with "if not exists" clause.
+alter table t1 add column if not exists t5 tinyint
+---- RESULTS
+'New column(s) have been added to the table.'
+---- TYPES
+string
+====
+---- QUERY
+# Add a column that already exists with "if not exists" clause.
+alter table t1 add column if not exists t5 tinyint
+---- RESULTS
+'No new column(s) have been added to the table.'
+---- TYPES
+string
+====
+---- QUERY
 describe t1
 ---- RESULTS
 'i','int',''
 't','tinyint',''
 's','string','Str Col'
+'t2','tinyint',''
+'s2','string','Str Col'
+'t3','tinyint',''
+'t4','tinyint',''
+'t5','tinyint',''
 ---- TYPES
 string,string,string
 ====
@@ -78,6 +132,11 @@ describe t2
 'i','int',''
 't','tinyint',''
 's','string','Str Col'
+'t2','tinyint',''
+'s2','string','Str Col'
+'t3','tinyint',''
+'t4','tinyint',''
+'t5','tinyint',''
 ---- TYPES
 string,string,string
 ====
@@ -92,6 +151,11 @@ describe t2
 ---- RESULTS
 'i','int',''
 's','string','Str Col'
+'t2','tinyint',''
+'s2','string','Str Col'
+'t3','tinyint',''
+'t4','tinyint',''
+'t5','tinyint',''
 ---- TYPES
 string,string,string
 ====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test b/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
index a0d95d9..0bed123 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
@@ -219,7 +219,7 @@ alter table tbl_to_alter add range partition 1 < values <= 20;
 alter table tbl_to_alter add columns (new_col1 int not null default 10,
   new_col2 bigint not null default 1000)
 ---- RESULTS
-'Column has been added/replaced.'
+'Column(s) have been added.'
 ====
 ---- QUERY
 # Verify partition layout
@@ -272,7 +272,7 @@ INT,STRING,BIGINT,INT,BIGINT
 # Add nullable columns: with and without a default
 alter table tbl_to_alter add columns (new_col3 string null, new_col4 int null default -1)
 ---- RESULTS
-'Column has been added/replaced.'
+'Column(s) have been added.'
 ====
 ---- QUERY
 # Add a row
diff --git a/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test b/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
index f38b3c5..981a734 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
@@ -363,7 +363,7 @@ INT,INT,INT,INT,INT,INT,STRING,BOOLEAN,DECIMAL
 ---- QUERY
 alter table tbl_with_defaults add columns (j int null, k int not null default 10000)
 ---- RESULTS
-'Column has been added/replaced.'
+'Column(s) have been added.'
 ====
 ---- QUERY
 select * from tbl_with_defaults


[impala] 04/05: IMPALA-7905: Hive keywords not quoted for identifiers

Posted by mi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mikeb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 85a8b34645a46038fd217c03e64326b72d9669b5
Author: Paul Rogers <pr...@cloudera.com>
AuthorDate: Thu Nov 29 23:50:53 2018 -0800

    IMPALA-7905: Hive keywords not quoted for identifiers
    
    Impala often generates SQL for statements using the toSql() call.
    Generated SQL is often used during testing or when writing the query
    plan. Impala keywords such as "create", when used as identifiers,
    must be quoted:
    
    SELECT `select`, `from` FROM `order` ...
    
    The code in ToSqlUtils.getIdentSql() quotes the identifier if it is
    an Impala or Hive keyword, or if it does not follow the identifier
    pattern. The code uses the Hive lexer to detect keywords. But the
    code contained a flaw: the lexer recognizes only upper-cased
    keywords, while we passed the identifier through unchanged. As a
    result, "MONTH" is caught as a Hive keyword and quoted, but "month"
    is not. This patch fixes that flaw.
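
    For illustration, the fixed behavior, as asserted in the new
    ToSqlUtilsTest added below:

        // With the fix, keyword detection is case-insensitive:
        assertTrue(ToSqlUtils.hiveNeedsQuotes("SELECT"));
        assertTrue(ToSqlUtils.hiveNeedsQuotes("select"));
        // Hive-only keywords are quoted regardless of case:
        assertEquals("`month`", ToSqlUtils.getIdentSql("month"));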
    
    This patch also fixes:
    
    IMPALA-8051: Compute stats fails on a column with comment character in
    name
    
    The code uses the Hive lexical analyzer to check names. Since "#" and
    "--" are comment characters, a name like "foo#" is lexed as just
    "foo", which needs no quotes, so "foo#" was left unquoted, which
    causes issues. Added a special check for "#" and "--" to resolve
    this.
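
    For example, as asserted in the same test:

        // Comment characters force quoting:
        assertEquals("`foo#`", ToSqlUtils.getIdentSql("foo#"));
        assertEquals("`foo--bar`", ToSqlUtils.getIdentSql("foo--bar"));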
    
    Testing:
    
    * Refactored getIdentSql() for easier testing.
    * Added tests to the recently added ToSqlUtilsTest for this case and
      several others.
    * Making this change caused the columns `month`, `year`, and `key` to
      be quoted where previously they were not. Updated many tests as a
      result.
    * Added a new identSql() function, for use in tests, to match the
      quoting that Impala uses and to handle the wildcard and multi-part
      names (see the sketch after this list). Used this in ToSqlTest to
      handle the quoted names.
    * PlannerTest emits statement SQL to the output file wrapped to 80
      columns and sometimes leaves trailing spaces at the end of the line.
      Some tools remove that trailing space, resulting in trivial file
      differences.  Fixed this to remove trailing spaces in order to simplify
      file comparisons.
    * Tweaked the "In pipelines" output to avoid trailing spaces when no
      pipelines are listed.
    * Reran all FE tests.
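
    The identSql() helper's behavior, as asserted in ToSqlUtilsTest:

        assertEquals("foo", ToSqlUtils.identSql("foo"));
        // The wildcard is never quoted:
        assertEquals("*", ToSqlUtils.identSql("*"));
        // Multi-part names are quoted part by part:
        assertEquals("foo.`create`.*", ToSqlUtils.identSql("foo.create.*"));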
    
    Change-Id: I06cc20b052a3a66535a171c36b4b31477c0ba6d0
    Reviewed-on: http://gerrit.cloudera.org:8080/12009
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../org/apache/impala/analysis/InsertStmt.java     |   9 +-
 .../org/apache/impala/analysis/ToSqlUtils.java     | 144 ++++--
 .../java/org/apache/impala/common/PrintUtils.java  |   1 +
 .../java/org/apache/impala/planner/PlanNode.java   |   4 +-
 .../org/apache/impala/analysis/AnalyzeDDLTest.java |   8 +-
 .../apache/impala/analysis/AnalyzeExprsTest.java   |   3 +-
 .../apache/impala/analysis/AnalyzeStmtsTest.java   |   2 +-
 .../impala/analysis/ExprRewriteRulesTest.java      |   6 +-
 .../apache/impala/analysis/ExprRewriterTest.java   |  10 +-
 .../java/org/apache/impala/analysis/ToSqlTest.java |  55 ++-
 .../org/apache/impala/analysis/ToSqlUtilsTest.java | 174 +++++++
 .../org/apache/impala/planner/PlannerTest.java     |   4 +-
 .../org/apache/impala/util/PrintUtilsTest.java     |  14 +-
 .../queries/PlannerTest/aggregation.test           |  44 +-
 .../queries/PlannerTest/analytic-fns.test          |  16 +-
 .../queries/PlannerTest/constant-folding.test      |   4 +-
 .../queries/PlannerTest/ddl.test                   |   8 +-
 .../queries/PlannerTest/hbase.test                 |  12 +-
 .../queries/PlannerTest/hdfs.test                  | 186 ++++----
 .../queries/PlannerTest/implicit-joins.test        |   2 +-
 .../queries/PlannerTest/inline-view.test           |  32 +-
 .../queries/PlannerTest/insert-sort-by.test        |  22 +-
 .../queries/PlannerTest/insert.test                |  88 ++--
 .../queries/PlannerTest/join-order.test            |   8 +-
 .../queries/PlannerTest/joins.test                 |  62 +--
 .../queries/PlannerTest/kudu-upsert.test           |   4 +-
 .../queries/PlannerTest/lineage.test               |  88 ++--
 .../queries/PlannerTest/nested-collections.test    |  38 +-
 .../queries/PlannerTest/order.test                 |  72 +--
 .../PlannerTest/parquet-filtering-disabled.test    |   4 +-
 .../queries/PlannerTest/parquet-filtering.test     |   8 +-
 .../queries/PlannerTest/parquet-stats-agg.test     |  20 +-
 .../queries/PlannerTest/partition-key-scans.test   |  66 +--
 .../queries/PlannerTest/predicate-propagation.test | 186 ++++----
 .../queries/PlannerTest/resource-requirements.test |  26 +-
 .../PlannerTest/runtime-filter-propagation.test    | 166 +++----
 .../PlannerTest/runtime-filter-query-options.test  | 128 +++---
 .../PlannerTest/shuffle-by-distinct-exprs.test     |  92 ++--
 .../queries/PlannerTest/small-query-opt.test       |   6 +-
 .../queries/PlannerTest/subquery-rewrite.test      |  24 +-
 .../queries/PlannerTest/tablesample.test           |   4 +-
 .../queries/PlannerTest/union.test                 | 508 ++++++++++-----------
 .../queries/PlannerTest/views.test                 |   4 +-
 .../queries/QueryTest/stats-extrapolation.test     |  12 +-
 44 files changed, 1310 insertions(+), 1064 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
index 2722bcf..7c34d52 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
@@ -938,13 +938,18 @@ public class InsertStmt extends StatementBase {
     strBuilder.append(" TABLE " + originalTableName_);
     if (columnPermutation_ != null) {
       strBuilder.append("(");
-      strBuilder.append(Joiner.on(", ").join(columnPermutation_));
+      String sep = "";
+      for (String col : columnPermutation_) {
+        strBuilder.append(sep);
+        strBuilder.append(ToSqlUtils.getIdentSql(col));
+        sep = ", ";
+      }
       strBuilder.append(")");
     }
     if (partitionKeyValues_ != null) {
       List<String> values = new ArrayList<>();
       for (PartitionKeyValue pkv: partitionKeyValues_) {
-        values.add(pkv.getColName()
+        values.add(ToSqlUtils.getIdentSql(pkv.getColName())
             + (pkv.getValue() != null ? ("=" + pkv.getValue().toSql(options)) : ""));
       }
       strBuilder.append(" PARTITION (" + Joiner.on(", ").join(values) + ")");
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index 92f5614..366fb8f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -20,18 +20,10 @@ package org.apache.impala.analysis;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Splitter;
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import org.antlr.runtime.ANTLRStringStream;
 import org.antlr.runtime.Token;
 import org.apache.commons.lang.ObjectUtils;
@@ -55,6 +47,15 @@ import org.apache.impala.catalog.RowFormat;
 import org.apache.impala.catalog.Table;
 import org.apache.impala.util.KuduUtil;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Splitter;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
 /**
  * Contains utility methods for creating SQL strings, for example,
  * for creating identifier strings that are compatible with Hive or Impala.
@@ -63,23 +64,33 @@ public class ToSqlUtils {
   // Table properties to hide when generating the toSql() statement
   // EXTERNAL, SORT BY, and comment are hidden because they are part of the toSql result,
   // e.g., "CREATE EXTERNAL TABLE <name> ... SORT BY (...) ... COMMENT <comment> ..."
-  private static final ImmutableSet<String> HIDDEN_TABLE_PROPERTIES = ImmutableSet.of(
+  @VisibleForTesting
+  protected static final ImmutableSet<String> HIDDEN_TABLE_PROPERTIES = ImmutableSet.of(
       "EXTERNAL", "comment", AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS);
 
   /**
    * Removes all hidden properties from the given 'tblProperties' map.
    */
-  private static void removeHiddenTableProperties(Map<String, String> tblProperties,
-      Map<String, String> generatedTblProperties) {
+  @VisibleForTesting
+  protected static void removeHiddenTableProperties(Map<String, String> tblProperties) {
     for (String key: HIDDEN_TABLE_PROPERTIES) tblProperties.remove(key);
-    generatedTblProperties.remove(KuduTable.KEY_TABLE_NAME);
+  }
+
+  /**
+   * Removes all hidden Kudu properties from the given 'tblProperties' map.
+   */
+  @VisibleForTesting
+  protected static void removeHiddenKuduTableProperties(
+      Map<String, String> tblProperties) {
+    tblProperties.remove(KuduTable.KEY_TABLE_NAME);
   }
 
   /**
    * Returns the list of sort columns from 'properties' or 'null' if 'properties' doesn't
    * contain 'sort.columns'.
    */
-  private static List<String> getSortColumns(Map<String, String> properties) {
+  @VisibleForTesting
+  protected static List<String> getSortColumns(Map<String, String> properties) {
     String sortByKey = AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS;
     if (!properties.containsKey(sortByKey)) return null;
     return Lists.newArrayList(Splitter.on(",").trimResults().omitEmptyStrings().split(
@@ -102,41 +113,88 @@ public class ToSqlUtils {
   }
 
   /**
-   * Given an unquoted identifier string, returns an identifier lexable by
-   * Impala and Hive, possibly by enclosing the original identifier in "`" quotes.
-   * For example, Hive cannot parse its own auto-generated column
-   * names "_c0", "_c1" etc. unless they are quoted. Impala and Hive keywords
-   * must also be quoted.
-   *
-   * Impala's lexer recognizes a superset of the unquoted identifiers that Hive can.
-   * At the same time, Impala's and Hive's list of keywords differ.
-   * This method always returns an identifier that Impala and Hive can recognize,
-   * although for some identifiers the quotes may not be strictly necessary for
-   * one or the other system.
+   * Check if a column (or table) name will be parsed by Hive as an identifier.
+   * If not, then the identifier must be quoted.
+   * @param ident name to check
+   * @return true if the name must be quoted for Hive, false if the
+   * name is a valid identifier and so needs no quoting
    */
-  public static String getIdentSql(String ident) {
-    boolean hiveNeedsQuotes = true;
-    HiveLexer hiveLexer = new HiveLexer(new ANTLRStringStream(ident));
+  public static boolean hiveNeedsQuotes(String ident) {
+    // Lexer catches only upper-case keywords: "SELECT", but not "select".
+    // So, do the check on an upper-case version of the identifier.
+    // Hive uses ANTLRNoCaseStringStream to upper-case text, but that
+    // class is a non-static inner class so we can't use it here.
+    HiveLexer hiveLexer = new HiveLexer(new ANTLRStringStream(ident.toUpperCase()));
     try {
       Token t = hiveLexer.nextToken();
       // Check that the lexer recognizes an identifier and then EOF.
-      boolean identFound = t.getType() == HiveLexer.Identifier;
+      // Not an identifier? Needs quotes.
+      if (t.getType() != HiveLexer.Identifier) return true;
+      // Not a single identifier? Needs quotes.
       t = hiveLexer.nextToken();
-      // No enclosing quotes are necessary for Hive.
-      hiveNeedsQuotes = !(identFound && t.getType() == HiveLexer.EOF);
+      return t.getType() != HiveLexer.EOF;
     } catch (Exception e) {
       // Ignore exception and just quote the identifier to be safe.
+      return true;
     }
-    boolean isImpalaReserved = SqlScanner.isReserved(ident.toUpperCase());
-    // Impala's scanner recognizes the ".123" portion of "db.123_tbl" as a decimal,
-    // so while the quoting is not necessary for the given identifier itself, the quotes
-    // are needed if this identifier will be preceded by a ".".
-    boolean startsWithNumber = false;
-    if (!hiveNeedsQuotes && !isImpalaReserved) {
-      startsWithNumber = Character.isDigit(ident.charAt(0));
+  }
+
+  /**
+   * Determines if an identifier must be quoted for Impala. This is a very
+   * weak test; it works only for simple identifiers. Use this in conjunction
+   * with {@link #hiveNeedsQuotes} for a complete check.
+   * @param ident the identifier to check
+   * @return true if the identifier is an Impala keyword, or if it starts
+   * with a digit
+   */
+  public static boolean impalaNeedsQuotes(String ident) {
+    return SqlScanner.isReserved(ident) ||
+      // Quote numbers to avoid odd cases.
+      // SELECT id AS 3a3 FROM functional.alltypestiny
+      // is valid, but
+      // SELECT id AS 3e3 FROM functional.alltypestiny
+      // Is not. The "e" changes the meaning from identifier to number.
+      Character.isDigit(ident.charAt(0)) ||
+      // The parser-based checks fail if the identifier contains a comment
+      // character: the parser ignores those characters and the rest of
+      // the identifier. Treat them specially.
+      ident.contains("#") || ident.contains("--");
+  }
+
+  /**
+   * Given an unquoted identifier string, returns an identifier lexable by
+   * Impala and Hive, possibly by enclosing the original identifier in "`" quotes.
+   * For example, Hive cannot parse its own auto-generated column
+   * names "_c0", "_c1" etc. unless they are quoted. Impala and Hive keywords
+   * must also be quoted.
+   *
+   * The Impala and Hive lexical analyzers recognize a mostly-overlapping,
+   * but sometimes distinct set of keywords. Impala further imposes certain
+   * syntactic rules around identifiers that start with digits. To ensure
+   * that views generated by Impala are readable by both Impala and Hive,
+   * we quote names which are either Hive keywords, Impala keywords, or
+   * are ambiguous in Impala.
+   *
+   * The wildcard ("*") is never quoted though it is not an identifier.
+   */
+  public static String getIdentSql(String ident) {
+    // Don't quote the wildcard used in SELECT *.
+    if (ident.equals("*")) return ident;
+    return hiveNeedsQuotes(ident) || impalaNeedsQuotes(ident)
+        ? "`" + ident + "`" : ident;
+  }
+
+  /**
+   * Test case version of {@link #getIdentSql(String)}, with
+   * special handling for the wildcard and multi-part names.
+   * For creating generic expected values in tests.
+   */
+  public static String identSql(String ident) {
+    List<String> parts = new ArrayList<>();
+    for (String part : Splitter.on('.').split(ident)) {
+      parts.add(ident.equals("*") ? part : getIdentSql(part));
     }
-    if (hiveNeedsQuotes || isImpalaReserved || startsWithNumber) return "`" + ident + "`";
-    return ident;
+    return Joiner.on('.').join(parts);
   }
 
   public static List<String> getIdentSqlList(List<String> identList) {
@@ -173,7 +231,8 @@ public class ToSqlUtils {
         stmt.getTblProperties());
     Map<String, String> generatedProperties = Maps.newLinkedHashMap(
         stmt.getGeneratedKuduProperties());
-    removeHiddenTableProperties(properties, generatedProperties);
+    removeHiddenTableProperties(properties);
+    removeHiddenKuduTableProperties(generatedProperties);
     properties.putAll(generatedProperties);
     String kuduParamsSql = getKuduPartitionByParams(stmt);
     // TODO: Pass the correct compression, if applicable.
@@ -206,7 +265,8 @@ public class ToSqlUtils {
         Maps.newLinkedHashMap(innerStmt.getTblProperties());
     Map<String, String> generatedProperties = Maps.newLinkedHashMap(
         stmt.getCreateStmt().getGeneratedKuduProperties());
-    removeHiddenTableProperties(properties, generatedProperties);
+    removeHiddenTableProperties(properties);
+    removeHiddenKuduTableProperties(generatedProperties);
     properties.putAll(generatedProperties);
     String kuduParamsSql = getKuduPartitionByParams(innerStmt);
     // TODO: Pass the correct compression, if applicable.
@@ -237,7 +297,7 @@ public class ToSqlUtils {
         msTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString());
     List<String> sortColsSql = getSortColumns(properties);
     String comment = properties.get("comment");
-    removeHiddenTableProperties(properties, new HashMap<>());
+    removeHiddenTableProperties(properties);
     List<String> colsSql = new ArrayList<>();
     List<String> partitionColsSql = new ArrayList<>();
     boolean isHbaseTable = table instanceof FeHBaseTable;
diff --git a/fe/src/main/java/org/apache/impala/common/PrintUtils.java b/fe/src/main/java/org/apache/impala/common/PrintUtils.java
index 2636914..70a667c 100644
--- a/fe/src/main/java/org/apache/impala/common/PrintUtils.java
+++ b/fe/src/main/java/org/apache/impala/common/PrintUtils.java
@@ -181,6 +181,7 @@ public class PrintUtils {
       String line = split[i];
       String wrappedLine = WordUtils.wrap(line, wrapLength, null, true);
       // we keep any existing newlines in text - these should be commented hints
+      wrappedLine = wrappedLine.replaceAll(" +$", "");
       ret.append(wrappedLine);
       if (i < split.length - 1) ret.append("\n");
     }
diff --git a/fe/src/main/java/org/apache/impala/planner/PlanNode.java b/fe/src/main/java/org/apache/impala/planner/PlanNode.java
index 520cd8f..2e410ce 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlanNode.java
@@ -357,7 +357,9 @@ abstract public class PlanNode extends TreeNode<PlanNode> {
         for (PipelineMembership pipe: pipelines_) {
           pipelines.add(pipe.getExplainString());
         }
-        expBuilder.append(Joiner.on(", ").join(pipelines) + "\n");
+        if (pipelines.isEmpty()) expBuilder.append("<none>");
+        else expBuilder.append(Joiner.on(", ").join(pipelines));
+        expBuilder.append("\n");
       } else {
         expBuilder.append("<not computed>");
       }
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index 05bd855..b7949ba 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -156,11 +156,11 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     // Arbitrary exprs as partition key values. Non-partition columns should fail.
     AnalysisError("alter table functional.alltypes drop " +
         "partition(year=2050, month=int_col) ",
-        "Partition exprs cannot contain non-partition column(s): month = int_col.");
+        "Partition exprs cannot contain non-partition column(s): `month` = int_col.");
     AnalysisError("alter table functional.alltypes drop " +
         "partition(year=cast(int_col as int), month=12) ",
         "Partition exprs cannot contain non-partition column(s): " +
-        "year = CAST(int_col AS INT).");
+        "`year` = CAST(int_col AS INT).");
 
     // IF NOT EXISTS properly checks for partition existence
     AnalyzesOk("alter table functional.alltypes add " +
@@ -758,8 +758,8 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     AnalysisError("alter table functional.alltypestiny partition (year=2009,month=1) " +
         "set location '/test-warehouse/new_location'",
         "Target partition is cached, please uncache before changing the location " +
-        "using: ALTER TABLE functional.alltypestiny PARTITION (year = 2009, month = 1) " +
-        "SET UNCACHED");
+        "using: ALTER TABLE functional.alltypestiny " +
+        "PARTITION (`year` = 2009, `month` = 1) SET UNCACHED");
 
     // Table/db/partition do not exist
     AnalysisError("alter table baddb.alltypestiny set cached in 'testPool'",
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
index 668b4de..40c2521 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
@@ -2836,7 +2836,8 @@ public class AnalyzeExprsTest extends AnalyzerTest {
         ++numDistinctExprs;
       }
       if (aggExpr.getFnName().toString().equals("ndv")) {
-        assertEquals(rewrittenColName, aggExpr.getChild(0).toSql());
+        assertEquals(ToSqlUtils.getIdentSql(rewrittenColName),
+            aggExpr.getChild(0).toSql());
         ++numNdvExprs;
       }
     }
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
index a46cff8..3b87c89 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
@@ -3814,7 +3814,7 @@ public class AnalyzeStmtsTest extends AnalyzerTest {
         "float_col, double_col, date_string_col, tinyint_col, timestamp_col, " +
         "year, string_col from functional.alltypes",
         "Target table 'functional.alltypes' is incompatible with source expressions.\n" +
-        "Expression 'month' (type: INT) is not compatible with column 'string_col'" +
+        "Expression '`month`' (type: INT) is not compatible with column 'string_col'" +
         " (type: STRING)");
 
     // Empty permutation and no query statement
diff --git a/fe/src/test/java/org/apache/impala/analysis/ExprRewriteRulesTest.java b/fe/src/test/java/org/apache/impala/analysis/ExprRewriteRulesTest.java
index 88421a0..011a558 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ExprRewriteRulesTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ExprRewriteRulesTest.java
@@ -511,7 +511,7 @@ public class ExprRewriteRulesTest extends FrontendTestBase {
 
     // IMPALA-5016: Simplify COALESCE function
     // Test skipping leading nulls.
-    RewritesOk("coalesce(null, id, year)", rule, "coalesce(id, year)");
+    RewritesOk("coalesce(null, id, year)", rule, "coalesce(id, `year`)");
     RewritesOk("coalesce(null, 1, id)", rule, "1");
     RewritesOk("coalesce(null, null, id)", rule, "id");
     // If the leading parameter is a non-NULL constant, rewrite to that constant.
@@ -526,10 +526,10 @@ public class ExprRewriteRulesTest extends FrontendTestBase {
     // Combine COALESCE rule with FoldConstantsRule.
     RewritesOk("coalesce(1 + 2, id, year)", rules, "3");
     RewritesOk("coalesce(null is null, bool_col)", rules, "TRUE");
-    RewritesOk("coalesce(10 + null, id, year)", rules, "coalesce(id, year)");
+    RewritesOk("coalesce(10 + null, id, year)", rules, "coalesce(id, `year`)");
     // Don't rewrite based on nullability of slots. TODO (IMPALA-5753).
     RewritesOk("coalesce(year, id)", rule, null);
-    RewritesOk("functional_kudu.alltypessmall", "coalesce(id, year)", rule, null);
+    RewritesOk("functional_kudu.alltypessmall", "coalesce(id, `year`)", rule, null);
     // IMPALA-7419: coalesce that gets simplified and contains an aggregate
     RewritesOk("coalesce(null, min(distinct tinyint_col), 42)", rule,
         "coalesce(min(tinyint_col), 42)");
diff --git a/fe/src/test/java/org/apache/impala/analysis/ExprRewriterTest.java b/fe/src/test/java/org/apache/impala/analysis/ExprRewriterTest.java
index 2c2f253..d16e102 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ExprRewriterTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ExprRewriterTest.java
@@ -304,9 +304,9 @@ public class ExprRewriterTest extends AnalyzerTest {
         "insert into functional.alltypes(id) partition(year=2009, month=10) " +
         "select 1 + 1",
         "INSERT INTO TABLE functional.alltypes(id) " +
-        "PARTITION (year=2009, month=10) SELECT 1 + 1",
+        "PARTITION (`year`=2009, `month`=10) SELECT 1 + 1",
         "INSERT INTO TABLE functional.alltypes(id) " +
-        "PARTITION (year=2009, month=10) SELECT 2");
+        "PARTITION (`year`=2009, `month`=10) SELECT 2");
 
     if (RuntimeEnv.INSTANCE.isKuduSupported()) {
       // Update.
@@ -386,9 +386,9 @@ public class ExprRewriterTest extends AnalyzerTest {
             + " over(partition by year order by id) as s from functional.alltypes) v "
             + " where year = 2009 and id = 1 and"
             + " int_col < 10 and s = 4",
-        "SELECT * FROM (SELECT id, int_col, year, sum(int_col)"
-            + " OVER (PARTITION BY year ORDER BY id ASC) s FROM functional.alltypes) v"
-            + " WHERE year = CAST(2009 AS INT) AND id = CAST(1 AS INT) AND"
+        "SELECT * FROM (SELECT id, int_col, `year`, sum(int_col)"
+            + " OVER (PARTITION BY `year` ORDER BY id ASC) s FROM functional.alltypes) v"
+            + " WHERE `year` = CAST(2009 AS INT) AND id = CAST(1 AS INT) AND"
             + " int_col < CAST(10 AS INT) AND s = CAST(4 AS BIGINT)");
     assertToSqlWithImplicitCasts(ctx,
         "select * from functional.alltypes where "
diff --git a/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java b/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
index b570056..a106820 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
@@ -17,6 +17,7 @@
 
 package org.apache.impala.analysis;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 import org.apache.impala.common.AnalysisException;
@@ -113,12 +114,7 @@ public class ToSqlTest extends FrontendTestBase {
         // Transform whitespace to single space.
         actual = actual.replace('\n', ' ').replaceAll(" +", " ").trim();
       }
-      if (!actual.equals(expected)) {
-        String msg = "\n<<< Expected(length:" + expected.length() + "): [" + expected
-            + "]\n>>> Actual(length:" + actual.length() + "): [" + actual + "]\n";
-        System.err.println(msg);
-        fail(msg);
-      }
+      assertEquals(expected, actual);
     } catch (Exception e) {
       e.printStackTrace();
       fail("Failed to analyze query: " + query + "\n" + e.getMessage());
@@ -205,13 +201,15 @@ public class ToSqlTest extends FrontendTestBase {
       String fqAlias = "functional." + tbl;
       boolean isCollectionTblRef = isCollectionTableRef(tbl);
       for (String col: columns) {
+        String quotedCol = ToSqlUtils.identSql(col);
         // Test implicit table aliases with unqualified and fully qualified
         // table/view names. Unqualified table/view names should be fully
         // qualified in the generated SQL (IMPALA-962).
         TblsTestToSql(String.format("select %s from $TBL", col), tblName,
-            String.format("SELECT %s FROM %s", col, fqAlias));
+            String.format("SELECT %s FROM %s", quotedCol, fqAlias));
         TblsTestToSql(String.format("select %s.%s from $TBL", uqAlias, col), tblName,
-            String.format("SELECT %s.%s FROM %s", uqAlias, col, fqAlias));
+            String.format("SELECT %s.%s FROM %s", uqAlias, quotedCol,
+            fqAlias));
         // Only references to base tables/views have a fully-qualified implicit alias.
         if (!isCollectionTblRef) {
           TblsTestToSql(String.format("select %s.%s from $TBL", fqAlias, col), tblName,
@@ -220,9 +218,9 @@ public class ToSqlTest extends FrontendTestBase {
 
         // Explicit table alias.
         TblsTestToSql(String.format("select %s from $TBL a", col), tblName,
-            String.format("SELECT %s FROM %s a", col, fqAlias));
+            String.format("SELECT %s FROM %s a", quotedCol, fqAlias));
         TblsTestToSql(String.format("select a.%s from $TBL a", col), tblName,
-            String.format("SELECT a.%s FROM %s a", col, fqAlias));
+            String.format("SELECT a.%s FROM %s a", quotedCol, fqAlias));
       }
     }
 
@@ -252,6 +250,7 @@ public class ToSqlTest extends FrontendTestBase {
     TableName tbl = new TableName("functional", "allcomplextypes");
 
     // Child table uses unqualified implicit alias of parent table.
+    childColumn = ToSqlUtils.identSql(childColumn);
     TblsTestToSql(
         String.format("select %s from $TBL, allcomplextypes.%s",
             childColumn, childTable), tbl,
@@ -443,7 +442,7 @@ public class ToSqlTest extends FrontendTestBase {
     testToSql("alter view functional.complex_view (abc, xyz) as " +
         "select year, month from functional.alltypes_view", "default",
         "ALTER VIEW functional.complex_view(abc, xyz) AS " +
-        "SELECT year, month FROM functional.alltypes_view");
+        "SELECT `year`, `month` FROM functional.alltypes_view");
     testToSql("alter view functional.alltypes_view (cnt) as " +
         "select count(distinct x.int_col) from functional.alltypessmall x " +
         "inner join functional.alltypessmall y on (x.id = y.id) group by x.bigint_col",
@@ -602,8 +601,8 @@ public class ToSqlTest extends FrontendTestBase {
             "select int_col, bool_col, year, month from functional.alltypes",
           String.format(" %snoshuffle%s", prefix, suffix), loc),
           InjectInsertHint("INSERT%s INTO TABLE functional.alltypes(int_col, " +
-            "bool_col) PARTITION (year, month)%s " +
-            "SELECT int_col, bool_col, year, month FROM functional.alltypes",
+            "bool_col) PARTITION (`year`, `month`)%s " +
+            "SELECT int_col, bool_col, `year`, `month` FROM functional.alltypes",
             " \n-- +noshuffle\n", loc));
       testToSql(InjectInsertHint(
             "insert%s into functional.alltypes(int_col, bool_col) " +
@@ -611,8 +610,8 @@ public class ToSqlTest extends FrontendTestBase {
             "select int_col, bool_col, year, month from functional.alltypes",
           String.format(" %sshuffle,clustered%s", prefix, suffix), loc),
           InjectInsertHint("INSERT%s INTO TABLE functional.alltypes(int_col, " +
-            "bool_col) PARTITION (year, month)%s " +
-            "SELECT int_col, bool_col, year, month FROM functional.alltypes",
+            "bool_col) PARTITION (`year`, `month`)%s " +
+            "SELECT int_col, bool_col, `year`, `month` FROM functional.alltypes",
             " \n-- +shuffle,clustered\n", loc));
 
       // Upsert hint.
@@ -835,9 +834,9 @@ public class ToSqlTest extends FrontendTestBase {
     testToSql("values(1, 'a'), (2, 'b') union all values(3, 'c')",
         "VALUES((1, 'a'), (2, 'b')) UNION ALL (VALUES(3, 'c'))");
     testToSql("insert into table functional.alltypessmall " +
-        "partition (year=2009, month=4) " +
+        "partition (`year`=2009, `month`=4) " +
         "values(1, true, 1, 1, 10, 10, 10.0, 10.0, 'a', 'a', cast (0 as timestamp))",
-        "INSERT INTO TABLE functional.alltypessmall PARTITION (year=2009, month=4) " +
+        "INSERT INTO TABLE functional.alltypessmall PARTITION (`year`=2009, `month`=4) " +
         "VALUES(1, TRUE, 1, 1, 10, 10, 10.0, 10.0, 'a', 'a', CAST(0 AS TIMESTAMP))");
     testToSql("upsert into table functional_kudu.testtbl values(1, 'a', 1)",
         "UPSERT INTO TABLE functional_kudu.testtbl VALUES(1, 'a', 1)");
@@ -925,8 +924,8 @@ public class ToSqlTest extends FrontendTestBase {
     testToSql(
         "select key, item from functional.allcomplextypes t, " +
         "(select a1.key, value.item from t.array_map_col a1, a1.value) v",
-        "SELECT key, item FROM functional.allcomplextypes t, " +
-        "(SELECT a1.key, value.item FROM t.array_map_col a1, a1.value) v");
+        "SELECT `key`, item FROM functional.allcomplextypes t, " +
+        "(SELECT a1.`key`, value.item FROM t.array_map_col a1, a1.value) v");
     // Correlated table refs in a union.
     testToSql(
         "select item from functional.allcomplextypes t, " +
@@ -939,7 +938,7 @@ public class ToSqlTest extends FrontendTestBase {
         "(select count(a1.key) c from t.array_map_col a1) v1) " +
         "select * from w",
         "WITH w AS (SELECT c FROM functional.allcomplextypes t, " +
-        "(SELECT count(a1.key) c FROM t.array_map_col a1) v1) " +
+        "(SELECT count(a1.`key`) c FROM t.array_map_col a1) v1) " +
         "SELECT * FROM w");
   }
 
@@ -1077,7 +1076,7 @@ public class ToSqlTest extends FrontendTestBase {
     testToSql("with t1 as (select * from functional.alltypes) " +
             "insert into functional.alltypes partition(year, month) select * from t1",
         "WITH t1 AS (SELECT * FROM functional.alltypes) " +
-        "INSERT INTO TABLE functional.alltypes PARTITION (year, month) " +
+        "INSERT INTO TABLE functional.alltypes PARTITION (`year`, `month`) " +
             "SELECT * FROM t1");
     // WITH clause in upsert stmt.
     testToSql("with t1 as (select * from functional.alltypes) upsert into " +
@@ -1146,7 +1145,7 @@ public class ToSqlTest extends FrontendTestBase {
         "float_col, double_col, date_string_col, string_col, timestamp_col " +
         "from functional.alltypes",
         "INSERT INTO TABLE functional.alltypessmall " +
-        "PARTITION (year=2009, month=4) SELECT id, " +
+        "PARTITION (`year`=2009, `month`=4) SELECT id, " +
         "bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, " +
         "double_col, date_string_col, string_col, timestamp_col " +
         "FROM functional.alltypes");
@@ -1157,9 +1156,9 @@ public class ToSqlTest extends FrontendTestBase {
         "float_col, double_col, date_string_col, string_col, timestamp_col, year, " +
         "month from functional.alltypes",
         "INSERT INTO TABLE functional.alltypessmall " +
-        "PARTITION (year, month) SELECT id, bool_col, " +
+        "PARTITION (`year`, `month`) SELECT id, bool_col, " +
         "tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, " +
-        "date_string_col, string_col, timestamp_col, year, month " +
+        "date_string_col, string_col, timestamp_col, `year`, `month` " +
         "FROM functional.alltypes");
     // Partially dynamic partitions.
     testToSql("insert into table functional.alltypessmall " +
@@ -1168,9 +1167,9 @@ public class ToSqlTest extends FrontendTestBase {
         "float_col, double_col, date_string_col, string_col, timestamp_col, month " +
         "from functional.alltypes",
         "INSERT INTO TABLE functional.alltypessmall " +
-        "PARTITION (year=2009, month) SELECT id, " +
+        "PARTITION (`year`=2009, `month`) SELECT id, " +
         "bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, " +
-        "double_col, date_string_col, string_col, timestamp_col, month " +
+        "double_col, date_string_col, string_col, timestamp_col, `month` " +
         "FROM functional.alltypes");
 
     // Permutations
@@ -1182,7 +1181,7 @@ public class ToSqlTest extends FrontendTestBase {
     // Permutations that mention partition column
     testToSql("insert into table functional.alltypes(id, year, month) " +
         " values(1, 1990, 12)",
-        "INSERT INTO TABLE functional.alltypes(id, year, month) " +
+        "INSERT INTO TABLE functional.alltypes(id, `year`, `month`) " +
         "VALUES(1, 1990, 12)");
 
     // Empty permutation with no select statement
@@ -1193,7 +1192,7 @@ public class ToSqlTest extends FrontendTestBase {
     testToSql("insert into table functional.alltypes(id) " +
         " partition (year=2009, month) values(1, 12)",
         "INSERT INTO TABLE functional.alltypes(id) " +
-        "PARTITION (year=2009, month) VALUES(1, 12)");
+        "PARTITION (`year`=2009, `month`) VALUES(1, 12)");
   }
 
   @Test
diff --git a/fe/src/test/java/org/apache/impala/analysis/ToSqlUtilsTest.java b/fe/src/test/java/org/apache/impala/analysis/ToSqlUtilsTest.java
index cbcd8b1..e33ee01 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ToSqlUtilsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ToSqlUtilsTest.java
@@ -18,16 +18,21 @@
 package org.apache.impala.analysis;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.impala.catalog.AggregateFunction;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
 import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.ScalarFunction;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.FrontendTestBase;
@@ -38,6 +43,96 @@ import com.google.common.collect.Lists;
 
 public class ToSqlUtilsTest extends FrontendTestBase {
 
+  @Test
+  public void testRemoveHiddenProperties() {
+    Map<String,String> props = new HashMap<>();
+    for (String kw : ToSqlUtils.HIDDEN_TABLE_PROPERTIES) {
+      props.put(kw, kw + "-value");
+    }
+    props.put("foo", "foo-value");
+    ToSqlUtils.removeHiddenTableProperties(props);
+    assertEquals(1, props.size());
+    assertEquals("foo-value", props.get("foo"));
+  }
+
+  @Test
+  public void testRemoveHiddenKuduProperties() {
+    Map<String,String> props = new HashMap<>();
+    props.put(KuduTable.KEY_TABLE_NAME, "kudu-value");
+    props.put("foo", "foo-value");
+    ToSqlUtils.removeHiddenKuduTableProperties(props);
+    assertEquals(1, props.size());
+    assertEquals("foo-value", props.get("foo"));
+  }
+
+  @Test
+  public void testGetSortColumns() {
+    Map<String,String> props = new HashMap<>();
+    props.put("foo", "foo-value");
+    // Returns null if no sort cols property
+    assertNull(ToSqlUtils.getSortColumns(props));
+
+    // Degenerate case
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, "");
+    List<String> sortCols = ToSqlUtils.getSortColumns(props);
+    assertTrue(sortCols.isEmpty());
+
+    // One column
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, "col1");
+    sortCols = ToSqlUtils.getSortColumns(props);
+    assertEquals(1, sortCols.size());
+    assertEquals("col1", sortCols.get(0));
+
+    // One column with padding
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, " col1 ");
+    sortCols = ToSqlUtils.getSortColumns(props);
+    assertEquals(1, sortCols.size());
+    assertEquals("col1", sortCols.get(0));
+
+    // One column with spaces in name
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, " col 1 ");
+    sortCols = ToSqlUtils.getSortColumns(props);
+    assertEquals(1, sortCols.size());
+    assertEquals("col 1", sortCols.get(0));
+
+    // Spurious commas
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, ",col1,");
+    sortCols = ToSqlUtils.getSortColumns(props);
+    assertEquals(1, sortCols.size());
+    assertEquals("col1", sortCols.get(0));
+
+    // Spurious commas and spaces
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, " , col1 , ");
+    sortCols = ToSqlUtils.getSortColumns(props);
+    assertEquals(1, sortCols.size());
+    assertEquals("col1", sortCols.get(0));
+
+    // Two columns
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, "col1,col2");
+    sortCols = ToSqlUtils.getSortColumns(props);
+    assertEquals(2, sortCols.size());
+    assertEquals("col1", sortCols.get(0));
+    assertEquals("col2", sortCols.get(1));
+
+    // Two columns with extra commas and spaces
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, " col1 ,, col2 ");
+    sortCols = ToSqlUtils.getSortColumns(props);
+    assertEquals(2, sortCols.size());
+    assertEquals("col1", sortCols.get(0));
+    assertEquals("col2", sortCols.get(1));
+
+    // Three columns
+    props.put(AlterTableSortByStmt.TBL_PROP_SORT_COLUMNS, "col1,col2,col3");
+    sortCols = ToSqlUtils.getSortColumns(props);
+    assertEquals(3, sortCols.size());
+    assertEquals("col1", sortCols.get(0));
+    assertEquals("col2", sortCols.get(1));
+    assertEquals("col3", sortCols.get(2));
+
+    // Note: this method cannot handle the pathological case of a
+    // quoted column with a comma in the name: `foo,bar`.
+  }
+
   private FeTable getTable(String dbName, String tableName) {
     FeTable table = catalog_.getOrLoadTable(dbName, tableName);
     assertNotNull(table);
@@ -51,6 +146,85 @@ public class ToSqlUtilsTest extends FrontendTestBase {
   }
 
   @Test
+  public void testHiveNeedsQuotes() {
+    // Regular old ident
+    assertFalse(ToSqlUtils.hiveNeedsQuotes("foo"));
+    // Operator
+    assertTrue(ToSqlUtils.hiveNeedsQuotes("+"));
+    // Keyword
+    assertTrue(ToSqlUtils.hiveNeedsQuotes("SELECT"));
+    assertTrue(ToSqlUtils.hiveNeedsQuotes("select"));
+    assertTrue(ToSqlUtils.hiveNeedsQuotes("sElEcT"));
+    // Two idents
+    assertTrue(ToSqlUtils.hiveNeedsQuotes("foo bar"));
+    // Expression
+    assertTrue(ToSqlUtils.hiveNeedsQuotes("a+b"));
+    assertFalse(ToSqlUtils.hiveNeedsQuotes("123ab"));
+    assertTrue(ToSqlUtils.hiveNeedsQuotes("123.a"));
+  }
+
+  @Test
+  public void testImpalaNeedsQuotes() {
+    // Regular old ident
+    assertFalse(ToSqlUtils.impalaNeedsQuotes("foo"));
+    // Keyword
+    assertTrue(ToSqlUtils.impalaNeedsQuotes("SELECT"));
+    assertTrue(ToSqlUtils.impalaNeedsQuotes("select"));
+    assertTrue(ToSqlUtils.impalaNeedsQuotes("sElEcT"));
+    // Special case checks for numbers
+    assertTrue(ToSqlUtils.impalaNeedsQuotes("123"));
+    assertTrue(ToSqlUtils.impalaNeedsQuotes("123a"));
+    assertFalse(ToSqlUtils.impalaNeedsQuotes("a123"));
+
+    // Note: the Impala check can't detect multi-part
+    // symbols "a b" nor operators. Rely on the Hive
+    // version for that.
+  }
+
+  @Test
+  public void testGetIdentSql() {
+    // Hive & Impala keyword
+    assertEquals("`create`", ToSqlUtils.getIdentSql("create"));
+    // Hive-only keyword
+    assertEquals("`month`", ToSqlUtils.getIdentSql("month"));
+    // Impala keyword
+    assertEquals("`kudu`", ToSqlUtils.getIdentSql("kudu"));
+    // Number
+    assertEquals("`123`", ToSqlUtils.getIdentSql("123"));
+    // Starts with number
+    assertEquals("`123a`", ToSqlUtils.getIdentSql("123a"));
+    // Contains spaces
+    assertEquals("`a b`", ToSqlUtils.getIdentSql("a b"));
+    // Operator
+    assertEquals("`+`", ToSqlUtils.getIdentSql("+"));
+    // Simple identifier
+    assertEquals("foo", ToSqlUtils.getIdentSql("foo"));
+    // Comment characters in name
+    assertEquals("`foo#`", ToSqlUtils.getIdentSql("foo#"));
+    assertEquals("`foo#bar`", ToSqlUtils.getIdentSql("foo#bar"));
+    assertEquals("`foo--bar`", ToSqlUtils.getIdentSql("foo--bar"));
+
+    List<String> in = Lists.newArrayList("create", "foo");
+    List<String> out = ToSqlUtils.getIdentSqlList(in);
+    assertEquals(2, out.size());
+    assertEquals("`create`", out.get(0));
+    assertEquals("foo", out.get(1));
+
+    assertEquals("`create`.foo", ToSqlUtils.getPathSql(in));
+  }
+
+  @Test
+  public void testToIdentSql() {
+    // Normal quoting
+    assertEquals("`create`", ToSqlUtils.identSql("create"));
+    assertEquals("foo", ToSqlUtils.identSql("foo"));
+    // Wildcard is special in test cases
+    assertEquals("*", ToSqlUtils.identSql("*"));
+    // Multi-part names
+    assertEquals("foo.`create`.*", ToSqlUtils.identSql("foo.create.*"));
+  }
+
+  @Test
   public void testCreateViewSql() {
     {
       FeView view = getView("functional", "view_view");
diff --git a/fe/src/test/java/org/apache/impala/planner/PlannerTest.java b/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
index 683f702..26c0438 100644
--- a/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
+++ b/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
@@ -375,7 +375,9 @@ public class PlannerTest extends PlannerTestBase {
   }
 
   @Test
-  public void testParquetStatsAgg() { runPlannerTestFile("parquet-stats-agg"); }
+  public void testParquetStatsAgg() {
+    runPlannerTestFile("parquet-stats-agg");
+  }
 
   @Test
   public void testParquetFiltering() {
diff --git a/fe/src/test/java/org/apache/impala/util/PrintUtilsTest.java b/fe/src/test/java/org/apache/impala/util/PrintUtilsTest.java
index 550578f..300022f 100644
--- a/fe/src/test/java/org/apache/impala/util/PrintUtilsTest.java
+++ b/fe/src/test/java/org/apache/impala/util/PrintUtilsTest.java
@@ -17,7 +17,9 @@
 
 package org.apache.impala.util;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.impala.common.PrintUtils;
 import org.junit.Test;
@@ -96,14 +98,14 @@ public class PrintUtilsTest {
         "Analyzed query: SELECT * FROM functional_kudu.alltypestiny\n"
             + "WHERE CAST(bigint_col AS DOUBLE) < CAST(10 AS DOUBLE)");
     // Simple query with a hint retains newlines surrounding hint.
-    assertWrap("SELECT \n"
+    assertWrap("SELECT\n"
             + "-- +straight_join\n"
-            + " * FROM tpch_parquet.orders INNER JOIN \n"
+            + " * FROM tpch_parquet.orders INNER JOIN\n"
             + "-- +shuffle\n"
             + " tpch_parquet.customer ON o_custkey = c_custkey",
-        "SELECT \n"
+        "SELECT\n"
             + "-- +straight_join\n"
-            + "* FROM tpch_parquet.orders INNER JOIN \n"
+            + "* FROM tpch_parquet.orders INNER JOIN\n"
             + "-- +shuffle\n"
             + "tpch_parquet.customer ON o_custkey = c_custkey");
     // test that a long string of blanks prints OK, some may be lost for clarity
@@ -152,7 +154,7 @@ public class PrintUtilsTest {
   }
 
   /**
-   * Assert that there are no blank liones embedded in the wrapped output.
+   * Assert that there are no blank lines embedded in the wrapped output.
    */
   private void assertNoBlankLines(String s) {
     assertFalse("output contains blank line " + s, s.contains("\n\n"));
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test b/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test
index 18230ab..d36a2af 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/aggregation.test
@@ -911,8 +911,8 @@ where v.a = v.b and v.b = v.c and v.c = v.d and v.a = v.c and v.a = v.d
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  group by: tinyint_col, smallint_col, int_col + int_col, coalesce(bigint_col, year)
-|  having: int_col + int_col = coalesce(bigint_col, year), smallint_col = int_col + int_col
+|  group by: tinyint_col, smallint_col, int_col + int_col, coalesce(bigint_col, `year`)
+|  having: int_col + int_col = coalesce(bigint_col, `year`), smallint_col = int_col + int_col
 |  row-size=19B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
@@ -1010,7 +1010,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: group_concat(string_col)
-|  group by: day
+|  group by: `day`
 |  row-size=16B cardinality=11
 |
 02:AGGREGATE
@@ -1023,14 +1023,14 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
-   predicates: day = id % 100
+   predicates: `day` = id % 100
    row-size=23B cardinality=1.10K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: group_concat(string_col)
-|  group by: day
+|  group by: `day`
 |  row-size=16B cardinality=11
 |
 02:AGGREGATE
@@ -1047,7 +1047,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
    partitions=11/11 files=11 size=814.73KB
-   predicates: day = id % 100
+   predicates: `day` = id % 100
    row-size=23B cardinality=1.10K
 ====
 # test group_concat with distinct together with another distinct aggregate function
@@ -1059,11 +1059,11 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(CAST(timestamp_col AS STRING)), group_concat(CAST(timestamp_col AS STRING))
-|  group by: year
+|  group by: `year`
 |  row-size=24B cardinality=1
 |
 01:AGGREGATE
-|  group by: year, CAST(timestamp_col AS STRING)
+|  group by: `year`, CAST(timestamp_col AS STRING)
 |  row-size=20B cardinality=10.21K
 |
 00:SCAN HDFS [functional.alltypesagg]
@@ -1076,24 +1076,24 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(CAST(timestamp_col AS STRING)), group_concat:merge(CAST(timestamp_col AS STRING))
-|  group by: year
+|  group by: `year`
 |  row-size=24B cardinality=1
 |
-05:EXCHANGE [HASH(year)]
+05:EXCHANGE [HASH(`year`)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(CAST(timestamp_col AS STRING)), group_concat(CAST(timestamp_col AS STRING))
-|  group by: year
+|  group by: `year`
 |  row-size=24B cardinality=1
 |
 04:AGGREGATE
-|  group by: year, CAST(timestamp_col AS STRING)
+|  group by: `year`, CAST(timestamp_col AS STRING)
 |  row-size=20B cardinality=10.21K
 |
-03:EXCHANGE [HASH(year,CAST(timestamp_col AS STRING))]
+03:EXCHANGE [HASH(`year`,CAST(timestamp_col AS STRING))]
 |
 01:AGGREGATE [STREAMING]
-|  group by: year, CAST(timestamp_col AS STRING)
+|  group by: `year`, CAST(timestamp_col AS STRING)
 |  row-size=20B cardinality=10.21K
 |
 00:SCAN HDFS [functional.alltypesagg]
@@ -1203,12 +1203,12 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(date_string_col), group_concat(date_string_col, '-'), count:merge(*)
-|  group by: month, year
+|  group by: `month`, `year`
 |  row-size=36B cardinality=1
 |
 01:AGGREGATE
 |  output: count(*)
-|  group by: month, year, date_string_col
+|  group by: `month`, `year`, date_string_col
 |  row-size=36B cardinality=10
 |
 00:SCAN HDFS [functional.alltypesagg]
@@ -1221,26 +1221,26 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(date_string_col), group_concat:merge(date_string_col, '-'), count:merge(*)
-|  group by: month, year
+|  group by: `month`, `year`
 |  row-size=36B cardinality=1
 |
-05:EXCHANGE [HASH(month,year)]
+05:EXCHANGE [HASH(`month`,`year`)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(date_string_col), group_concat(date_string_col, '-'), count:merge(*)
-|  group by: month, year
+|  group by: `month`, `year`
 |  row-size=36B cardinality=1
 |
 04:AGGREGATE
 |  output: count:merge(*)
-|  group by: month, year, date_string_col
+|  group by: `month`, `year`, date_string_col
 |  row-size=36B cardinality=10
 |
-03:EXCHANGE [HASH(month,year,date_string_col)]
+03:EXCHANGE [HASH(`month`,`year`,date_string_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: count(*)
-|  group by: month, year, date_string_col
+|  group by: `month`, `year`, date_string_col
 |  row-size=36B cardinality=10
 |
 00:SCAN HDFS [functional.alltypesagg]
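
The aggregation hunks above all trace back to one rule: when the planner prints expressions back as SQL, column names that collide with Hive reserved words (here year, month, and day) are wrapped in backquotes, while ordinary names like int_col are left bare. A minimal sketch of such a quoting helper, using a hypothetical HIVE_KEYWORDS set for illustration (the real reserved-word list is far longer, and Impala's actual implementation may differ):

import java.util.Set;

final class IdentifierQuoting {
  // Hypothetical subset of Hive reserved words, for illustration only.
  private static final Set<String> HIVE_KEYWORDS = Set.of("year", "month", "day");

  // Backquote an identifier iff it collides with a Hive keyword.
  static String quoteIfKeyword(String ident) {
    return HIVE_KEYWORDS.contains(ident.toLowerCase()) ? "`" + ident + "`" : ident;
  }

  public static void main(String[] args) {
    System.out.println(quoteIfKeyword("year"));    // `year`  (quoted, as in the plans above)
    System.out.println(quoteIfKeyword("int_col")); // int_col (left bare)
  }
}
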
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test b/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test
index 9a70f6a..fef2844 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/analytic-fns.test
@@ -2170,7 +2170,7 @@ PLAN-ROOT SINK
 |
 02:ANALYTIC
 |  functions: sum(int_col)
-|  partition by: year
+|  partition by: `year`
 |  order by: id ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  row-size=20B cardinality=3.65K
@@ -2205,7 +2205,7 @@ PLAN-ROOT SINK
 |
 08:ANALYTIC
 |  functions: sum(int_col)
-|  partition by: year, tinyint_col
+|  partition by: `year`, tinyint_col
 |  row-size=42B cardinality=3.65K
 |
 07:SORT
@@ -2214,7 +2214,7 @@ PLAN-ROOT SINK
 |
 06:ANALYTIC
 |  functions: last_value(int_col)
-|  partition by: int_col, year
+|  partition by: int_col, `year`
 |  order by: id ASC
 |  window: ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING
 |  row-size=34B cardinality=3.65K
@@ -2225,7 +2225,7 @@ PLAN-ROOT SINK
 |
 04:ANALYTIC
 |  functions: avg(int_col)
-|  partition by: tinyint_col, id, year
+|  partition by: tinyint_col, id, `year`
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  row-size=30B cardinality=3.65K
@@ -2236,7 +2236,7 @@ PLAN-ROOT SINK
 |
 02:ANALYTIC
 |  functions: last_value(tinyint_col)
-|  partition by: id, year
+|  partition by: id, `year`
 |  order by: int_col ASC
 |  window: ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
 |  row-size=22B cardinality=3.65K
@@ -2279,7 +2279,7 @@ PLAN-ROOT SINK
 |
 04:ANALYTIC
 |  functions: avg(int_col)
-|  partition by: year
+|  partition by: `year`
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  row-size=33B cardinality=7.30K
@@ -2290,7 +2290,7 @@ PLAN-ROOT SINK
 |
 02:ANALYTIC
 |  functions: sum(int_col)
-|  partition by: year, tinyint_col
+|  partition by: `year`, tinyint_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  row-size=25B cardinality=7.30K
@@ -2319,7 +2319,7 @@ PLAN-ROOT SINK
 |
 02:ANALYTIC
 |  functions: sum(id)
-|  partition by: month, tinyint_col
+|  partition by: `month`, tinyint_col
 |  order by: bigint_col ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  row-size=29B cardinality=1
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test b/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test
index 752ccf3..993f432 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/constant-folding.test
@@ -408,8 +408,8 @@ insert into functional.alltypes (id, int_col) partition(year,month)
 select id, int_col, cast(1 + 1 + 1 + year as int), cast(month - (1 - 1 - 1) as int)
 from functional.alltypessmall
 ---- PLAN
-Analyzed query: SELECT id, int_col, CAST(CAST(3 AS BIGINT) + CAST(year AS
-BIGINT) AS INT), CAST(CAST(month AS BIGINT) - CAST(-1 AS BIGINT) AS INT) FROM
+Analyzed query: SELECT id, int_col, CAST(CAST(3 AS BIGINT) + CAST(`year` AS
+BIGINT) AS INT), CAST(CAST(`month` AS BIGINT) - CAST(-1 AS BIGINT) AS INT) FROM
 functional.alltypessmall
 F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
 |  Per-Host Resources: mem-estimate=38.00MB mem-reservation=6.01MB thread-reservation=2
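
The analyzed-query text in this hunk also shows constant folding running before the query is unparsed: 1 + 1 + 1 + year folds to 3 + year, and month - (1 - 1 - 1) folds to month - (-1), after which the keyword columns are printed backquoted. A trivial check of those two folds:

class FoldCheck {
  public static void main(String[] args) {
    int a = 1 + 1 + 1;  // 3  -> printed as CAST(3 AS BIGINT) + CAST(`year` AS BIGINT)
    int b = 1 - 1 - 1;  // -1 -> printed as CAST(`month` AS BIGINT) - CAST(-1 AS BIGINT)
    System.out.println(a + " " + b);  // 3 -1
  }
}
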
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test b/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test
index ce495b3..84243c0 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/ddl.test
@@ -32,12 +32,12 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |  row-size=25B cardinality=13
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month = 2
+|     partition predicates: b.`month` = 2
 |     partitions=1/4 files=1 size=1.58KB
 |     row-size=4B cardinality=25
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009
+   partition predicates: a.`year` = 2009
    partitions=12/24 files=12 size=238.68KB
    runtime filters: RF000 -> a.id
    row-size=21B cardinality=3.65K
@@ -63,12 +63,12 @@ WRITE TO HDFS [default.t, OVERWRITE=false]
 |--04:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month = 2
+|     partition predicates: b.`month` = 2
 |     partitions=1/4 files=1 size=1.58KB
 |     row-size=4B cardinality=25
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009
+   partition predicates: a.`year` = 2009
    partitions=12/24 files=12 size=238.68KB
    runtime filters: RF000 -> a.id
    row-size=21B cardinality=3.65K
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test b/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test
index ef7764c..e52addf 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/hbase.test
@@ -582,7 +582,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NOT NULL
-   row-size=84B cardinality=1.73K
+   row-size=84B cardinality=1.72K
 ====
 # HBase scan query with conjunctive predicates one of which is an 'IS NULL'
 select * from functional_hbase.alltypesagg
@@ -591,7 +591,7 @@ where bigint_col is null and day = 1
 PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
-   predicates: bigint_col IS NULL, day = 1
+   predicates: bigint_col IS NULL, `day` = 1
    row-size=84B cardinality=1.73K
 ====
 # HBase scan query with conjunctive predicates one of which is an 'IS NOT NULL'
@@ -602,7 +602,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NOT NULL, bool_col = TRUE
-   row-size=84B cardinality=1.73K
+   row-size=84B cardinality=1.72K
 ---- SCANRANGELOCATIONS
 NODE 0:
   HBASE KEYRANGE 3:7
@@ -615,7 +615,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
    predicates: bigint_col IS NOT NULL, bool_col = TRUE
-   row-size=84B cardinality=1.73K
+   row-size=84B cardinality=1.72K
 ====
 # HBase scan query with an aggregation and a single predicate
 select count(*) from functional_hbase.alltypesagg
@@ -658,7 +658,7 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 00:SCAN HBASE [functional_hbase.alltypesagg]
-   predicates: bigint_col = 10, day = 1
+   predicates: `day` = 1, bigint_col = 10
    row-size=12B cardinality=1.73K
 ====
 # IMPALA-1141: Simple joins to make sure cardinality estimates are right.
@@ -688,7 +688,7 @@ PLAN-ROOT SINK
 |  row-size=20B cardinality=120
 |
 |--02:SCAN HBASE [functional_hbase.alltypessmall c]
-|     predicates: c.month = 4
+|     predicates: c.`month` = 4
 |     row-size=12B cardinality=12
 |
 01:SCAN HBASE [functional_hbase.alltypessmall a]
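
Besides the quoting, a few HBase row estimates above move from 1.73K to 1.72K; only the rendered value is visible in these hunks, so whether the underlying estimate or its rounding changed cannot be read off the diff. For reference, a sketch of the metric-suffix display convention these plans appear to follow (two decimals plus a suffix for values of 1000 and up; the suffix table is an assumption, not Impala's actual formatting code):

final class CardinalityFormat {
  // Assumed suffixes; illustrates the "1.72K" / "3.65K" style seen above.
  private static final String[] UNITS = {"K", "M", "B", "T"};

  static String format(long v) {
    if (v < 1000) return Long.toString(v);
    double d = v;
    int u = -1;
    while (d >= 1000 && u < UNITS.length - 1) { d /= 1000; u++; }
    return String.format("%.2f%s", d, UNITS[u]);
  }

  public static void main(String[] args) {
    System.out.println(format(730));   // 730
    System.out.println(format(1724));  // 1.72K
    System.out.println(format(3650));  // 3.65K
  }
}
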
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test b/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test
index 244ad82..5e7f685 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/hdfs.test
@@ -5,7 +5,7 @@ where cast(year as string) = '2019-01-01'
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: CAST(year AS STRING) = '2019-01-01'
+   partition predicates: CAST(`year` AS STRING) = '2019-01-01'
    partitions=0/24 files=0 size=0B
    row-size=89B cardinality=0
 ---- DISTRIBUTEDPLAN
@@ -14,7 +14,7 @@ PLAN-ROOT SINK
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: CAST(year AS STRING) = '2019-01-01'
+   partition predicates: CAST(`year` AS STRING) = '2019-01-01'
    partitions=0/24 files=0 size=0B
    row-size=89B cardinality=0
 ====
@@ -106,7 +106,7 @@ select id, month from functional.alltypes where year = 2009
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009
+   partition predicates: `year` = 2009
    partitions=12/24 files=12 size=238.68KB
    row-size=8B cardinality=3.65K
 ---- SCANRANGELOCATIONS
@@ -129,7 +129,7 @@ PLAN-ROOT SINK
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009
+   partition predicates: `year` = 2009
    partitions=12/24 files=12 size=238.68KB
    row-size=8B cardinality=3.65K
 ====
@@ -139,7 +139,7 @@ select * from functional.alltypes where year = 2009.0
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009.0
+   partition predicates: `year` = 2009.0
    partitions=12/24 files=12 size=238.68KB
    row-size=89B cardinality=3.65K
 ====
@@ -148,7 +148,7 @@ select * from functional.alltypes where 2009 = year
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009
+   partition predicates: `year` = 2009
    partitions=12/24 files=12 size=238.68KB
    row-size=89B cardinality=3.65K
 ====
@@ -157,7 +157,7 @@ select * from functional.alltypes where 2009 <=> year
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year IS NOT DISTINCT FROM 2009
+   partition predicates: `year` IS NOT DISTINCT FROM 2009
    partitions=12/24 files=12 size=238.68KB
    row-size=89B cardinality=3.65K
 ====
@@ -167,7 +167,7 @@ select * from functional.alltypes where !(month > 2)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (month > 2)
+   partition predicates: NOT (`month` > 2)
    partitions=4/24 files=4 size=76.83KB
    row-size=89B cardinality=1.18K
 ====
@@ -177,7 +177,7 @@ select * from functional.alltypes where !(!(month=1))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (NOT (month = 1))
+   partition predicates: NOT (NOT (`month` = 1))
    partitions=2/24 files=2 size=40.32KB
    row-size=89B cardinality=620
 ====
@@ -186,7 +186,7 @@ select * from functional.alltypes where !(!(month<=>1))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (NOT (month IS NOT DISTINCT FROM 1))
+   partition predicates: NOT (NOT (`month` IS NOT DISTINCT FROM 1))
    partitions=2/24 files=2 size=40.32KB
    row-size=89B cardinality=620
 ====
@@ -196,7 +196,7 @@ select * from functional.alltypes where year=2009 and !(month < 6)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, NOT (month < 6)
+   partition predicates: `year` = 2009, NOT (`month` < 6)
    partitions=7/24 files=7 size=140.58KB
    row-size=89B cardinality=2.14K
 ====
@@ -206,7 +206,7 @@ select * from functional.alltypes where !(year < 2009) and !(month < 6)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (year < 2009), NOT (month < 6)
+   partition predicates: NOT (`year` < 2009), NOT (`month` < 6)
    partitions=14/24 files=14 size=281.15KB
    row-size=89B cardinality=4.28K
 ====
@@ -216,7 +216,7 @@ select * from functional.alltypes where !(year = 2009 and month > 6)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (year = 2009 AND month > 6)
+   partition predicates: NOT (`year` = 2009 AND `month` > 6)
    partitions=18/24 files=18 size=357.58KB
    row-size=89B cardinality=5.46K
 ====
@@ -225,7 +225,7 @@ select * from functional.alltypes where !(year <=> 2009 and month > 6)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (year IS NOT DISTINCT FROM 2009 AND month > 6)
+   partition predicates: NOT (`year` IS NOT DISTINCT FROM 2009 AND `month` > 6)
    partitions=18/24 files=18 size=357.58KB
    row-size=89B cardinality=5.46K
 ====
@@ -234,7 +234,7 @@ select * from functional.alltypes where !(year <=> 2009) or !(month > 6)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (year IS NOT DISTINCT FROM 2009) OR NOT (month > 6)
+   partition predicates: NOT (`year` IS NOT DISTINCT FROM 2009) OR NOT (`month` > 6)
    partitions=18/24 files=18 size=357.58KB
    row-size=89B cardinality=5.46K
 ====
@@ -244,7 +244,7 @@ select * from functional.alltypes where !(month = 6 or month = 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT month IN (6, 8)
+   partition predicates: NOT `month` IN (6, 8)
    partitions=20/24 files=20 size=398.31KB
    row-size=89B cardinality=6.08K
 ====
@@ -253,7 +253,7 @@ select * from functional.alltypes where !(month <=> 6 or month <=> 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (month IS NOT DISTINCT FROM 6 OR month IS NOT DISTINCT FROM 8)
+   partition predicates: NOT (`month` IS NOT DISTINCT FROM 6 OR `month` IS NOT DISTINCT FROM 8)
    partitions=20/24 files=20 size=398.31KB
    row-size=89B cardinality=6.08K
 ====
@@ -263,7 +263,7 @@ select * from functional.alltypes where not (year = 2009 or month is null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (year = 2009 OR month IS NULL)
+   partition predicates: NOT (`year` = 2009 OR `month` IS NULL)
    partitions=12/24 files=12 size=239.77KB
    row-size=89B cardinality=3.65K
 ====
@@ -273,7 +273,7 @@ select * from functional.alltypes where not (year = 2009 or month <=> null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (year = 2009 OR month IS NOT DISTINCT FROM NULL)
+   partition predicates: NOT (`year` = 2009 OR `month` IS NOT DISTINCT FROM NULL)
    partitions=12/24 files=12 size=239.77KB
    row-size=89B cardinality=3.65K
 ====
@@ -283,7 +283,7 @@ select * from functional.alltypes where not (not (month is null))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (NOT (month IS NULL))
+   partition predicates: NOT (NOT (`month` IS NULL))
    partitions=0/24 files=0 size=0B
    row-size=89B cardinality=0
 ====
@@ -293,7 +293,7 @@ select * from functional.alltypes where not (not (month <=> null))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (NOT (month IS NOT DISTINCT FROM NULL))
+   partition predicates: NOT (NOT (`month` IS NOT DISTINCT FROM NULL))
    partitions=0/24 files=0 size=0B
    row-size=89B cardinality=0
 ====
@@ -303,7 +303,7 @@ select * from functional.alltypes where not (not (month is null or year = 2009))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (NOT (month IS NULL OR year = 2009))
+   partition predicates: NOT (NOT (`month` IS NULL OR `year` = 2009))
    partitions=12/24 files=12 size=238.68KB
    row-size=89B cardinality=3.65K
 ====
@@ -313,7 +313,7 @@ select * from functional.alltypes where not (not (month <=> null or year = 2009)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: NOT (NOT (month IS NOT DISTINCT FROM NULL OR year = 2009))
+   partition predicates: NOT (NOT (`month` IS NOT DISTINCT FROM NULL OR `year` = 2009))
    partitions=12/24 files=12 size=238.68KB
    row-size=89B cardinality=3.65K
 ====
@@ -323,7 +323,7 @@ select * from functional.alltypes where month=1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: month = 1
+   partition predicates: `month` = 1
    partitions=2/24 files=2 size=40.32KB
    row-size=89B cardinality=620
 ====
@@ -333,7 +333,7 @@ select * from functional.alltypes where year=2009 and month=1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/24 files=1 size=19.95KB
    row-size=89B cardinality=310
 ====
@@ -343,7 +343,7 @@ select * from functional.alltypes where year=2009 and month > 6
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 6
+   partition predicates: `year` = 2009, `month` > 6
    partitions=6/24 files=6 size=120.87KB
    row-size=89B cardinality=1.84K
 ====
@@ -352,7 +352,7 @@ select * from functional.alltypes where year=2009 and month < 6
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month < 6
+   partition predicates: `year` = 2009, `month` < 6
    partitions=5/24 files=5 size=98.11KB
    row-size=89B cardinality=1.51K
 ====
@@ -361,7 +361,7 @@ select * from functional.alltypes where year=2009 and month in (1, 3, 5, 7)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month IN (1, 3, 5, 7)
+   partition predicates: `year` = 2009, `month` IN (1, 3, 5, 7)
    partitions=4/24 files=4 size=80.74KB
    row-size=89B cardinality=1.24K
 ====
@@ -370,7 +370,7 @@ select * from functional.alltypes where year<=>2009 and month in (1, 3, 5, 7)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year IS NOT DISTINCT FROM 2009, month IN (1, 3, 5, 7)
+   partition predicates: `year` IS NOT DISTINCT FROM 2009, `month` IN (1, 3, 5, 7)
    partitions=4/24 files=4 size=80.74KB
    row-size=89B cardinality=1.24K
 ====
@@ -381,7 +381,7 @@ where year=2009 and month in (1, 3, 5, 7) and month is not null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month IN (1, 3, 5, 7), month IS NOT NULL
+   partition predicates: `year` = 2009, `month` IN (1, 3, 5, 7), `month` IS NOT NULL
    partitions=4/24 files=4 size=80.74KB
    row-size=89B cardinality=1.24K
 ====
@@ -392,7 +392,7 @@ where year=2009 and month in (1, 3, 5, 7) and month is null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month IN (1, 3, 5, 7), month IS NULL
+   partition predicates: `year` = 2009, `month` IN (1, 3, 5, 7), `month` IS NULL
    partitions=0/24 files=0 size=0B
    row-size=89B cardinality=0
 ====
@@ -401,7 +401,7 @@ select * from functional.alltypes where year=2009 and (month in (1, 3, 5) or mon
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month IN (1, 3, 5, 7)
+   partition predicates: `year` = 2009, `month` IN (1, 3, 5, 7)
    partitions=4/24 files=4 size=80.74KB
    row-size=89B cardinality=1.24K
 ====
@@ -411,7 +411,7 @@ select * from functional.alltypes where year<=2009 and month < 6
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year <= 2009, month < 6
+   partition predicates: `year` <= 2009, `month` < 6
    partitions=5/24 files=5 size=98.11KB
    row-size=89B cardinality=1.51K
 ====
@@ -421,7 +421,7 @@ select * from functional.alltypes where month < 9 and month > 6
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: month < 9, month > 6
+   partition predicates: `month` < 9, `month` > 6
    partitions=4/24 files=4 size=81.46KB
    row-size=89B cardinality=1.24K
 ====
@@ -431,7 +431,7 @@ select * from functional.alltypes where year < 2010 and year < 2009 and month >
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year < 2010, year < 2009, month > 6
+   partition predicates: `year` < 2010, `year` < 2009, `month` > 6
    partitions=0/24 files=0 size=0B
    row-size=89B cardinality=0
 ====
@@ -442,7 +442,7 @@ where year < 2010 and (month > 6 or month = 1 or month in (3, 4))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year < 2010, (month > 6 OR month = 1 OR month IN (3, 4))
+   partition predicates: `year` < 2010, (`month` > 6 OR `month` = 1 OR `month` IN (3, 4))
    partitions=9/24 files=9 size=180.49KB
    row-size=89B cardinality=2.76K
 ====
@@ -453,7 +453,7 @@ where year < 2010 and (month > 6 or month <=> 1 or month in (3, 4))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year < 2010, (month > 6 OR month IS NOT DISTINCT FROM 1 OR month IN (3, 4))
+   partition predicates: `year` < 2010, (`month` > 6 OR `month` IS NOT DISTINCT FROM 1 OR `month` IN (3, 4))
    partitions=9/24 files=9 size=180.49KB
    row-size=89B cardinality=2.76K
 ====
@@ -463,7 +463,7 @@ select * from functional.alltypes where year = 2009 and month between 6 and 8
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month >= 6, month <= 8
+   partition predicates: `year` = 2009, `month` >= 6, `month` <= 8
    partitions=3/24 files=3 size=60.43KB
    row-size=89B cardinality=920
 ====
@@ -473,7 +473,7 @@ select * from functional.alltypes where year <=> 2009 and month between 6 and 8
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year IS NOT DISTINCT FROM 2009, month >= 6, month <= 8
+   partition predicates: `year` IS NOT DISTINCT FROM 2009, `month` >= 6, `month` <= 8
    partitions=3/24 files=3 size=60.43KB
    row-size=89B cardinality=920
 ====
@@ -484,7 +484,7 @@ where year between 2009 and 2009 and month between 6 and 8
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year >= 2009, year <= 2009, month >= 6, month <= 8
+   partition predicates: `year` >= 2009, `year` <= 2009, `month` >= 6, `month` <= 8
    partitions=3/24 files=3 size=60.43KB
    row-size=89B cardinality=920
 ====
@@ -495,7 +495,7 @@ where year = 2009 and (month between 6 and 7 or month between 7 and 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, (month >= 6 AND month <= 7 OR month >= 7 AND month <= 8)
+   partition predicates: `year` = 2009, (`month` >= 6 AND `month` <= 7 OR `month` >= 7 AND `month` <= 8)
    partitions=3/24 files=3 size=60.43KB
    row-size=89B cardinality=920
 ---- SCANRANGELOCATIONS
@@ -511,7 +511,7 @@ where year = 2009 and (month between 5+1 and 8-1 or month between 9-2 and 1+7)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, (month >= 6 AND month <= 7 OR month >= 7 AND month <= 8)
+   partition predicates: `year` = 2009, (`month` >= 6 AND `month` <= 7 OR `month` >= 7 AND `month` <= 8)
    partitions=3/24 files=3 size=60.43KB
    row-size=89B cardinality=920
 ---- SCANRANGELOCATIONS
@@ -526,7 +526,7 @@ select * from functional.alltypes where year - 1 = 2009
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year - 1 = 2009
+   partition predicates: `year` - 1 = 2009
    partitions=12/24 files=12 size=239.77KB
    row-size=89B cardinality=3.65K
 ====
@@ -535,7 +535,7 @@ select * from functional.alltypes where year - 1 <=> 2009
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year - 1 IS NOT DISTINCT FROM 2009
+   partition predicates: `year` - 1 IS NOT DISTINCT FROM 2009
    partitions=12/24 files=12 size=239.77KB
    row-size=89B cardinality=3.65K
 ====
@@ -546,7 +546,7 @@ select * from functional.alltypesagg where day is null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NULL
+   partition predicates: `day` IS NULL
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -556,7 +556,7 @@ select * from functional.alltypesagg where day <=> null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NOT DISTINCT FROM NULL
+   partition predicates: `day` IS NOT DISTINCT FROM NULL
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -566,7 +566,7 @@ select * from functional.alltypesagg where day is not null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NOT NULL
+   partition predicates: `day` IS NOT NULL
    partitions=10/11 files=10 size=743.67KB
    row-size=95B cardinality=10.00K
 ====
@@ -576,7 +576,7 @@ select * from functional.alltypesagg where day is distinct from null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS DISTINCT FROM NULL
+   partition predicates: `day` IS DISTINCT FROM NULL
    partitions=10/11 files=10 size=743.67KB
    row-size=95B cardinality=10.00K
 ====
@@ -585,7 +585,7 @@ select * from functional.alltypesagg where day = day
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day = day
+   partition predicates: `day` = `day`
    partitions=10/11 files=10 size=743.67KB
    row-size=95B cardinality=10.00K
 ====
@@ -617,7 +617,7 @@ select * from functional.alltypesagg where day is null and month = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NULL, month = 1
+   partition predicates: `day` IS NULL, `month` = 1
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -627,7 +627,7 @@ select * from functional.alltypesagg where day <=> null and month = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NOT DISTINCT FROM NULL, month = 1
+   partition predicates: `day` IS NOT DISTINCT FROM NULL, `month` = 1
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -637,7 +637,7 @@ select * from functional.alltypesagg where month = 1 and (day is null or day = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: month = 1, (day IS NULL OR day = 10)
+   partition predicates: `month` = 1, (`day` IS NULL OR `day` = 10)
    partitions=2/11 files=2 size=145.53KB
    row-size=95B cardinality=2.00K
 ====
@@ -647,7 +647,7 @@ select * from functional.alltypesagg where month = 1 and (day <=> null or day =
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: month = 1, (day IS NOT DISTINCT FROM NULL OR day = 10)
+   partition predicates: `month` = 1, (`day` IS NOT DISTINCT FROM NULL OR `day` = 10)
    partitions=2/11 files=2 size=145.53KB
    row-size=95B cardinality=2.00K
 ====
@@ -657,7 +657,7 @@ select * from functional.alltypesagg where month = 1 and (day is null or year =
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: month = 1, (day IS NULL OR year = 2010)
+   partition predicates: `month` = 1, (`day` IS NULL OR `year` = 2010)
    partitions=11/11 files=11 size=814.73KB
    row-size=95B cardinality=11.00K
 ====
@@ -667,7 +667,7 @@ select * from functional.alltypesagg where month = 1 and (day <=> null or year =
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: month = 1, (day IS NOT DISTINCT FROM NULL OR year = 2010)
+   partition predicates: `month` = 1, (`day` IS NOT DISTINCT FROM NULL OR `year` = 2010)
    partitions=11/11 files=11 size=814.73KB
    row-size=95B cardinality=11.00K
 ====
@@ -678,7 +678,7 @@ where (year = 2010 or month = 1) and (day is not null or day = 10)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: (year = 2010 OR month = 1), (day IS NOT NULL OR day = 10)
+   partition predicates: (`year` = 2010 OR `month` = 1), (`day` IS NOT NULL OR `day` = 10)
    partitions=10/11 files=10 size=743.67KB
    row-size=95B cardinality=10.00K
 ====
@@ -689,7 +689,7 @@ where (year = 2010 or month = 1) and (day is distinct from null or day = 10)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: (year = 2010 OR month = 1), (day IS DISTINCT FROM NULL OR day = 10)
+   partition predicates: (`year` = 2010 OR `month` = 1), (`day` IS DISTINCT FROM NULL OR `day` = 10)
    partitions=10/11 files=10 size=743.67KB
    row-size=95B cardinality=10.00K
 ====
@@ -699,7 +699,7 @@ select * from functional.alltypesagg where day is null or month = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NULL OR month = 1
+   partition predicates: `day` IS NULL OR `month` = 1
    partitions=11/11 files=11 size=814.73KB
    row-size=95B cardinality=11.00K
 ====
@@ -709,7 +709,7 @@ select * from functional.alltypesagg where day <=> null or month = 1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NOT DISTINCT FROM NULL OR month = 1
+   partition predicates: `day` IS NOT DISTINCT FROM NULL OR `month` = 1
    partitions=11/11 files=11 size=814.73KB
    row-size=95B cardinality=11.00K
 ====
@@ -719,7 +719,7 @@ select * from functional.alltypesagg where day is null or day = 10
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NULL OR day = 10
+   partition predicates: `day` IS NULL OR `day` = 10
    partitions=2/11 files=2 size=145.53KB
    row-size=95B cardinality=2.00K
 ====
@@ -729,7 +729,7 @@ select * from functional.alltypesagg where day <=> null or day = 10
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day IS NOT DISTINCT FROM NULL OR day = 10
+   partition predicates: `day` IS NOT DISTINCT FROM NULL OR `day` = 10
    partitions=2/11 files=2 size=145.53KB
    row-size=95B cardinality=2.00K
 ====
@@ -739,7 +739,7 @@ select * from functional.alltypesagg where day = 10 or (day is null and year = 2
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day = 10 OR (day IS NULL AND year = 2010)
+   partition predicates: `day` = 10 OR (`day` IS NULL AND `year` = 2010)
    partitions=2/11 files=2 size=145.53KB
    row-size=95B cardinality=2.00K
 ====
@@ -749,7 +749,7 @@ select * from functional.alltypesagg where day = 10 or (day <=> null and year =
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day = 10 OR (day IS NOT DISTINCT FROM NULL AND year = 2010)
+   partition predicates: `day` = 10 OR (`day` IS NOT DISTINCT FROM NULL AND `year` = 2010)
    partitions=2/11 files=2 size=145.53KB
    row-size=95B cardinality=2.00K
 ====
@@ -760,7 +760,7 @@ where (month = 1 and day = 1) or (day is null and year = 2010)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: (month = 1 AND day = 1) OR (day IS NULL AND year = 2010)
+   partition predicates: (`month` = 1 AND `day` = 1) OR (`day` IS NULL AND `year` = 2010)
    partitions=2/11 files=2 size=144.45KB
    row-size=95B cardinality=2.00K
 ====
@@ -771,7 +771,7 @@ where (month = 1 and day = 1) or (day <=> null and year = 2010)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: (month = 1 AND day = 1) OR (day IS NOT DISTINCT FROM NULL AND year = 2010)
+   partition predicates: (`month` = 1 AND `day` = 1) OR (`day` IS NOT DISTINCT FROM NULL AND `year` = 2010)
    partitions=2/11 files=2 size=144.45KB
    row-size=95B cardinality=2.00K
 ====
@@ -781,7 +781,7 @@ select * from functional.alltypesagg where not (day is not null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS NOT NULL)
+   partition predicates: NOT (`day` IS NOT NULL)
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -791,7 +791,7 @@ select * from functional.alltypesagg where not (day is distinct from null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS DISTINCT FROM NULL)
+   partition predicates: NOT (`day` IS DISTINCT FROM NULL)
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -801,7 +801,7 @@ select * from functional.alltypesagg where not (not (day is null))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (NOT (day IS NULL))
+   partition predicates: NOT (NOT (`day` IS NULL))
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -811,7 +811,7 @@ select * from functional.alltypesagg where not (not (day <=> null))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (NOT (day IS NOT DISTINCT FROM NULL))
+   partition predicates: NOT (NOT (`day` IS NOT DISTINCT FROM NULL))
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -821,7 +821,7 @@ select * from functional.alltypesagg where not (day is not null and month = 1)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS NOT NULL AND month = 1)
+   partition predicates: NOT (`day` IS NOT NULL AND `month` = 1)
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -831,7 +831,7 @@ select * from functional.alltypesagg where not (day is distinct from null and mo
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS DISTINCT FROM NULL AND month = 1)
+   partition predicates: NOT (`day` IS DISTINCT FROM NULL AND `month` = 1)
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -841,7 +841,7 @@ select * from functional.alltypesagg where not (day is not null or day < 9)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS NOT NULL OR day < 9)
+   partition predicates: NOT (`day` IS NOT NULL OR `day` < 9)
    partitions=0/11 files=0 size=0B
    row-size=95B cardinality=0
 ====
@@ -851,7 +851,7 @@ select * from functional.alltypesagg where not (day is distinct from null or day
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS DISTINCT FROM NULL OR day < 9)
+   partition predicates: NOT (`day` IS DISTINCT FROM NULL OR `day` < 9)
    partitions=0/11 files=0 size=0B
    row-size=95B cardinality=0
 ====
@@ -862,7 +862,7 @@ where not (day is not null and (not (day < 9 and month = 1)))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS NOT NULL AND (NOT (day < 9 AND month = 1)))
+   partition predicates: NOT (`day` IS NOT NULL AND (NOT (`day` < 9 AND `month` = 1)))
    partitions=9/11 files=9 size=665.77KB
    row-size=95B cardinality=9.00K
 ====
@@ -873,7 +873,7 @@ where not (day is distinct from null and (not (day < 9 and month = 1)))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS DISTINCT FROM NULL AND (NOT (day < 9 AND month = 1)))
+   partition predicates: NOT (`day` IS DISTINCT FROM NULL AND (NOT (`day` < 9 AND `month` = 1)))
    partitions=9/11 files=9 size=665.77KB
    row-size=95B cardinality=9.00K
 ====
@@ -884,7 +884,7 @@ where not (day is not null or (day = 1 and (not (month = 1 or year = 2010))))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS NOT NULL OR (day = 1 AND (NOT (month = 1 OR year = 2010))))
+   partition predicates: NOT (`day` IS NOT NULL OR (`day` = 1 AND (NOT (`month` = 1 OR `year` = 2010))))
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -895,7 +895,7 @@ where not (day is distinct from null or (day = 1 and (not (month = 1 or year = 2
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: NOT (day IS DISTINCT FROM NULL OR (day = 1 AND (NOT (month = 1 OR year = 2010))))
+   partition predicates: NOT (`day` IS DISTINCT FROM NULL OR (`day` = 1 AND (NOT (`month` = 1 OR `year` = 2010))))
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -906,7 +906,7 @@ where year + 1 = 2011 and month + 1 <= 3 and day is null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: year + 1 = 2011, month + 1 <= 3, day IS NULL
+   partition predicates: `year` + 1 = 2011, `month` + 1 <= 3, `day` IS NULL
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -917,7 +917,7 @@ where year + 1 = 2011 and month + 1 <= 3 and day <=> null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: year + 1 = 2011, month + 1 <= 3, day IS NOT DISTINCT FROM NULL
+   partition predicates: `year` + 1 = 2011, `month` + 1 <= 3, `day` IS NOT DISTINCT FROM NULL
    partitions=1/11 files=1 size=71.05KB
    row-size=95B cardinality=1.00K
 ====
@@ -930,7 +930,7 @@ or not (day not in (10)) or not (day != 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day = 5 OR (day >= 1 AND day <= 2) OR (day > 6 AND day < 8) OR day IS NULL OR day IN (4) OR NOT (day IS NOT NULL) OR NOT (day NOT IN (10)) OR NOT (day != 8)
+   partition predicates: `day` = 5 OR (`day` >= 1 AND `day` <= 2) OR (`day` > 6 AND `day` < 8) OR `day` IS NULL OR `day` IN (4) OR NOT (`day` IS NOT NULL) OR NOT (`day` NOT IN (10)) OR NOT (`day` != 8)
    partitions=8/11 files=8 size=591.30KB
    row-size=95B cardinality=8.00K
 ---- SCANRANGELOCATIONS
@@ -953,7 +953,7 @@ or not (day not in (10)) or not (day != 8)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg]
-   partition predicates: day = 5 OR (day >= 1 AND day <= 2) OR (day > 6 AND day < 8) OR day IS NOT DISTINCT FROM NULL OR day IN (4) OR NOT (day IS DISTINCT FROM NULL) OR NOT (day NOT IN (10)) OR NOT (day != 8)
+   partition predicates: `day` = 5 OR (`day` >= 1 AND `day` <= 2) OR (`day` > 6 AND `day` < 8) OR `day` IS NOT DISTINCT FROM NULL OR `day` IN (4) OR NOT (`day` IS DISTINCT FROM NULL) OR NOT (`day` NOT IN (10)) OR NOT (`day` != 8)
    partitions=8/11 files=8 size=591.30KB
    row-size=95B cardinality=8.00K
 ---- SCANRANGELOCATIONS
@@ -1061,7 +1061,7 @@ select * from functional.alltypesaggmultifiles where day <= 2
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesaggmultifiles]
-   partition predicates: day <= 2
+   partition predicates: `day` <= 2
    partitions=2/11 files=8 size=145.97KB
    row-size=84B cardinality=unavailable
 ====
@@ -1128,7 +1128,7 @@ select * from functional.alltypestiny t1 where t1.year != null or t1.year = null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypestiny t1]
-   partition predicates: t1.year != NULL OR t1.year = NULL
+   partition predicates: t1.`year` != NULL OR t1.`year` = NULL
    partitions=0/4 files=0 size=0B
    row-size=89B cardinality=0
 ====
@@ -1139,7 +1139,7 @@ where t1.year IS DISTINCT FROM null or t1.year = null
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypestiny t1]
-   partition predicates: t1.year IS DISTINCT FROM NULL OR t1.year = NULL
+   partition predicates: t1.`year` IS DISTINCT FROM NULL OR t1.`year` = NULL
    partitions=4/4 files=4 size=460B
    row-size=89B cardinality=8
 ====
@@ -1150,7 +1150,7 @@ select * from functional.alltypesagg t1 where t1.year + null != t1.day
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
-   partition predicates: t1.day != t1.year + NULL
+   partition predicates: t1.`day` != t1.`year` + NULL
    partitions=0/11 files=0 size=0B
    row-size=95B cardinality=0
 ====
@@ -1161,7 +1161,7 @@ select * from functional.alltypesagg t1 where t1.year + null IS DISTINCT FROM t1
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
-   partition predicates: t1.day IS DISTINCT FROM t1.year + NULL
+   partition predicates: t1.`day` IS DISTINCT FROM t1.`year` + NULL
    partitions=10/11 files=10 size=743.67KB
    row-size=95B cardinality=10.00K
 ====
@@ -1172,7 +1172,7 @@ select * from functional.alltypesagg t1 where day in (10, null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
-   partition predicates: day IN (10, NULL)
+   partition predicates: `day` IN (10, NULL)
    partitions=1/11 files=1 size=74.48KB
    row-size=95B cardinality=1.00K
 ====
@@ -1183,7 +1183,7 @@ select * from functional.alltypesagg t1 where day not in (10, null)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
-   partition predicates: day NOT IN (10, NULL)
+   partition predicates: `day` NOT IN (10, NULL)
    partitions=0/11 files=0 size=0B
    row-size=95B cardinality=0
 ====
@@ -1194,7 +1194,7 @@ where t1.day = instr("this is a test", "this") or t1.year = year(now()) + 100
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
-   partition predicates: t1.day = 1 OR t1.year = 2119
+   partition predicates: t1.`day` = 1 OR t1.`year` = 2119
    partitions=1/11 files=1 size=73.39KB
    row-size=95B cardinality=1.00K
 ====
@@ -1206,7 +1206,7 @@ where t1.day in (1, cast(2.0 as INT), year(now()) + 100)
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
-   partition predicates: t1.day IN (1, 2, 2119)
+   partition predicates: t1.`day` IN (1, 2, 2119)
    partitions=2/11 files=2 size=147.87KB
    row-size=95B cardinality=2.00K
 ====
@@ -1218,7 +1218,7 @@ where -t1.day in(-1 - 1) or cast(t1.day as string) like '%1%'
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
-   partition predicates: -1 * t1.day IN (-2) OR CAST(t1.day AS STRING) LIKE '%1%'
+   partition predicates: -1 * t1.`day` IN (-2) OR CAST(t1.`day` AS STRING) LIKE '%1%'
    partitions=3/11 files=3 size=222.34KB
    row-size=95B cardinality=3.00K
 ====
@@ -1231,7 +1231,7 @@ where year = (cast(0 as double) / cast(0 as double))
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = (0 / 0), month = (1 / 0)
+   partition predicates: `year` = (0 / 0), `month` = (1 / 0)
    partitions=0/24 files=0 size=0B
    row-size=89B cardinality=0
 ====
@@ -1242,7 +1242,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
-   predicates: year < rand()
+   predicates: `year` < rand()
    row-size=89B cardinality=730
 ====
 # IMPALA-5180: Test that predicates not touching a partition column are ignored in
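
Every pruning hunk in this file pairs a backquoted partition predicate with a partitions=X/24 count; functional.alltypes has 24 partitions (years 2009-2010 by months 1-12), so those counts can be reproduced by evaluating the predicate over the key space alone. A sketch of that static pruning arithmetic for two predicates from the plans above:

class PruneCount {
  public static void main(String[] args) {
    int yearOnly = 0, yearAndMonth = 0;
    // functional.alltypes: 24 partitions, years 2009-2010 x months 1-12.
    for (int year = 2009; year <= 2010; year++) {
      for (int month = 1; month <= 12; month++) {
        if (year == 2009) yearOnly++;                  // `year` = 2009
        if (year == 2009 && month > 6) yearAndMonth++; // `year` = 2009, `month` > 6
      }
    }
    System.out.println(yearOnly + "/24");      // 12/24, as in the plans above
    System.out.println(yearAndMonth + "/24");  // 6/24
  }
}
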
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test b/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test
index df7b607..d210576 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/implicit-joins.test
@@ -438,7 +438,7 @@ PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
 |  output: avg(id)
-|  group by: month
+|  group by: `month`
 |  row-size=12B cardinality=12
 |
 02:SCAN HDFS [functional.alltypes]
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test b/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test
index aa47074..a181af9 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/inline-view.test
@@ -30,12 +30,12 @@ PLAN-ROOT SINK
 |  |  row-size=12B cardinality=1
 |  |
 |  00:SCAN HDFS [functional.alltypessmall]
-|     partition predicates: month = 1
+|     partition predicates: `month` = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     row-size=4B cardinality=25
 |
 03:SCAN HDFS [functional.alltypes t2]
-   partition predicates: month = 1
+   partition predicates: `month` = 1
    partitions=2/24 files=2 size=40.32KB
    runtime filters: RF000 -> t2.int_col
    row-size=89B cardinality=620
@@ -73,12 +73,12 @@ PLAN-ROOT SINK
 |  |  row-size=12B cardinality=10
 |  |
 |  00:SCAN HDFS [functional.alltypessmall]
-|     partition predicates: month = 1
+|     partition predicates: `month` = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     row-size=4B cardinality=25
 |
 03:SCAN HDFS [functional.alltypes t2]
-   partition predicates: month = 1
+   partition predicates: `month` = 1
    partitions=2/24 files=2 size=40.32KB
    runtime filters: RF000 -> t2.int_col
    row-size=89B cardinality=620
@@ -160,18 +160,18 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
-|  other predicates: a.tinyint_col = 15, a.day >= 6, a.tinyint_col + b.tinyint_col < 15
+|  other predicates: a.tinyint_col = 15, a.`day` >= 6, a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
 |  row-size=117B cardinality=5
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month > 2
+|     partition predicates: b.`month` > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
 |     row-size=22B cardinality=5
 |
 00:SCAN HDFS [functional.alltypesagg a]
-   partition predicates: a.day >= 6
+   partition predicates: a.`day` >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
@@ -193,14 +193,14 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
-|  other predicates: a.tinyint_col = 15, a.day >= 6, a.tinyint_col + b.tinyint_col < 15
+|  other predicates: a.tinyint_col = 15, a.`day` >= 6, a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
 |  row-size=117B cardinality=5
 |
 |--04:EXCHANGE [HASH(b.id,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month > 2
+|     partition predicates: b.`month` > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
 |     row-size=22B cardinality=5
@@ -208,7 +208,7 @@ PLAN-ROOT SINK
 03:EXCHANGE [HASH(a.id,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypesagg a]
-   partition predicates: a.day >= 6
+   partition predicates: a.`day` >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
@@ -244,7 +244,7 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: id = id, int_col = int_col
-|  other predicates: tinyint_col = 15, day >= 6, tinyint_col + tinyint_col < 15
+|  other predicates: tinyint_col = 15, `day` >= 6, tinyint_col + tinyint_col < 15
 |  runtime filters: RF000 <- id, RF001 <- int_col
 |  row-size=39B cardinality=2
 |
@@ -267,7 +267,7 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: id = id, int_col = int_col
-|  other predicates: tinyint_col = 15, day >= 6, tinyint_col + tinyint_col < 15
+|  other predicates: tinyint_col = 15, `day` >= 6, tinyint_col + tinyint_col < 15
 |  runtime filters: RF000 <- id, RF001 <- int_col
 |  row-size=39B cardinality=2
 |
@@ -400,7 +400,7 @@ PLAN-ROOT SINK
 |  |     row-size=8B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypesagg a]
-|     partition predicates: month = 1, a.day = 1
+|     partition predicates: `month` = 1, a.day = 1
 |     partitions=1/11 files=1 size=73.39KB
 |     predicates: a.int_col > 899
 |     runtime filters: RF002 -> a.smallint_col
@@ -445,7 +445,7 @@ PLAN-ROOT SINK
 |  |--06:EXCHANGE [HASH(a.smallint_col)]
 |  |  |
 |  |  01:SCAN HDFS [functional.alltypesagg a]
-|  |     partition predicates: month = 1, a.day = 1
+|  |     partition predicates: `month` = 1, a.day = 1
 |  |     partitions=1/11 files=1 size=73.39KB
 |  |     predicates: a.int_col > 899
 |  |     row-size=7B cardinality=100
@@ -955,7 +955,7 @@ PLAN-ROOT SINK
 |  |     row-size=8B cardinality=10
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
-|     partition predicates: month = 1, a.day = 1
+|     partition predicates: `month` = 1, a.day = 1
 |     partitions=1/11 files=1 size=73.39KB
 |     predicates: a.int_col > 899
 |     runtime filters: RF002 -> a.smallint_col
@@ -987,7 +987,7 @@ PLAN-ROOT SINK
 |  |--06:EXCHANGE [HASH(a.smallint_col)]
 |  |  |
 |  |  00:SCAN HDFS [functional.alltypesagg a]
-|  |     partition predicates: month = 1, a.day = 1
+|  |     partition predicates: `month` = 1, a.day = 1
 |  |     partitions=1/11 files=1 size=73.39KB
 |  |     predicates: a.int_col > 899
 |  |     row-size=7B cardinality=100
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test b/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test
index e6432da..d6416cb 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/insert-sort-by.test
@@ -21,7 +21,7 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
 |  row-size=17B cardinality=7.30K
 |
-01:EXCHANGE [HASH(year,month)]
+01:EXCHANGE [HASH(`year`,`month`)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
@@ -78,7 +78,7 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST, int_col ASC NULLS LAST, bool_col ASC NULLS LAST
 |  row-size=17B cardinality=7.30K
 |
-01:EXCHANGE [HASH(year,month)]
+01:EXCHANGE [HASH(`year`,`month`)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
@@ -314,16 +314,16 @@ select a.id, max(b.int_col), min(a.bool_col), b.year, a.month
 from functional.alltypes a join functional.alltypes b on a.id = b.id
 group by a.id, b.year, a.month
 ---- PLAN
-WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(b.year,a.month)]
+WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(b.`year`,a.`month`)]
 |  partitions=24
 |
 04:SORT
-|  order by: b.year ASC NULLS LAST, a.month ASC NULLS LAST, max(b.int_col) ASC NULLS LAST, min(a.bool_col) ASC NULLS LAST
+|  order by: b.`year` ASC NULLS LAST, a.`month` ASC NULLS LAST, max(b.int_col) ASC NULLS LAST, min(a.bool_col) ASC NULLS LAST
 |  row-size=17B cardinality=7.30K
 |
 03:AGGREGATE [FINALIZE]
 |  output: max(b.int_col), min(a.bool_col)
-|  group by: a.id, b.year, a.month
+|  group by: a.id, b.`year`, a.`month`
 |  row-size=17B cardinality=7.30K
 |
 02:HASH JOIN [INNER JOIN]
@@ -340,25 +340,25 @@ WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(b.year,a.month)]
    runtime filters: RF000 -> b.id
    row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
-WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(b.year,a.month)]
+WRITE TO HDFS [test_sort_by.t, OVERWRITE=false, PARTITION-KEYS=(b.`year`,a.`month`)]
 |  partitions=24
 |
 09:SORT
-|  order by: b.year ASC NULLS LAST, a.month ASC NULLS LAST, max(b.int_col) ASC NULLS LAST, min(a.bool_col) ASC NULLS LAST
+|  order by: b.`year` ASC NULLS LAST, a.`month` ASC NULLS LAST, max(b.int_col) ASC NULLS LAST, min(a.bool_col) ASC NULLS LAST
 |  row-size=17B cardinality=7.30K
 |
-08:EXCHANGE [HASH(b.year,a.month)]
+08:EXCHANGE [HASH(b.`year`,a.`month`)]
 |
 07:AGGREGATE [FINALIZE]
 |  output: max:merge(b.int_col), min:merge(a.bool_col)
-|  group by: a.id, b.year, a.month
+|  group by: a.id, b.`year`, a.`month`
 |  row-size=17B cardinality=7.30K
 |
-06:EXCHANGE [HASH(a.id,b.year,a.month)]
+06:EXCHANGE [HASH(a.id,b.`year`,a.`month`)]
 |
 03:AGGREGATE [STREAMING]
 |  output: max(b.int_col), min(a.bool_col)
-|  group by: a.id, b.year, a.month
+|  group by: a.id, b.`year`, a.`month`
 |  row-size=17B cardinality=7.30K
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
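
The SORT nodes in the plans above order rows on the partition keys first, then the table's sort-by columns, all ASC NULLS LAST, so rows for the same partition reach the table sink contiguously. A sketch of the comparator implied by that order-by, using a hypothetical Row type (not Impala's actual sorter):

import java.util.Comparator;

class SortKeySketch {
  record Row(Integer year, Integer month) {}

  public static void main(String[] args) {
    // ASC NULLS LAST on (year, month), matching the SORT nodes above.
    Comparator<Row> order = Comparator
        .comparing(Row::year, Comparator.nullsLast(Comparator.<Integer>naturalOrder()))
        .thenComparing(Row::month, Comparator.nullsLast(Comparator.<Integer>naturalOrder()));
    // A NULL month sorts after month = 11 within the same year:
    System.out.println(order.compare(new Row(2009, 11), new Row(2009, null)) < 0);  // true
  }
}
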
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/insert.test b/testdata/workloads/functional-planner/queries/PlannerTest/insert.test
index 2e85c6a..19483fe 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/insert.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/insert.test
@@ -9,7 +9,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 5
+   partition predicates: `year` = 2009, `month` = 5
    partitions=1/24 files=1 size=20.36KB
    row-size=81B cardinality=310
 ---- SCANRANGELOCATIONS
@@ -20,7 +20,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 5
+   partition predicates: `year` = 2009, `month` = 5
    partitions=1/24 files=1 size=20.36KB
    row-size=81B cardinality=310
 ====
@@ -36,7 +36,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,4
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 5
+   partition predicates: `year` = 2009, `month` = 5
    partitions=1/24 files=1 size=20.36KB
    row-size=81B cardinality=310
 ---- SCANRANGELOCATIONS
@@ -47,7 +47,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,4
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 5
+   partition predicates: `year` = 2009, `month` = 5
    partitions=1/24 files=1 size=20.36KB
    row-size=81B cardinality=310
 ====
@@ -63,7 +63,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=true, PARTITION-KEYS=(2009,4)
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 5
+   partition predicates: `year` = 2009, `month` = 5
    partitions=1/24 files=1 size=20.36KB
    row-size=81B cardinality=310
 ---- SCANRANGELOCATIONS
@@ -74,7 +74,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=true, PARTITION-KEYS=(2009,4)
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 5
+   partition predicates: `year` = 2009, `month` = 5
    partitions=1/24 files=1 size=20.36KB
    row-size=81B cardinality=310
 ====
@@ -94,7 +94,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,m
 |  row-size=89B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=89B cardinality=610
 ---- SCANRANGELOCATIONS
@@ -109,10 +109,10 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,m
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
 |  row-size=89B cardinality=610
 |
-01:EXCHANGE [HASH(year,month)]
+01:EXCHANGE [HASH(`year`,`month`)]
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=89B cardinality=610
 ====
@@ -124,11 +124,11 @@ float_col, double_col, date_string_col, string_col, timestamp_col, year, month
 from functional.alltypes
 where year=2009 and month>10
 ---- PLAN
-WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,month)]
+WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(`year`,`month`)]
 |  partitions=24
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=89B cardinality=610
 ---- SCANRANGELOCATIONS
@@ -136,13 +136,13 @@ NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=12/091201.txt 0:20853
 ---- DISTRIBUTEDPLAN
-WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,month)]
+WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(`year`,`month`)]
 |  partitions=24
 |
-01:EXCHANGE [HASH(year,month)]
+01:EXCHANGE [HASH(`year`,`month`)]
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=89B cardinality=610
 ====
@@ -163,7 +163,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(int_co
 |  row-size=72B cardinality=unavailable
 |
 00:SCAN HDFS [functional_seq_snap.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=11.34KB
    row-size=72B cardinality=unavailable
 ---- DISTRIBUTEDPLAN
@@ -177,7 +177,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(int_co
 01:EXCHANGE [HASH(int_col,int_col)]
 |
 00:SCAN HDFS [functional_seq_snap.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=11.34KB
    row-size=72B cardinality=unavailable
 ====
@@ -192,20 +192,20 @@ from functional.alltypes
 where year=2009 and month>10
 group by year, month
 ---- PLAN
-WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,month)]
+WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(`year`,`month`)]
 |  partitions=24
 |
 02:SORT
-|  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  order by: `year` ASC NULLS LAST, `month` ASC NULLS LAST
 |  row-size=80B cardinality=24
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
-|  group by: year, month
+|  group by: `year`, `month`
 |  row-size=80B cardinality=24
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=89B cardinality=610
 ---- SCANRANGELOCATIONS
@@ -213,27 +213,27 @@ NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=12/091201.txt 0:20853
 ---- DISTRIBUTEDPLAN
-WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,month)]
+WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(`year`,`month`)]
 |  partitions=24
 |
 04:SORT
-|  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  order by: `year` ASC NULLS LAST, `month` ASC NULLS LAST
 |  row-size=80B cardinality=24
 |
 03:AGGREGATE [FINALIZE]
 |  output: min:merge(id), min:merge(bool_col), min:merge(tinyint_col), min:merge(smallint_col), min:merge(int_col), min:merge(bigint_col), min:merge(float_col), min:merge(double_col), min:merge(date_string_col), min:merge(string_col), min:merge(timestamp_col)
-|  group by: year, month
+|  group by: `year`, `month`
 |  row-size=80B cardinality=24
 |
-02:EXCHANGE [HASH(year,month)]
+02:EXCHANGE [HASH(`year`,`month`)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
-|  group by: year, month
+|  group by: `year`, `month`
 |  row-size=80B cardinality=24
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=89B cardinality=610
 ====
@@ -253,7 +253,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,m
 |  row-size=85B cardinality=610
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=85B cardinality=610
 ---- SCANRANGELOCATIONS
@@ -268,10 +268,10 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,m
 |  order by: month ASC NULLS LAST
 |  row-size=85B cardinality=610
 |
-01:EXCHANGE [HASH(month)]
+01:EXCHANGE [HASH(`month`)]
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=85B cardinality=610
 ====
@@ -286,20 +286,20 @@ from functional.alltypes
 where year=2009 and month>10
 group by month
 ---- PLAN
-WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,month)]
+WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,`month`)]
 |  partitions=12
 |
 02:SORT
-|  order by: month ASC NULLS LAST
+|  order by: `month` ASC NULLS LAST
 |  row-size=76B cardinality=12
 |
 01:AGGREGATE [FINALIZE]
 |  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
-|  group by: month
+|  group by: `month`
 |  row-size=76B cardinality=12
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=85B cardinality=610
 ---- SCANRANGELOCATIONS
@@ -307,27 +307,27 @@ NODE 0:
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=11/091101.txt 0:20179
   HDFS SPLIT hdfs://localhost:20500/test-warehouse/alltypes/year=2009/month=12/091201.txt 0:20853
 ---- DISTRIBUTEDPLAN
-WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,month)]
+WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(2009,`month`)]
 |  partitions=12
 |
 04:SORT
-|  order by: month ASC NULLS LAST
+|  order by: `month` ASC NULLS LAST
 |  row-size=76B cardinality=12
 |
 03:AGGREGATE [FINALIZE]
 |  output: min:merge(id), min:merge(bool_col), min:merge(tinyint_col), min:merge(smallint_col), min:merge(int_col), min:merge(bigint_col), min:merge(float_col), min:merge(double_col), min:merge(date_string_col), min:merge(string_col), min:merge(timestamp_col)
-|  group by: month
+|  group by: `month`
 |  row-size=76B cardinality=12
 |
-02:EXCHANGE [HASH(month)]
+02:EXCHANGE [HASH(`month`)]
 |
 01:AGGREGATE [STREAMING]
 |  output: min(id), min(bool_col), min(tinyint_col), min(smallint_col), min(int_col), min(bigint_col), min(float_col), min(double_col), min(date_string_col), min(string_col), min(timestamp_col)
-|  group by: month
+|  group by: `month`
 |  row-size=76B cardinality=12
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month > 10
+   partition predicates: `year` = 2009, `month` > 10
    partitions=2/24 files=2 size=40.07KB
    row-size=85B cardinality=610
 ====
@@ -347,7 +347,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,4
 |  row-size=85B cardinality=300
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year > 2009, month = 4
+   partition predicates: `year` > 2009, `month` = 4
    partitions=1/24 files=1 size=19.71KB
    row-size=85B cardinality=300
 ---- SCANRANGELOCATIONS
@@ -362,7 +362,7 @@ WRITE TO HDFS [functional.alltypessmall, OVERWRITE=false, PARTITION-KEYS=(year,4
 |  row-size=85B cardinality=300
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year > 2009, month = 4
+   partition predicates: `year` > 2009, `month` = 4
    partitions=1/24 files=1 size=19.71KB
    row-size=85B cardinality=300
 ====
@@ -376,7 +376,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  partitions=1
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/24 files=1 size=19.95KB
    limit: 10
    row-size=81B cardinality=10
@@ -391,7 +391,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  limit: 10
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/24 files=1 size=19.95KB
    limit: 10
    row-size=81B cardinality=10
@@ -688,7 +688,7 @@ WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,1)]
 |  order by: year ASC NULLS LAST
 |  row-size=85B cardinality=7.30K
 |
-01:EXCHANGE [HASH(year)]
+01:EXCHANGE [HASH(`year`)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test b/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test
index 01ff807..79242f4 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/join-order.test
@@ -1284,11 +1284,11 @@ PLAN-ROOT SINK
 |  |
 |  07:HASH JOIN [LEFT OUTER JOIN]
 |  |  hash predicates: b.id = a.id
-|  |  other predicates: a.year < 10
+|  |  other predicates: a.`year` < 10
 |  |  row-size=12B cardinality=8
 |  |
 |  |--05:SCAN HDFS [functional.alltypes a]
-|  |     partition predicates: a.year < 10
+|  |     partition predicates: a.`year` < 10
 |  |     partitions=0/24 files=0 size=0B
 |  |     runtime filters: RF004 -> a.id
 |  |     row-size=8B cardinality=0
@@ -1362,11 +1362,11 @@ PLAN-ROOT SINK
 |  |
 |  07:HASH JOIN [FULL OUTER JOIN]
 |  |  hash predicates: b.id = a.id
-|  |  other predicates: a.year < 10
+|  |  other predicates: a.`year` < 10
 |  |  row-size=12B cardinality=8
 |  |
 |  |--05:SCAN HDFS [functional.alltypes a]
-|  |     partition predicates: a.year < 10
+|  |     partition predicates: a.`year` < 10
 |  |     partitions=0/24 files=0 size=0B
 |  |     runtime filters: RF002 -> a.id
 |  |     row-size=8B cardinality=0
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/joins.test b/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
index c6a1a3a..409f979 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/joins.test
@@ -160,18 +160,18 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
-|  other predicates: a.tinyint_col = 15, a.day >= 6, a.tinyint_col + b.tinyint_col < 15
+|  other predicates: a.tinyint_col = 15, a.`day` >= 6, a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
 |  row-size=184B cardinality=5
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month > 2
+|     partition predicates: b.`month` > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
 |     row-size=89B cardinality=5
 |
 00:SCAN HDFS [functional.alltypesagg a]
-   partition predicates: a.day >= 6
+   partition predicates: a.`day` >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
@@ -183,14 +183,14 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: a.id = b.id, a.int_col = b.int_col
-|  other predicates: a.tinyint_col = 15, a.day >= 6, a.tinyint_col + b.tinyint_col < 15
+|  other predicates: a.tinyint_col = 15, a.`day` >= 6, a.tinyint_col + b.tinyint_col < 15
 |  runtime filters: RF000 <- b.id, RF001 <- b.int_col
 |  row-size=184B cardinality=5
 |
 |--04:EXCHANGE [HASH(b.id,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month > 2
+|     partition predicates: b.`month` > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
 |     row-size=89B cardinality=5
@@ -198,7 +198,7 @@ PLAN-ROOT SINK
 03:EXCHANGE [HASH(a.id,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypesagg a]
-   partition predicates: a.day >= 6
+   partition predicates: a.`day` >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id, RF001 -> a.int_col
@@ -223,7 +223,7 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: c.id = a.id, c.string_col = b.string_col
-|  other predicates: a.tinyint_col = 15, b.string_col = '15', a.day >= 6, b.month > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
+|  other predicates: a.tinyint_col = 15, b.string_col = '15', a.`day` >= 6, b.`month` > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
 |  row-size=279B cardinality=2.00K
 |
 |--03:HASH JOIN [FULL OUTER JOIN]
@@ -231,19 +231,19 @@ PLAN-ROOT SINK
 |  |  row-size=184B cardinality=561
 |  |
 |  |--01:SCAN HDFS [functional.alltypessmall b]
-|  |     partition predicates: b.month > 2
+|  |     partition predicates: b.`month` > 2
 |  |     partitions=2/4 files=2 size=3.17KB
 |  |     predicates: b.string_col = '15'
 |  |     row-size=89B cardinality=5
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
-|     partition predicates: a.day >= 6
+|     partition predicates: a.`day` >= 6
 |     partitions=5/11 files=5 size=372.38KB
 |     predicates: a.tinyint_col = 15
 |     row-size=95B cardinality=556
 |
 02:SCAN HDFS [functional.alltypesaggnonulls c]
-   partition predicates: c.day < 3
+   partition predicates: c.`day` < 3
    partitions=2/10 files=2 size=148.10KB
    row-size=95B cardinality=2.00K
 ---- DISTRIBUTEDPLAN
@@ -253,14 +253,14 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
 |  hash predicates: a.id = c.id, b.string_col = c.string_col
-|  other predicates: a.tinyint_col = 15, b.string_col = '15', a.day >= 6, b.month > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
+|  other predicates: a.tinyint_col = 15, b.string_col = '15', a.`day` >= 6, b.`month` > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
 |  runtime filters: RF000 <- c.id, RF001 <- c.string_col
 |  row-size=279B cardinality=2.00K
 |
 |--08:EXCHANGE [HASH(c.id,c.string_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypesaggnonulls c]
-|     partition predicates: c.day < 3
+|     partition predicates: c.`day` < 3
 |     partitions=2/10 files=2 size=148.10KB
 |     row-size=95B cardinality=2.00K
 |
@@ -273,7 +273,7 @@ PLAN-ROOT SINK
 |--06:EXCHANGE [HASH(b.id,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month > 2
+|     partition predicates: b.`month` > 2
 |     partitions=2/4 files=2 size=3.17KB
 |     predicates: b.string_col = '15'
 |     runtime filters: RF001 -> b.string_col
@@ -282,7 +282,7 @@ PLAN-ROOT SINK
 05:EXCHANGE [HASH(a.id,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypesagg a]
-   partition predicates: a.day >= 6
+   partition predicates: a.`day` >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.id
@@ -412,7 +412,7 @@ PLAN-ROOT SINK
 |     row-size=107B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg a]
-   partition predicates: a.day >= 6
+   partition predicates: a.`day` >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.int_col, RF001 -> a.id
@@ -446,7 +446,7 @@ PLAN-ROOT SINK
 |     row-size=107B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg a]
-   partition predicates: a.day >= 6
+   partition predicates: a.`day` >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.int_col, RF001 -> a.id
@@ -474,7 +474,7 @@ PLAN-ROOT SINK
 |--01:EMPTYSET
 |
 00:SCAN HDFS [functional.alltypesagg a]
-   partition predicates: a.day >= 6
+   partition predicates: a.`day` >= 6
    partitions=5/11 files=5 size=372.38KB
    predicates: a.tinyint_col = 15
    runtime filters: RF000 -> a.int_col, RF001 -> a.id
@@ -506,7 +506,7 @@ limit 5
 PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=5]
-|  order by: count(x.day) ASC
+|  order by: count(x.`day`) ASC
 |  row-size=9B cardinality=1
 |
 09:AGGREGATE [FINALIZE]
@@ -559,7 +559,7 @@ PLAN-ROOT SINK
 PLAN-ROOT SINK
 |
 10:TOP-N [LIMIT=5]
-|  order by: count(x.day) ASC
+|  order by: count(x.`day`) ASC
 |  row-size=9B cardinality=1
 |
 09:AGGREGATE [FINALIZE]
@@ -1580,13 +1580,13 @@ PLAN-ROOT SINK
 |  |  row-size=17B cardinality=7
 |  |
 |  |--02:SCAN HDFS [functional.alltypessmall]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=1.58KB
 |  |     predicates: functional.alltypessmall.id < 5
 |  |     row-size=17B cardinality=3
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: functional.alltypessmall.id < 5
 |     row-size=17B cardinality=3
@@ -1629,13 +1629,13 @@ PLAN-ROOT SINK
 |  |  row-size=17B cardinality=7
 |  |
 |  |--03:SCAN HDFS [functional.alltypessmall]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=1.58KB
 |  |     predicates: functional.alltypessmall.id < 5
 |  |     row-size=17B cardinality=3
 |  |
 |  02:SCAN HDFS [functional.alltypessmall]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: functional.alltypessmall.id < 5
 |     row-size=17B cardinality=3
@@ -1684,13 +1684,13 @@ PLAN-ROOT SINK
 |  |  row-size=17B cardinality=7
 |  |
 |  |--05:SCAN HDFS [functional.alltypessmall]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=1.58KB
 |  |     predicates: functional.alltypessmall.id < 5
 |  |     row-size=17B cardinality=3
 |  |
 |  04:SCAN HDFS [functional.alltypessmall]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: functional.alltypessmall.id < 5
 |     row-size=17B cardinality=3
@@ -1702,12 +1702,12 @@ PLAN-ROOT SINK
 |  row-size=17B cardinality=51
 |
 |--02:SCAN HDFS [functional.alltypessmall]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=1.58KB
 |     row-size=17B cardinality=25
 |
 01:SCAN HDFS [functional.alltypessmall]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=1.57KB
    row-size=17B cardinality=25
 ====
@@ -2736,7 +2736,7 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: c.id = a.id, c.string_col IS NOT DISTINCT FROM b.string_col
-|  other predicates: a.tinyint_col = 15, b.string_col = '15', a.day >= 6, b.month > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
+|  other predicates: a.tinyint_col = 15, b.string_col = '15', a.`day` >= 6, b.`month` > 2, a.float_col - c.double_col < 0, a.tinyint_col + b.tinyint_col < 15, (b.double_col * c.tinyint_col > 1000 OR c.tinyint_col < 1000)
 |  row-size=279B cardinality=2.00K
 |
 |--03:HASH JOIN [FULL OUTER JOIN]
@@ -2744,19 +2744,19 @@ PLAN-ROOT SINK
 |  |  row-size=184B cardinality=561
 |  |
 |  |--01:SCAN HDFS [functional.alltypessmall b]
-|  |     partition predicates: b.month > 2
+|  |     partition predicates: b.`month` > 2
 |  |     partitions=2/4 files=2 size=3.17KB
 |  |     predicates: b.string_col = '15'
 |  |     row-size=89B cardinality=5
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
-|     partition predicates: a.day >= 6
+|     partition predicates: a.`day` >= 6
 |     partitions=5/11 files=5 size=372.38KB
 |     predicates: a.tinyint_col = 15
 |     row-size=95B cardinality=556
 |
 02:SCAN HDFS [functional.alltypesaggnonulls c]
-   partition predicates: c.day < 3
+   partition predicates: c.`day` < 3
    partitions=2/10 files=2 size=148.10KB
    row-size=95B cardinality=2.00K
 ====
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test
index d4c3beb..a206c03 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/kudu-upsert.test
@@ -6,7 +6,7 @@ where year=2009 and month=05
 UPSERT INTO KUDU [functional_kudu.testtbl]
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 5
+   partition predicates: `year` = 2009, `month` = 5
    partitions=1/24 files=1 size=20.36KB
    row-size=25B cardinality=310
 ---- DISTRIBUTEDPLAN
@@ -19,7 +19,7 @@ UPSERT INTO KUDU [functional_kudu.testtbl]
 01:EXCHANGE [KUDU(KuduPartition(bigint_col))]
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 5
+   partition predicates: `year` = 2009, `month` = 5
    partitions=1/24 files=1 size=20.36KB
    row-size=25B cardinality=310
 ====
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test b/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
index 9c4c49b..9e2e598 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
@@ -10,7 +10,7 @@ select * from (
     "queryId":"0:0",
     "hash":"25456c60a2e874a20732f42c7af27553",
     "user":"progers",
-    "timestamp":1546590134,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -64,7 +64,7 @@ order by b.bigint_col limit 10
     "queryId":"0:0",
     "hash":"e0309eeff9811f53c82657d62c1e04eb",
     "user":"progers",
-    "timestamp":1546590134,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -201,7 +201,7 @@ create table lineage_test_tbl as select int_col, tinyint_col from functional.all
     "queryId":"0:0",
     "hash":"407f23b24758ffcb2ac445b9703f5c44",
     "user":"progers",
-    "timestamp":1546590134,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -256,7 +256,7 @@ where a.year = 2009 and b.month = 2
     "queryId":"0:0",
     "hash":"f3101dcb046a7d34d7ee14892a6cc94e",
     "user":"progers",
-    "timestamp":1546590134,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -344,7 +344,7 @@ select * from
     "queryId":"0:0",
     "hash":"9c04c1e9feee35ffacf14bfcd3b363a7",
     "user":"progers",
-    "timestamp":1546590134,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -378,7 +378,7 @@ create table lineage_test_tblm as select * from functional_hbase.alltypes limit
     "queryId":"0:0",
     "hash":"a294f36bddf2adb329eac3055a76b2b5",
     "user":"progers",
-    "timestamp":1546590134,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -643,7 +643,7 @@ functional_hbase.alltypes
     "queryId":"0:0",
     "hash":"b923425ce9cc2d53d36523ec83971e67",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -828,7 +828,7 @@ from functional.alltypes
     "queryId":"0:0",
     "hash":"b7b9474fc6b97f104bd031209438ee0e",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -1007,7 +1007,7 @@ where year=2009 and month=05
     "queryId":"0:0",
     "hash":"2ed3a6c784e1c0c7fcef226d71375180",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -1238,7 +1238,7 @@ where year=2009 and month>10
     "queryId":"0:0",
     "hash":"39ac95ce0632ef1ee8b474be644971f3",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -1481,7 +1481,7 @@ having min(id) > 10
     "queryId":"0:0",
     "hash":"e6969c2cc67e9d6f3f985ddc6431f915",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -1768,7 +1768,7 @@ group by int_col, tinyint_col
     "queryId":"0:0",
     "hash":"83c78528e6f5325c56a3f3521b08a78d",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -1815,7 +1815,7 @@ select int_col, rank() over(order by int_col) from functional.alltypesagg
     "queryId":"0:0",
     "hash":"4f1ecaaed571d2ed9f09f091f399c311",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -1873,7 +1873,7 @@ order by a.tinyint_col, a.int_col
     "queryId":"0:0",
     "hash":"b6e26c00b2ef17f0592ebadb0ecc21f6",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -1984,7 +1984,7 @@ order by 2, 3, 4
     "queryId":"0:0",
     "hash":"6bf993cea0d1ab9e613674ef178916c9",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -2116,7 +2116,7 @@ order by 2, 3, 4
     "queryId":"0:0",
     "hash":"811403c86e86fe630dea7bd0a6c89273",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867921,
     "edges":[
         {
             "sources":[
@@ -2250,7 +2250,7 @@ where b.month = 1
     "queryId":"0:0",
     "hash":"e3000cd5edf2a02e1f5407810f3cc09a",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -2358,7 +2358,7 @@ where month = 1
     "queryId":"0:0",
     "hash":"3f1ecf7239e205342aee4979e7cb4877",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -2460,7 +2460,7 @@ and x.int_col + x.float_col + cast(c.string_col as float) < 1000
     "queryId":"0:0",
     "hash":"4edf165aed5982ede63f7c91074f4b44",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -2637,7 +2637,7 @@ from
     "queryId":"0:0",
     "hash":"8b4d1ab11721d9ebdf26666d4195eb18",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -2731,7 +2731,7 @@ limit 0
     "queryId":"0:0",
     "hash":"50d3b4f249f038b0711ea75c17640fc9",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -2795,7 +2795,7 @@ select int_col, string_col from functional.view_view
     "queryId":"0:0",
     "hash":"9073496459077de1332e5017977dedf5",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -2848,7 +2848,7 @@ where t.id < 10
     "queryId":"0:0",
     "hash":"8ba7998033f90e1e358f4fdc7ea4251b",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -2894,7 +2894,7 @@ where id in
     "queryId":"0:0",
     "hash":"e8ad1371d2a13e1ee9ec45689b62cdc9",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -2994,7 +2994,7 @@ and tinyint_col < 10
     "queryId":"0:0",
     "hash":"a7500c022d29c583c31b287868a848bf",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3045,7 +3045,7 @@ and a.bigint_col > 10
     "queryId":"0:0",
     "hash":"5e6227f323793ea4441e2a3119af2f09",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3126,7 +3126,7 @@ with t as (select int_col x, bigint_col y from functional.alltypes) select x, y
     "queryId":"0:0",
     "hash":"a7ab58d90540f28a8dfd69703632ad7a",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3180,7 +3180,7 @@ select id, int_col, string_col, year, month from t1
     "queryId":"0:0",
     "hash":"0bc5b3e66cc72387f74893b1f1934946",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3399,7 +3399,7 @@ from
     "queryId":"0:0",
     "hash":"aa95e5e6f39fc80bb3c318a2515dc77d",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3472,7 +3472,7 @@ create view test_view_lineage as select id from functional.alltypestiny
     "queryId":"0:0",
     "hash":"ff6b1ecb265afe4f03355a07238cfe37",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3522,7 +3522,7 @@ limit 0
     "queryId":"0:0",
     "hash":"b96adf892b897da1e562c5be98724fb5",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3599,7 +3599,7 @@ create view test_view_lineage (a1, a2, a3, a4, a5, a6, a7) as
     "queryId":"0:0",
     "hash":"ffbe643df8f26e92907fb45de1aeda36",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3782,7 +3782,7 @@ create view test_view_lineage as
     "queryId":"0:0",
     "hash":"d4b9e2d63548088f911816b2ae29d7c2",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3919,7 +3919,7 @@ alter view functional.alltypes_view as select id from functional.alltypestiny
     "queryId":"0:0",
     "hash":"8c9367afc562a4c04d2d40e1276646c2",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -3960,7 +3960,7 @@ select * from (
     "queryId":"0:0",
     "hash":"4fb3ceddbf596097335af607d528f5a7",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4028,7 +4028,7 @@ select * from functional.allcomplextypes.int_array_col a inner join
     "queryId":"0:0",
     "hash":"8c0c64f8a4c08b82ad343ab439101957",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4134,7 +4134,7 @@ select * from functional.allcomplextypes t, t.int_array_col a, t.struct_map_col
     "queryId":"0:0",
     "hash":"1b0db371b32e90d33629ed7779332cf7",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4319,7 +4319,7 @@ select a + b as ab, c, d, e from functional.allcomplextypes t,
     "queryId":"0:0",
     "hash":"4affc0d1e384475d1ff2fc2e19643064",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4457,7 +4457,7 @@ where not exists (select 1 from functional.alltypes a where v.id = a.id)
     "queryId":"0:0",
     "hash":"e79b8abc8a682d9e0f6b2c30a6c885f3",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4501,7 +4501,7 @@ where k.int_col < 10
     "queryId":"0:0",
     "hash":"7b7c92d488186d869bb6b78c97666f41",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4556,7 +4556,7 @@ functional.alltypes a where a.id < 100
     "queryId":"0:0",
     "hash":"87a59bac56c6ad27f7af6e71af46d552",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4640,7 +4640,7 @@ functional.alltypes where id < 10
     "queryId":"0:0",
     "hash":"0bccfdbf4118e6d5a3d94062ecb5130a",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4704,7 +4704,7 @@ functional.alltypes where id < 10
     "queryId":"0:0",
     "hash":"f4c1e7b016e75012f7268f2f42ae5630",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
@@ -4770,7 +4770,7 @@ from functional.alltypestiny
     "queryId":"0:0",
     "hash":"de98b09af6b6ab0f0678c5fc0c4369b4",
     "user":"progers",
-    "timestamp":1546590135,
+    "timestamp":1547867922,
     "edges":[
         {
             "sources":[
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test b/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test
index 406f83b..eaa167b 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/nested-collections.test
@@ -17,7 +17,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.allcomplextypes.int_map_col]
    partitions=0/0 files=0 size=0B
-   predicates: value < 10, key = 'test'
+   predicates: value < 10, `key` = 'test'
    row-size=16B cardinality=0
 ====
 # Scan of a deeply nested collection.
@@ -32,7 +32,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.allcomplextypes.complex_nested_struct_col.f2.f12]
    partitions=0/0 files=0 size=0B
-   predicates: key = 'test'
+   predicates: `key` = 'test'
    row-size=20B cardinality=0
 ====
 # Join on two nested collections with structs.
@@ -568,7 +568,7 @@ PLAN-ROOT SINK
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: b.f1 = a.id
-|  |  predicates: b.f1 < a.year
+|  |  predicates: b.f1 < a.`year`
 |  |  row-size=40B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -595,7 +595,7 @@ PLAN-ROOT SINK
 |  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
-|  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  join predicates: b.f1 < a.`year`, b.f1 = a.id
 |  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -622,7 +622,7 @@ PLAN-ROOT SINK
 |  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT SEMI JOIN]
-|  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  join predicates: b.f1 < a.`year`, b.f1 = a.id
 |  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -649,7 +649,7 @@ PLAN-ROOT SINK
 |  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT ANTI JOIN]
-|  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  join predicates: b.f1 < a.`year`, b.f1 = a.id
 |  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -676,7 +676,7 @@ PLAN-ROOT SINK
 |  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT ANTI JOIN]
-|  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  join predicates: b.f1 < a.`year`, b.f1 = a.id
 |  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -703,7 +703,7 @@ PLAN-ROOT SINK
 |  row-size=28B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT OUTER JOIN]
-|  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  join predicates: b.f1 < a.`year`, b.f1 = a.id
 |  |  row-size=28B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -730,7 +730,7 @@ PLAN-ROOT SINK
 |  row-size=40B cardinality=0
 |
 |--04:NESTED LOOP JOIN [LEFT OUTER JOIN]
-|  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  join predicates: b.f1 < a.`year`, b.f1 = a.id
 |  |  row-size=40B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -757,7 +757,7 @@ PLAN-ROOT SINK
 |  row-size=40B cardinality=0
 |
 |--04:NESTED LOOP JOIN [FULL OUTER JOIN]
-|  |  join predicates: b.f1 < a.year, b.f1 = a.id
+|  |  join predicates: b.f1 < a.`year`, b.f1 = a.id
 |  |  predicates: a.id < 10, b.f1 % 2 = 0
 |  |  row-size=40B cardinality=11
 |  |
@@ -794,7 +794,7 @@ PLAN-ROOT SINK
 |  |     row-size=0B cardinality=10
 |  |
 |  09:NESTED LOOP JOIN [LEFT OUTER JOIN]
-|  |  join predicates: (a.month < 4 OR d.f1 > 5)
+|  |  join predicates: (a.`month` < 4 OR d.f1 > 5)
 |  |  row-size=76B cardinality=1
 |  |
 |  |--05:UNNEST [a.struct_array_col d]
@@ -808,7 +808,7 @@ PLAN-ROOT SINK
 |  |     row-size=0B cardinality=10
 |  |
 |  07:NESTED LOOP JOIN [INNER JOIN]
-|  |  join predicates: c.value = a.year
+|  |  join predicates: c.value = a.`year`
 |  |  row-size=64B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -1319,7 +1319,7 @@ PLAN-ROOT SINK
 |  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [RIGHT SEMI JOIN]
-|  |  join predicates: a.year < b.item, b.item = id
+|  |  join predicates: a.`year` < b.item, b.item = id
 |  |  row-size=20B cardinality=1
 |  |
 |  |--02:SINGULAR ROW SRC
@@ -1374,7 +1374,7 @@ PLAN-ROOT SINK
 |
 |--04:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
 |  |  hash predicates: id = b.item
-|  |  other join predicates: a.year < b.item
+|  |  other join predicates: a.`year` < b.item
 |  |  row-size=20B cardinality=1
 |  |
 |  |--03:UNNEST [a.int_array_col b]
@@ -1837,7 +1837,7 @@ PLAN-ROOT SINK
 |  |  |     row-size=28B cardinality=1
 |  |  |
 |  |  08:UNNEST [c.f12]
-|  |     predicates: coalesce(key, CAST(f21 AS STRING)) = 'test3'
+|  |     predicates: coalesce(`key`, CAST(f21 AS STRING)) = 'test3'
 |  |     row-size=20B cardinality=10
 |  |
 |  05:UNNEST [t.complex_nested_struct_col.f2 c]
@@ -1847,7 +1847,7 @@ PLAN-ROOT SINK
    partitions=0/0 files=0 size=0B
    predicates: t.id < 200, !empty(t.complex_nested_struct_col.f2), !empty(t.int_array_col)
    predicates on a: a.item >= 10, a.item <= 20, a.item % 2 = 0
-   predicates on m: m.key = 'test', m.value != 30
+   predicates on m: m.`key` = 'test', m.value != 30
    predicates on c: c.f11 >= 10, c.f11 <= 20, c.f11 % 2 = 0
    predicates on f12: f12.key = 'test2'
    row-size=48B cardinality=0
@@ -1948,15 +1948,15 @@ PLAN-ROOT SINK
 |  |
 |  06:ANALYTIC
 |  |  functions: count(*)
-|  |  partition by: key
+|  |  partition by: `key`
 |  |  row-size=20B cardinality=10
 |  |
 |  05:SORT
-|  |  order by: key ASC NULLS FIRST
+|  |  order by: `key` ASC NULLS FIRST
 |  |  row-size=12B cardinality=10
 |  |
 |  04:AGGREGATE [FINALIZE]
-|  |  group by: key
+|  |  group by: `key`
 |  |  row-size=12B cardinality=10
 |  |
 |  03:UNNEST [t.int_map_col]
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/order.test b/testdata/workloads/functional-planner/queries/PlannerTest/order.test
index 28d6aa8..fe02596 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/order.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/order.test
@@ -434,7 +434,7 @@ PLAN-ROOT SINK
 |  |     row-size=8B cardinality=10
 |  |
 |  00:SCAN HDFS [functional.alltypesagg a]
-|     partition predicates: a.month = 1, a.day = 1
+|     partition predicates: a.`month` = 1, a.`day` = 1
 |     partitions=1/11 files=1 size=73.39KB
 |     predicates: a.int_col > 899
 |     runtime filters: RF002 -> a.smallint_col
@@ -471,7 +471,7 @@ PLAN-ROOT SINK
 |  |--07:EXCHANGE [HASH(a.smallint_col)]
 |  |  |
 |  |  00:SCAN HDFS [functional.alltypesagg a]
-|  |     partition predicates: a.month = 1, a.day = 1
+|  |     partition predicates: a.`month` = 1, a.`day` = 1
 |  |     partitions=1/11 files=1 size=73.39KB
 |  |     predicates: a.int_col > 899
 |  |     row-size=7B cardinality=100
@@ -659,22 +659,22 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- DISTRIBUTEDPLAN
@@ -702,22 +702,22 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -749,22 +749,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--06:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- DISTRIBUTEDPLAN
@@ -796,22 +796,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--06:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -863,27 +863,27 @@ PLAN-ROOT SINK
 |  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--06:SCAN HDFS [functional.alltypestiny]
-|  |  |     partition predicates: year = 2009, month = 3
+|  |  |     partition predicates: `year` = 2009, `month` = 3
 |  |  |     partitions=1/4 files=1 size=115B
 |  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  05:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -896,22 +896,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=2
 |  |
 |  |--14:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 5
+|  |     partition predicates: `year` = 2009, `month` = 5
 |  |     partitions=0/4 files=0 size=0B
 |  |     row-size=89B cardinality=0
 |  |
 |  13:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 4
+|     partition predicates: `year` = 2009, `month` = 4
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--11:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 4
+|     partition predicates: `year` = 2009, `month` = 4
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 10:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 3
+   partition predicates: `year` = 2009, `month` = 3
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- DISTRIBUTEDPLAN
@@ -957,27 +957,27 @@ PLAN-ROOT SINK
 |  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--06:SCAN HDFS [functional.alltypestiny]
-|  |  |     partition predicates: year = 2009, month = 3
+|  |  |     partition predicates: `year` = 2009, `month` = 3
 |  |  |     partitions=1/4 files=1 size=115B
 |  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  05:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -996,22 +996,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=2
 |  |
 |  |--14:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 5
+|  |     partition predicates: `year` = 2009, `month` = 5
 |  |     partitions=0/4 files=0 size=0B
 |  |     row-size=89B cardinality=0
 |  |
 |  13:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 4
+|     partition predicates: `year` = 2009, `month` = 4
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--11:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 4
+|     partition predicates: `year` = 2009, `month` = 4
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 10:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 3
+   partition predicates: `year` = 2009, `month` = 3
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test
index d518039..07e5ef2 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering-disabled.test
@@ -57,7 +57,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partition predicates: `year` > CAST(2000 AS INT), `month` < CAST(12 AS INT)
    partitions=22/24 files=22 size=173.16KB
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > ' [...]
    stored statistics:
@@ -334,7 +334,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypesmixedformat]
-   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partition predicates: `year` > CAST(2000 AS INT), `month` < CAST(12 AS INT)
    partitions=4/4 files=4 size=66.12KB
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > ' [...]
    stored statistics:
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test
index 6e1f249..c520c6b 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-filtering.test
@@ -81,7 +81,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partition predicates: `year` > CAST(2000 AS INT), `month` < CAST(12 AS INT)
    partitions=22/24 files=22 size=173.16KB
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > ' [...]
    stored statistics:
@@ -116,7 +116,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partition predicates: `year` > CAST(2000 AS INT), `month` < CAST(12 AS INT)
    partitions=22/24 files=22 size=437.72KB
    predicates: id = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > ' [...]
    stored statistics:
@@ -583,7 +583,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypesmixedformat]
-   partition predicates: year > CAST(2000 AS INT), month < CAST(12 AS INT)
+   partition predicates: `year` > CAST(2000 AS INT), `month` < CAST(12 AS INT)
    partitions=4/4 files=4 size=66.12KB
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > ' [...]
    stored statistics:
@@ -619,7 +619,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional.alltypesmixedformat]
-   partition predicates: year != CAST(2009 AS INT), month != CAST(4 AS INT)
+   partition predicates: `year` != CAST(2009 AS INT), `month` != CAST(4 AS INT)
    partitions=0/4 files=0 size=0B
    predicates: bool_col, bigint_col < CAST(5000 AS BIGINT), double_col > CAST(100.00 AS DOUBLE), float_col > CAST(50.00 AS FLOAT), id = CAST(1 AS INT), tinyint_col < CAST(50 AS TINYINT), int_col % CAST(2 AS INT) = CAST(1 AS INT), string_col IN ('aaaa', 'bbbb', 'cccc'), smallint_col IN (CAST(1 AS SMALLINT), CAST(2 AS SMALLINT), CAST(3 AS SMALLINT), CAST(4 AS SMALLINT), CAST(5 AS SMALLINT)), timestamp_cmp(timestamp_col, TIMESTAMP '2016-11-20 00:00:00') = CAST(1 AS INT), date_string_col > ' [...]
    stored statistics:
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test
index 85962ce..6ec083a 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/parquet-stats-agg.test
@@ -130,7 +130,7 @@ select count(year) from functional_parquet.alltypes
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: count(year)
+|  output: count(`year`)
 |  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
@@ -144,7 +144,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
-|  group by: month, year
+|  group by: `month`, `year`
 |  row-size=16B cardinality=24
 |
 00:SCAN HDFS [functional_parquet.alltypes]
@@ -158,7 +158,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
-|  group by: tinyint_col, year
+|  group by: tinyint_col, `year`
 |  row-size=13B cardinality=unavailable
 |
 00:SCAN HDFS [functional_parquet.alltypes]
@@ -171,7 +171,7 @@ select avg(year), count(*) from functional_parquet.alltypes
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: avg(year), count(*)
+|  output: avg(`year`), count(*)
 |  row-size=16B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
@@ -241,7 +241,7 @@ PLAN-ROOT SINK
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
-|  |  group by: year
+|  |  group by: `year`
 |  |  row-size=12B cardinality=2
 |  |
 |  01:SCAN HDFS [functional_parquet.alltypes]
@@ -283,7 +283,7 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partition predicates: year < 2010, month > 8
+   partition predicates: `year` < 2010, `month` > 8
    partitions=4/24 files=4 size=31.40KB
    row-size=8B cardinality=unavailable
 ====
@@ -297,7 +297,7 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partition predicates: year < 2010
+   partition predicates: `year` < 2010
    partitions=12/24 files=12 size=94.74KB
    predicates: tinyint_col > 8
    row-size=1B cardinality=unavailable
@@ -365,7 +365,7 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=0
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partition predicates: year = -1
+   partition predicates: `year` = -1
    partitions=0/24 files=0 size=0B
    row-size=0B cardinality=0
 ====
@@ -413,11 +413,11 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: sum_init_zero(functional_parquet.alltypes.parquet-stats: num_rows)
-|  group by: year
+|  group by: `year`
 |  row-size=12B cardinality=2
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   partition predicates: month = 1
+   partition predicates: `month` = 1
    partitions=2/24 files=2 size=16.06KB
    row-size=12B cardinality=unavailable
 ====
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test b/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test
index bc4d740..2b372d8 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/partition-key-scans.test
@@ -4,7 +4,7 @@ select min(month), max(year), ndv(day) from functional.alltypesagg
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: min(month), max(year), ndv(day)
+|  output: min(`month`), max(`year`), ndv(`day`)
 |  row-size=16B cardinality=1
 |
 00:UNION
@@ -14,7 +14,7 @@ PLAN-ROOT SINK
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: min(month), max(year), ndv(day)
+|  output: min(`month`), max(`year`), ndv(`day`)
 |  row-size=16B cardinality=1
 |
 00:UNION
@@ -27,12 +27,12 @@ select count(distinct year), ndv(day) from functional.alltypesagg
 PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
-|  output: count(year), ndv:merge(day)
+|  output: count(`year`), ndv:merge(`day`)
 |  row-size=16B cardinality=1
 |
 01:AGGREGATE
-|  output: ndv(day)
-|  group by: year
+|  output: ndv(`day`)
+|  group by: `year`
 |  row-size=12B cardinality=1
 |
 00:UNION
@@ -42,12 +42,12 @@ PLAN-ROOT SINK
 PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
-|  output: count(year), ndv:merge(day)
+|  output: count(`year`), ndv:merge(`day`)
 |  row-size=16B cardinality=1
 |
 01:AGGREGATE
-|  output: ndv(day)
-|  group by: year
+|  output: ndv(`day`)
+|  group by: `year`
 |  row-size=12B cardinality=1
 |
 00:UNION
@@ -60,7 +60,7 @@ select min(month), max(day) from functional.alltypesagg where year = 2010 and da
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: min(month), max(day)
+|  output: min(`month`), max(`day`)
 |  row-size=8B cardinality=1
 |
 00:UNION
@@ -75,7 +75,7 @@ select c1, c2 from
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: min(year), max(month)
+|  output: min(`year`), max(`month`)
 |  row-size=8B cardinality=0
 |
 00:UNION
@@ -87,9 +87,9 @@ select ndv(month) from functional.alltypesagg group by year having max(day)=10
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: ndv(month), max(day)
-|  group by: year
-|  having: max(day) = 10
+|  output: ndv(`month`), max(`day`)
+|  group by: `year`
+|  having: max(`day`) = 10
 |  row-size=16B cardinality=0
 |
 00:UNION
@@ -102,7 +102,7 @@ select month from functional.alltypes group by month
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  group by: month
+|  group by: `month`
 |  row-size=4B cardinality=12
 |
 00:UNION
@@ -115,7 +115,7 @@ select distinct month from functional.alltypes where month % 2 = 0
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  group by: month
+|  group by: `month`
 |  row-size=4B cardinality=6
 |
 00:UNION
@@ -130,11 +130,11 @@ where a.year = b.year
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
-|  output: min(a.month)
+|  output: min(a.`month`)
 |  row-size=4B cardinality=1
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: a.year = b.year
+|  hash predicates: a.`year` = b.`year`
 |  row-size=12B cardinality=24
 |
 |--01:UNION
@@ -148,11 +148,11 @@ PLAN-ROOT SINK
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
-|  output: min(a.month)
+|  output: min(a.`month`)
 |  row-size=4B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.year = b.year
+|  hash predicates: a.`year` = b.`year`
 |  row-size=12B cardinality=24
 |
 |--04:EXCHANGE [UNPARTITIONED]
@@ -175,12 +175,12 @@ on (a.year = b.year)
 PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: year = year
-|  runtime filters: RF000 <- year
+|  hash predicates: `year` = `year`
+|  runtime filters: RF000 <- `year`
 |  row-size=16B cardinality=4
 |
 |--01:AGGREGATE [FINALIZE]
-|  |  group by: year
+|  |  group by: `year`
 |  |  row-size=4B cardinality=2
 |  |
 |  00:UNION
@@ -188,8 +188,8 @@ PLAN-ROOT SINK
 |     row-size=4B cardinality=2
 |
 03:AGGREGATE [FINALIZE]
-|  output: count(month)
-|  group by: year
+|  output: count(`month`)
+|  group by: `year`
 |  row-size=12B cardinality=2
 |
 02:SCAN HDFS [functional.alltypes]
@@ -204,7 +204,7 @@ functional.alltypes a, functional.alltypesnopart b
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
-|  output: min(a.year), ndv(b.timestamp_col)
+|  output: min(a.`year`), ndv(b.timestamp_col)
 |  row-size=12B cardinality=0
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
@@ -227,7 +227,7 @@ select c1, c2 from
 PLAN-ROOT SINK
 |
 03:AGGREGATE [FINALIZE]
-|  output: ndv(a.year + b.year), min(a.month + b.month)
+|  output: ndv(a.`year` + b.`year`), min(a.`month` + b.`month`)
 |  row-size=16B cardinality=1
 |
 02:NESTED LOOP JOIN [CROSS JOIN]
@@ -251,12 +251,12 @@ on (t1.int_col = t3.int_col)
 PLAN-ROOT SINK
 |
 03:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.int_col = min(t2.year)
-|  runtime filters: RF000 <- min(t2.year)
+|  hash predicates: t1.int_col = min(t2.`year`)
+|  runtime filters: RF000 <- min(t2.`year`)
 |  row-size=8B cardinality=8
 |
 |--02:AGGREGATE [FINALIZE]
-|  |  output: min(t2.year)
+|  |  output: min(t2.`year`)
 |  |  row-size=4B cardinality=1
 |  |
 |  01:UNION
@@ -283,7 +283,7 @@ PLAN-ROOT SINK
 |  row-size=4B cardinality=14
 |
 |--04:AGGREGATE [FINALIZE]
-|  |  group by: year
+|  |  group by: `year`
 |  |  row-size=4B cardinality=2
 |  |
 |  03:UNION
@@ -291,7 +291,7 @@ PLAN-ROOT SINK
 |     row-size=4B cardinality=2
 |
 02:AGGREGATE [FINALIZE]
-|  group by: month
+|  group by: `month`
 |  row-size=4B cardinality=12
 |
 01:UNION
@@ -304,7 +304,7 @@ select date_string_col, min(month) from functional.alltypes group by date_string
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: min(month)
+|  output: min(`month`)
 |  group by: date_string_col
 |  row-size=24B cardinality=736
 |
@@ -318,7 +318,7 @@ select count(month) from functional.alltypes
 PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
-|  output: count(month)
+|  output: count(`month`)
 |  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes]
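
[Editor's note] In the predicate-propagation hunks below the predicate order also flips (e.g. b.`month` = c.`month` now precedes b.id = c.id). One consistent reading, inferred from the expected output rather than from the planner source, is that predicates are ordered by their rendered SQL text, and the backtick character (0x60) collates before lowercase letters. A quick check of that assumption:

import java.util.Arrays;
import java.util.List;

// Sorting the rendered predicate strings lexicographically puts the
// backtick-quoted columns first, matching the new expected plans.
public class PredicateOrdering {
  public static void main(String[] args) {
    List<String> preds = Arrays.asList(
        "b.id = c.id", "b.`month` = c.`month`",
        "b.`year` = c.`year`", "b.smallint_col = c.int_col");
    preds.sort(String::compareTo);
    // Prints: b.`month` = c.`month`, b.`year` = c.`year`,
    //         b.id = c.id, b.smallint_col = c.int_col
    System.out.println(String.join(", ", preds));
  }
}
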
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test b/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test
index 4b1ce24..7780a14 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/predicate-propagation.test
@@ -66,7 +66,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partition predicates: functional.alltypes.month < 2
    partitions=2/24 files=2 size=40.32KB
-   predicates: functional.alltypes.id < 2, functional.alltypes.tinyint_col < 2, id = int_col, int_col < 2, month = id, tinyint_col = int_col
+   predicates: `month` = id, functional.alltypes.id < 2, functional.alltypes.tinyint_col < 2, id = int_col, int_col < 2, tinyint_col = int_col
    row-size=13B cardinality=62
 ====
 # all subquery results get materialized correctly;
@@ -243,8 +243,8 @@ where a.year = 2009 and b.month + 2 <= 4 and b.id = 17
 PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: b.id = c.id, b.month = c.month, b.year = c.year, b.smallint_col = c.int_col
-|  runtime filters: RF000 <- c.id, RF001 <- c.month, RF002 <- c.year, RF003 <- c.int_col
+|  hash predicates: b.`month` = c.`month`, b.`year` = c.`year`, b.id = c.id, b.smallint_col = c.int_col
+|  runtime filters: RF000 <- c.`month`, RF001 <- c.`year`, RF002 <- c.id, RF003 <- c.int_col
 |  row-size=43B cardinality=1
 |
 |--02:SCAN HDFS [functional.alltypestiny c]
@@ -254,22 +254,22 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
-|  hash predicates: a.id = b.id, a.month = b.month, a.year = b.year, a.tinyint_col = b.smallint_col
-|  runtime filters: RF008 <- b.id, RF009 <- b.month, RF010 <- b.year, RF011 <- b.smallint_col
+|  hash predicates: a.`month` = b.`month`, a.`year` = b.`year`, a.id = b.id, a.tinyint_col = b.smallint_col
+|  runtime filters: RF008 <- b.`month`, RF009 <- b.`year`, RF010 <- b.id, RF011 <- b.smallint_col
 |  row-size=27B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.year = 2009, b.month + 2 <= 4
+|     partition predicates: b.year = 2009, b.`month` + 2 <= 4
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: b.id = 17, CAST(sin(b.smallint_col) AS BOOLEAN) = TRUE
-|     runtime filters: RF000 -> b.id, RF001 -> b.month, RF002 -> b.year, RF003 -> b.smallint_col
+|     runtime filters: RF000 -> b.`month`, RF001 -> b.`year`, RF002 -> b.id, RF003 -> b.smallint_col
 |     row-size=14B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month + 2 <= 4
+   partition predicates: a.`year` = 2009, a.month + 2 <= 4
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id = 17, CAST(sin(a.tinyint_col) AS BOOLEAN) = TRUE
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col, RF008 -> a.id, RF009 -> a.month, RF010 -> a.year, RF011 -> a.tinyint_col
+   runtime filters: RF000 -> a.month, RF001 -> a.year, RF002 -> a.id, RF003 -> a.tinyint_col, RF008 -> a.`month`, RF009 -> a.`year`, RF010 -> a.id, RF011 -> a.tinyint_col
    row-size=13B cardinality=1
 ---- SCANRANGELOCATIONS
 NODE 0:
@@ -287,11 +287,11 @@ PLAN-ROOT SINK
 08:EXCHANGE [UNPARTITIONED]
 |
 04:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: b.id = c.id, b.month = c.month, b.year = c.year, b.smallint_col = c.int_col
-|  runtime filters: RF000 <- c.id, RF001 <- c.month, RF002 <- c.year, RF003 <- c.int_col
+|  hash predicates: b.`month` = c.`month`, b.`year` = c.`year`, b.id = c.id, b.smallint_col = c.int_col
+|  runtime filters: RF000 <- c.`month`, RF001 <- c.`year`, RF002 <- c.id, RF003 <- c.int_col
 |  row-size=43B cardinality=1
 |
-|--07:EXCHANGE [HASH(c.id,c.month,c.year,c.int_col)]
+|--07:EXCHANGE [HASH(c.`month`,c.`year`,c.id,c.int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny c]
 |     partition predicates: c.year = 2009, c.month + 2 <= 4
@@ -300,26 +300,26 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.id = b.id, a.month = b.month, a.year = b.year, a.tinyint_col = b.smallint_col
-|  runtime filters: RF008 <- b.id, RF009 <- b.month, RF010 <- b.year, RF011 <- b.smallint_col
+|  hash predicates: a.`month` = b.`month`, a.`year` = b.`year`, a.id = b.id, a.tinyint_col = b.smallint_col
+|  runtime filters: RF008 <- b.`month`, RF009 <- b.`year`, RF010 <- b.id, RF011 <- b.smallint_col
 |  row-size=27B cardinality=1
 |
-|--06:EXCHANGE [HASH(b.id,b.month,b.year,b.smallint_col)]
+|--06:EXCHANGE [HASH(b.`month`,b.`year`,b.id,b.smallint_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.year = 2009, b.month + 2 <= 4
+|     partition predicates: b.year = 2009, b.`month` + 2 <= 4
 |     partitions=2/4 files=2 size=3.16KB
 |     predicates: b.id = 17, CAST(sin(b.smallint_col) AS BOOLEAN) = TRUE
-|     runtime filters: RF000 -> b.id, RF001 -> b.month, RF002 -> b.year, RF003 -> b.smallint_col
+|     runtime filters: RF000 -> b.`month`, RF001 -> b.`year`, RF002 -> b.id, RF003 -> b.smallint_col
 |     row-size=14B cardinality=1
 |
-05:EXCHANGE [HASH(a.id,a.month,a.year,a.tinyint_col)]
+05:EXCHANGE [HASH(a.`month`,a.`year`,a.id,a.tinyint_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month + 2 <= 4
+   partition predicates: a.`year` = 2009, a.month + 2 <= 4
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id = 17, CAST(sin(a.tinyint_col) AS BOOLEAN) = TRUE
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col, RF008 -> a.id, RF009 -> a.month, RF010 -> a.year, RF011 -> a.tinyint_col
+   runtime filters: RF000 -> a.month, RF001 -> a.year, RF002 -> a.id, RF003 -> a.tinyint_col, RF008 -> a.`month`, RF009 -> a.`year`, RF010 -> a.id, RF011 -> a.tinyint_col
    row-size=13B cardinality=1
 ====
 # basic propagation between equivalence classes, with partition pruning;
@@ -434,18 +434,18 @@ where a.year = 2009 and a.tinyint_col = 7 and a.id is null and b.id = 17 and b.i
 PLAN-ROOT SINK
 |
 02:HASH JOIN [LEFT OUTER JOIN]
-|  hash predicates: a.id = b.id, a.month = b.month, a.tinyint_col = b.tinyint_col, a.year = b.year
+|  hash predicates: a.`month` = b.`month`, a.`year` = b.`year`, a.id = b.id, a.tinyint_col = b.tinyint_col
 |  other predicates: b.int_col IS NULL, b.id = 17
 |  row-size=30B cardinality=115
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month + 1 = 2, b.year = 2009
+|     partition predicates: b.`month` + 1 = 2, b.year = 2009
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: b.id = 17, b.tinyint_col = 7
 |     row-size=17B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009
+   partition predicates: a.`year` = 2009
    partitions=12/24 files=12 size=238.68KB
    predicates: a.id IS NULL, a.tinyint_col = 7
    row-size=13B cardinality=115
@@ -471,20 +471,20 @@ PLAN-ROOT SINK
 04:EXCHANGE [UNPARTITIONED]
 |
 02:HASH JOIN [LEFT OUTER JOIN, BROADCAST]
-|  hash predicates: a.id = b.id, a.month = b.month, a.tinyint_col = b.tinyint_col, a.year = b.year
+|  hash predicates: a.`month` = b.`month`, a.`year` = b.`year`, a.id = b.id, a.tinyint_col = b.tinyint_col
 |  other predicates: b.int_col IS NULL, b.id = 17
 |  row-size=30B cardinality=115
 |
 |--03:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month + 1 = 2, b.year = 2009
+|     partition predicates: b.`month` + 1 = 2, b.year = 2009
 |     partitions=1/4 files=1 size=1.57KB
 |     predicates: b.id = 17, b.tinyint_col = 7
 |     row-size=17B cardinality=1
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009
+   partition predicates: a.`year` = 2009
    partitions=12/24 files=12 size=238.68KB
    predicates: a.id IS NULL, a.tinyint_col = 7
    row-size=13B cardinality=115
@@ -508,22 +508,22 @@ where b.year = 2009 and b.tinyint_col = 7 and b.id is null and a.id = 17 and a.i
 PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
-|  hash predicates: a.id = b.id, a.month = b.month, a.tinyint_col = b.tinyint_col, a.year = b.year
+|  hash predicates: a.`month` = b.`month`, a.`year` = b.`year`, a.id = b.id, a.tinyint_col = b.tinyint_col
 |  other predicates: a.int_col IS NULL, a.id = 17
-|  runtime filters: RF000 <- b.id, RF001 <- b.month, RF002 <- b.tinyint_col, RF003 <- b.year
+|  runtime filters: RF000 <- b.`month`, RF001 <- b.`year`, RF002 <- b.id, RF003 <- b.tinyint_col
 |  row-size=30B cardinality=115
 |
 |--01:SCAN HDFS [functional.alltypes b]
-|     partition predicates: b.year = 2009
+|     partition predicates: b.`year` = 2009
 |     partitions=12/24 files=12 size=238.68KB
 |     predicates: b.id IS NULL, b.tinyint_col = 7
 |     row-size=13B cardinality=115
 |
 00:SCAN HDFS [functional.alltypessmall a]
-   partition predicates: a.month + 1 = 2, a.year = 2009
+   partition predicates: a.`month` + 1 = 2, a.year = 2009
    partitions=1/4 files=1 size=1.57KB
    predicates: a.id = 17, a.tinyint_col = 7
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.tinyint_col, RF003 -> a.year
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=17B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
@@ -531,26 +531,26 @@ PLAN-ROOT SINK
 05:EXCHANGE [UNPARTITIONED]
 |
 02:HASH JOIN [RIGHT OUTER JOIN, PARTITIONED]
-|  hash predicates: a.id = b.id, a.month = b.month, a.tinyint_col = b.tinyint_col, a.year = b.year
+|  hash predicates: a.`month` = b.`month`, a.`year` = b.`year`, a.id = b.id, a.tinyint_col = b.tinyint_col
 |  other predicates: a.int_col IS NULL, a.id = 17
-|  runtime filters: RF000 <- b.id, RF001 <- b.month, RF002 <- b.tinyint_col, RF003 <- b.year
+|  runtime filters: RF000 <- b.`month`, RF001 <- b.`year`, RF002 <- b.id, RF003 <- b.tinyint_col
 |  row-size=30B cardinality=115
 |
-|--04:EXCHANGE [HASH(b.id,b.month,b.tinyint_col,b.year)]
+|--04:EXCHANGE [HASH(b.`month`,b.`year`,b.id,b.tinyint_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
-|     partition predicates: b.year = 2009
+|     partition predicates: b.`year` = 2009
 |     partitions=12/24 files=12 size=238.68KB
 |     predicates: b.id IS NULL, b.tinyint_col = 7
 |     row-size=13B cardinality=115
 |
-03:EXCHANGE [HASH(a.id,a.month,a.tinyint_col,a.year)]
+03:EXCHANGE [HASH(a.`month`,a.`year`,a.id,a.tinyint_col)]
 |
 00:SCAN HDFS [functional.alltypessmall a]
-   partition predicates: a.month + 1 = 2, a.year = 2009
+   partition predicates: a.`month` + 1 = 2, a.year = 2009
    partitions=1/4 files=1 size=1.57KB
    predicates: a.id = 17, a.tinyint_col = 7
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.tinyint_col, RF003 -> a.year
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=17B cardinality=1
 ====
 # propagation into inline view with aggregation:
@@ -570,13 +570,13 @@ where a.year = 2009 and b.month <= 2 and b.count_col + 1 = 17 and a.tinyint_col
 PLAN-ROOT SINK
 |
 03:HASH JOIN [INNER JOIN]
-|  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
-|  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  hash predicates: a.`month` = `month`, a.`year` = `year`, a.id = id, a.tinyint_col = int_col
+|  runtime filters: RF000 <- `month`, RF001 <- `year`, RF002 <- id, RF003 <- int_col
 |  row-size=37B cardinality=1
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  having: count(*) + 1 = 17
 |  |  row-size=24B cardinality=5
 |  |
@@ -587,10 +587,10 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=5
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month <= 2
+   partition predicates: a.`year` = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=13B cardinality=59
 ---- SCANRANGELOCATIONS
 NODE 0:
@@ -605,23 +605,23 @@ PLAN-ROOT SINK
 07:EXCHANGE [UNPARTITIONED]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
-|  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  hash predicates: a.`month` = `month`, a.`year` = `year`, a.id = id, a.tinyint_col = int_col
+|  runtime filters: RF000 <- `month`, RF001 <- `year`, RF002 <- id, RF003 <- int_col
 |  row-size=37B cardinality=1
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  05:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  having: count(*) + 1 = 17
 |  |  row-size=24B cardinality=5
 |  |
-|  04:EXCHANGE [HASH(year,month,id,int_col)]
+|  04:EXCHANGE [HASH(`year`,`month`,id,int_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  row-size=24B cardinality=5
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
@@ -631,10 +631,10 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=5
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month <= 2
+   partition predicates: a.`year` = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=13B cardinality=59
 ====
 # Same as above but with cross join
@@ -657,13 +657,13 @@ where a.id = b.id and
 PLAN-ROOT SINK
 |
 03:HASH JOIN [INNER JOIN]
-|  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
-|  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  hash predicates: a.`month` = `month`, a.`year` = `year`, a.id = id, a.tinyint_col = int_col
+|  runtime filters: RF000 <- `month`, RF001 <- `year`, RF002 <- id, RF003 <- int_col
 |  row-size=37B cardinality=1
 |
 |--02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  having: count(*) + 1 = 17
 |  |  row-size=24B cardinality=5
 |  |
@@ -674,10 +674,10 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=5
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month <= 2
+   partition predicates: a.`year` = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=13B cardinality=59
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
@@ -685,23 +685,23 @@ PLAN-ROOT SINK
 07:EXCHANGE [UNPARTITIONED]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
-|  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  hash predicates: a.`month` = `month`, a.`year` = `year`, a.id = id, a.tinyint_col = int_col
+|  runtime filters: RF000 <- `month`, RF001 <- `year`, RF002 <- id, RF003 <- int_col
 |  row-size=37B cardinality=1
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  05:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  having: count(*) + 1 = 17
 |  |  row-size=24B cardinality=5
 |  |
-|  04:EXCHANGE [HASH(year,month,id,int_col)]
+|  04:EXCHANGE [HASH(`year`,`month`,id,int_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  row-size=24B cardinality=5
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
@@ -711,10 +711,10 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=5
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month <= 2
+   partition predicates: a.`year` = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=13B cardinality=59
 ====
 # no propagation into select block with limit;
@@ -735,8 +735,8 @@ where a.year = 2009 and b.month <= 2 and b.count_col + 1 = 17 and a.tinyint_col
 PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
-|  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  hash predicates: a.`month` = `month`, a.`year` = `year`, a.id = id, a.tinyint_col = int_col
+|  runtime filters: RF000 <- `month`, RF001 <- `year`, RF002 <- id, RF003 <- int_col
 |  row-size=37B cardinality=1
 |
 |--03:SELECT
@@ -745,7 +745,7 @@ PLAN-ROOT SINK
 |  |
 |  02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  limit: 5
 |  |  row-size=24B cardinality=5
 |  |
@@ -755,10 +755,10 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month <= 2
+   partition predicates: a.`year` = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=13B cardinality=59
 ---- SCANRANGELOCATIONS
 NODE 0:
@@ -775,8 +775,8 @@ PLAN-ROOT SINK
 09:EXCHANGE [UNPARTITIONED]
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
-|  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  hash predicates: a.`month` = `month`, a.`year` = `year`, a.id = id, a.tinyint_col = int_col
+|  runtime filters: RF000 <- `month`, RF001 <- `year`, RF002 <- id, RF003 <- int_col
 |  row-size=37B cardinality=1
 |
 |--08:EXCHANGE [BROADCAST]
@@ -790,15 +790,15 @@ PLAN-ROOT SINK
 |  |
 |  06:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  limit: 5
 |  |  row-size=24B cardinality=5
 |  |
-|  05:EXCHANGE [HASH(year,month,id,int_col)]
+|  05:EXCHANGE [HASH(`year`,`month`,id,int_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  row-size=24B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
@@ -807,10 +807,10 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month <= 2
+   partition predicates: a.`year` = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=13B cardinality=59
 ====
 # Similar to the above, converts the cross join to a hash join
@@ -834,8 +834,8 @@ where a.year = 2009 and
 PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
-|  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  hash predicates: a.`month` = `month`, a.`year` = `year`, a.id = id, a.tinyint_col = int_col
+|  runtime filters: RF000 <- `month`, RF001 <- `year`, RF002 <- id, RF003 <- int_col
 |  row-size=37B cardinality=1
 |
 |--03:SELECT
@@ -844,7 +844,7 @@ PLAN-ROOT SINK
 |  |
 |  02:AGGREGATE [FINALIZE]
 |  |  output: count(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  limit: 5
 |  |  row-size=24B cardinality=5
 |  |
@@ -854,10 +854,10 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month <= 2
+   partition predicates: a.`year` = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=13B cardinality=59
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
@@ -865,8 +865,8 @@ PLAN-ROOT SINK
 09:EXCHANGE [UNPARTITIONED]
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.id = id, a.month = month, a.year = year, a.tinyint_col = int_col
-|  runtime filters: RF000 <- id, RF001 <- month, RF002 <- year, RF003 <- int_col
+|  hash predicates: a.`month` = `month`, a.`year` = `year`, a.id = id, a.tinyint_col = int_col
+|  runtime filters: RF000 <- `month`, RF001 <- `year`, RF002 <- id, RF003 <- int_col
 |  row-size=37B cardinality=1
 |
 |--08:EXCHANGE [BROADCAST]
@@ -880,15 +880,15 @@ PLAN-ROOT SINK
 |  |
 |  06:AGGREGATE [FINALIZE]
 |  |  output: count:merge(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  limit: 5
 |  |  row-size=24B cardinality=5
 |  |
-|  05:EXCHANGE [HASH(year,month,id,int_col)]
+|  05:EXCHANGE [HASH(`year`,`month`,id,int_col)]
 |  |
 |  02:AGGREGATE [STREAMING]
 |  |  output: count(*)
-|  |  group by: year, month, id, int_col
+|  |  group by: `year`, `month`, id, int_col
 |  |  row-size=24B cardinality=10
 |  |
 |  01:SCAN HDFS [functional.alltypessmall]
@@ -897,10 +897,10 @@ PLAN-ROOT SINK
 |     row-size=16B cardinality=10
 |
 00:SCAN HDFS [functional.alltypes a]
-   partition predicates: a.year = 2009, a.month <= 2
+   partition predicates: a.`year` = 2009, a.month <= 2
    partitions=2/24 files=2 size=38.07KB
    predicates: a.id > 11, a.tinyint_col != 5
-   runtime filters: RF000 -> a.id, RF001 -> a.month, RF002 -> a.year, RF003 -> a.tinyint_col
+   runtime filters: RF000 -> a.`month`, RF001 -> a.`year`, RF002 -> a.id, RF003 -> a.tinyint_col
    row-size=13B cardinality=59
 ====
 # propagation of z.month=1 to alltypesagg is prevented
@@ -967,9 +967,9 @@ PLAN-ROOT SINK
 |     row-size=12B cardinality=10
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = b.int_col, a.year = b.year
+|  hash predicates: a.`year` = b.`year`, a.int_col = b.int_col
 |  other predicates: a.id + b.id = 17
-|  runtime filters: RF002 <- b.int_col, RF003 <- b.year
+|  runtime filters: RF002 <- b.`year`, RF003 <- b.int_col
 |  row-size=24B cardinality=36.50K
 |
 |--01:SCAN HDFS [functional.alltypessmall b]
@@ -980,7 +980,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes a]
    partition predicates: a.year = 2009
    partitions=12/24 files=12 size=238.68KB
-   runtime filters: RF000 -> a.id, RF002 -> a.int_col, RF003 -> a.year
+   runtime filters: RF000 -> a.id, RF002 -> a.`year`, RF003 -> a.int_col
    row-size=12B cardinality=3.65K
 ====
 # correct placement of predicates in the presence of aggregation in an inline view
@@ -1362,19 +1362,19 @@ where t2.year + t2.month > 10
 PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.id = t2.month, t1.year = t2.year
-|  runtime filters: RF000 <- t2.month, RF001 <- t2.year
+|  hash predicates: t1.`year` = t2.`year`, t1.id = t2.`month`
+|  runtime filters: RF000 <- t2.`year`, RF001 <- t2.`month`
 |  row-size=178B cardinality=100
 |
 |--01:SCAN HDFS [functional.alltypessmall t2]
-|     partition predicates: t2.year + t2.month > 10
+|     partition predicates: t2.`year` + t2.`month` > 10
 |     partitions=4/4 files=4 size=6.32KB
 |     row-size=89B cardinality=100
 |
 00:SCAN HDFS [functional.alltypes t1]
    partitions=24/24 files=24 size=478.45KB
    predicates: t1.id = t1.month, t1.year + t1.id > 10
-   runtime filters: RF000 -> t1.id, RF001 -> t1.year
+   runtime filters: RF000 -> t1.`year`, RF001 -> t1.id
    row-size=89B cardinality=730
 ====
 # TODO: Fix this limitation of our getBindingPredicates() implementation:
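
[Editor's note] Besides re-quoting the "Analyzed query" text, the resource-requirements hunks below switch empty "in pipelines:" details to an explicit <none> marker. A hypothetical rendering of that detail line; the helper name is invented for this sketch and is not the PrintUtils API:

import java.util.Collections;
import java.util.List;

// Sketch: an empty pipeline list now prints "<none>" instead of
// leaving the detail line blank.
public class PipelineLine {
  static String renderInPipelines(List<String> pipelines) {
    return "in pipelines: "
        + (pipelines.isEmpty() ? "<none>" : String.join(", ", pipelines));
  }

  public static void main(String[] args) {
    System.out.println(renderInPipelines(Collections.emptyList())); // in pipelines: <none>
    System.out.println(renderInPipelines(List.of("00(GETNEXT)")));  // in pipelines: 00(GETNEXT)
  }
}
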
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test b/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
index 60c9f6d..fdde0a1 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
@@ -4124,8 +4124,8 @@ Per-Host Resource Estimates: Memory=16MB
 Codegen disabled by planner
 Analyzed query: SELECT id, bool_col, tinyint_col, smallint_col, int_col,
 bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col
-FROM functional.alltypes WHERE year = CAST(2009 AS INT) AND month = CAST(5 AS
-INT)
+FROM functional.alltypes WHERE `year` = CAST(2009 AS INT) AND `month` = CAST(5
+AS INT)
 
 F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
 |  Per-Host Resources: mem-estimate=16.02MB mem-reservation=32.00KB thread-reservation=2
@@ -4134,7 +4134,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  mem-estimate=24.52KB mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = CAST(2009 AS INT), month = CAST(5 AS INT)
+   partition predicates: `year` = CAST(2009 AS INT), `month` = CAST(5 AS INT)
    partitions=1/24 files=1 size=20.36KB
    stored statistics:
      table: rows=7300 size=478.45KB
@@ -4150,8 +4150,8 @@ Per-Host Resource Estimates: Memory=16MB
 Codegen disabled by planner
 Analyzed query: SELECT id, bool_col, tinyint_col, smallint_col, int_col,
 bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col
-FROM functional.alltypes WHERE year = CAST(2009 AS INT) AND month = CAST(5 AS
-INT)
+FROM functional.alltypes WHERE `year` = CAST(2009 AS INT) AND `month` = CAST(5
+AS INT)
 
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
 |  Per-Host Resources: mem-estimate=16.02MB mem-reservation=32.00KB thread-reservation=2
@@ -4160,7 +4160,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  mem-estimate=24.52KB mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.alltypes, RANDOM]
-   partition predicates: year = CAST(2009 AS INT), month = CAST(5 AS INT)
+   partition predicates: `year` = CAST(2009 AS INT), `month` = CAST(5 AS INT)
    partitions=1/24 files=1 size=20.36KB
    stored statistics:
      table: rows=7300 size=478.45KB
@@ -4176,8 +4176,8 @@ Per-Host Resource Estimates: Memory=32MB
 Codegen disabled by planner
 Analyzed query: SELECT id, bool_col, tinyint_col, smallint_col, int_col,
 bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col
-FROM functional.alltypes WHERE year = CAST(2009 AS INT) AND month = CAST(5 AS
-INT)
+FROM functional.alltypes WHERE `year` = CAST(2009 AS INT) AND `month` = CAST(5
+AS INT)
 
 F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=2
 |  Per-Host Resources: mem-estimate=32.02MB mem-reservation=64.00KB thread-reservation=2
@@ -4186,7 +4186,7 @@ WRITE TO HDFS [functional.alltypesnopart, OVERWRITE=false]
 |  mem-estimate=12.26KB mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.alltypes, RANDOM]
-   partition predicates: year = CAST(2009 AS INT), month = CAST(5 AS INT)
+   partition predicates: `year` = CAST(2009 AS INT), `month` = CAST(5 AS INT)
    partitions=1/24 files=1 size=20.36KB
    stored statistics:
      table: rows=7300 size=478.45KB
@@ -5850,7 +5850,7 @@ INSERT INTO KUDU [functional_kudu.tinyinttable]
    constant-operands=1
    mem-estimate=0B mem-reservation=0B thread-reservation=0
    tuple-ids=0 row-size=1B cardinality=1
-   in pipelines:
+   in pipelines: <none>
 ---- DISTRIBUTEDPLAN
 Max Per-Host Resource Reservation: Memory=2.00MB Threads=2
 Per-Host Resource Estimates: Memory=22MB
@@ -5867,12 +5867,12 @@ INSERT INTO KUDU [functional_kudu.tinyinttable]
 |  materialized: KuduPartition(1)
 |  mem-estimate=2.00MB mem-reservation=2.00MB spill-buffer=2.00MB thread-reservation=0
 |  tuple-ids=1 row-size=5B cardinality=1
-|  in pipelines:
+|  in pipelines: <none>
 |
 01:EXCHANGE [KUDU(KuduPartition(1))]
 |  mem-estimate=16.00KB mem-reservation=0B thread-reservation=0
 |  tuple-ids=0 row-size=1B cardinality=1
-|  in pipelines:
+|  in pipelines: <none>
 |
 F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
 Per-Host Resources: mem-estimate=0B mem-reservation=0B thread-reservation=1
@@ -5880,5 +5880,5 @@ Per-Host Resources: mem-estimate=0B mem-reservation=0B thread-reservation=1
    constant-operands=1
    mem-estimate=0B mem-reservation=0B thread-reservation=0
    tuple-ids=0 row-size=1B cardinality=1
-   in pipelines:
+   in pipelines: <none>
 ====
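
[Editor's note] The quoting is not cosmetic: text like the "Analyzed query" above is regenerated SQL that must parse again in engines where year and month are reserved words. A toy re-parse check under that assumption; isReserved() stands in for a real parser's keyword table and is invented for this sketch:

// A bare reserved word fails to re-parse as a column reference,
// while its backtick-quoted form is accepted.
public class RoundTrip {
  static boolean isReserved(String ident) {
    return ident.equals("year") || ident.equals("month");
  }

  static boolean reparses(String ident) {
    if (ident.startsWith("`") && ident.endsWith("`")) return true;
    return !isReserved(ident);
  }

  public static void main(String[] args) {
    System.out.println(reparses("year"));    // false: bare keyword
    System.out.println(reparses("`year`"));  // true: quoted
    System.out.println(reparses("id"));      // true: ordinary column
  }
}
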
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test b/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test
index a6c32e3..7ceb039 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-propagation.test
@@ -5,7 +5,7 @@ where t1.year = t2.int_col and t2.id < 10 and t1.id = 10
 PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
 |  row-size=167B cardinality=1
 |
@@ -17,7 +17,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    predicates: t1.id = 10
-   runtime filters: RF000 -> t1.year
+   runtime filters: RF000 -> t1.`year`
    row-size=95B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
@@ -25,7 +25,7 @@ PLAN-ROOT SINK
 04:EXCHANGE [UNPARTITIONED]
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
 |  row-size=167B cardinality=1
 |
@@ -39,7 +39,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
    predicates: t1.id = 10
-   runtime filters: RF000 -> t1.year
+   runtime filters: RF000 -> t1.`year`
    row-size=95B cardinality=1
 ====
 # Four-way join query
@@ -51,7 +51,7 @@ where t1.year = t2.int_col and t3.tinyint_col = t2.id and t3.month = t4.id and
 PLAN-ROOT SINK
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: t3.month = t4.id
+|  hash predicates: t3.`month` = t4.id
 |  runtime filters: RF000 <- t4.id
 |  row-size=345B cardinality=9
 |
@@ -67,11 +67,11 @@ PLAN-ROOT SINK
 |
 |--02:SCAN HDFS [functional.alltypestiny t3]
 |     partitions=4/4 files=4 size=460B
-|     runtime filters: RF000 -> t3.month
+|     runtime filters: RF000 -> t3.`month`
 |     row-size=89B cardinality=8
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  runtime filters: RF004 <- t2.int_col
 |  row-size=184B cardinality=92
 |
@@ -83,7 +83,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypestiny t1]
    partitions=4/4 files=4 size=460B
-   runtime filters: RF004 -> t1.year
+   runtime filters: RF004 -> t1.`year`
    row-size=89B cardinality=8
 ====
 # Two-way join query where multiple runtime filters are generated
@@ -93,7 +93,7 @@ where t1.year = t2.int_col and t1.month = t2.bigint_col and t2.id = 10
 PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.int_col, t1.month = t2.bigint_col
+|  hash predicates: t1.`year` = t2.int_col, t1.`month` = t2.bigint_col
 |  runtime filters: RF000 <- t2.int_col, RF001 <- t2.bigint_col
 |  row-size=167B cardinality=11.00K
 |
@@ -104,7 +104,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year, RF001 -> t1.month
+   runtime filters: RF000 -> t1.`year`, RF001 -> t1.`month`
    row-size=95B cardinality=11.00K
 ====
 # Two-way join query with an inline view in the build side of the join
@@ -115,7 +115,7 @@ where t1.year = v.int_col
 PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
 |  row-size=167B cardinality=11.00K
 |
@@ -126,7 +126,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year
+   runtime filters: RF000 -> t1.`year`
    row-size=95B cardinality=11.00K
 ====
 # Two-way join query with an inline view in the build side of the join where the
@@ -139,7 +139,7 @@ where v.id1 = t1.year
 PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = id + int_col
+|  hash predicates: t1.`year` = id + int_col
 |  runtime filters: RF000 <- id + int_col
 |  row-size=111B cardinality=11.00K
 |
@@ -150,7 +150,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year
+   runtime filters: RF000 -> t1.`year`
    row-size=95B cardinality=11.00K
 ====
 # Two-way join query where the lhs of the join predicate is an arithmetic expr
@@ -160,7 +160,7 @@ where t1.year + 1 = t2.id and t2.int_col < 10
 PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year + 1 = t2.id
+|  hash predicates: t1.`year` + 1 = t2.id
 |  runtime filters: RF000 <- t2.id
 |  row-size=167B cardinality=11.00K
 |
@@ -171,7 +171,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year + 1
+   runtime filters: RF000 -> t1.`year` + 1
    row-size=95B cardinality=11.00K
 ====
 # Two-way join query with join predicates that are not suitable for hashing
@@ -184,7 +184,7 @@ PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: t1.id = t2.id
-|  other predicates: t1.year = t1.month + t2.int_col, t2.tinyint_col = t1.year + t2.smallint_col, t1.year + t2.int_col = t1.month + t2.tinyint_col
+|  other predicates: t1.`year` = t1.`month` + t2.int_col, t2.tinyint_col = t1.`year` + t2.smallint_col, t1.`year` + t2.int_col = t1.`month` + t2.tinyint_col
 |  runtime filters: RF000 <- t2.id
 |  row-size=167B cardinality=11
 |
@@ -208,7 +208,7 @@ and t1.int_col * 100 = t2.bigint_col / 100 and t2.bool_col = false
 PLAN-ROOT SINK
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year + t1.month = t2.id, t1.int_col * 100 = t2.bigint_col / 100, t1.int_col + 1 - t1.tinyint_col = t2.smallint_col + 10
+|  hash predicates: t1.`year` + t1.`month` = t2.id, t1.int_col * 100 = t2.bigint_col / 100, t1.int_col + 1 - t1.tinyint_col = t2.smallint_col + 10
 |  runtime filters: RF000 <- t2.id, RF001 <- t2.bigint_col / 100, RF002 <- t2.smallint_col + 10
 |  row-size=167B cardinality=11.00K
 |
@@ -219,7 +219,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year + t1.month, RF001 -> t1.int_col * 100, RF002 -> t1.int_col + 1 - t1.tinyint_col
+   runtime filters: RF000 -> t1.`year` + t1.`month`, RF001 -> t1.int_col * 100, RF002 -> t1.int_col + 1 - t1.tinyint_col
    row-size=95B cardinality=11.00K
 ====
 # Three-way join query with an inline view on the probe side of the join where the left
@@ -234,7 +234,7 @@ where v.year = t3.int_col and t3.bool_col = true
 PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year + t2.year = t3.int_col
+|  hash predicates: t1.`year` + t2.`year` = t3.int_col
 |  row-size=88B cardinality=7.81K
 |
 |--03:SCAN HDFS [functional.alltypesnopart t3]
@@ -266,7 +266,7 @@ where v.cnt = t1.year and v.id = t1.month
 PLAN-ROOT SINK
 |
 03:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.month = id, t1.year = count(int_col)
+|  hash predicates: t1.`month` = id, t1.`year` = count(int_col)
 |  runtime filters: RF000 <- id, RF001 <- count(int_col)
 |  row-size=107B cardinality=0
 |
@@ -283,7 +283,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypesagg t1]
    partition predicates: t1.year < 10
    partitions=0/11 files=0 size=0B
-   runtime filters: RF000 -> t1.month, RF001 -> t1.year
+   runtime filters: RF000 -> t1.`month`, RF001 -> t1.`year`
    row-size=95B cardinality=0
 ====
 # Two-way join query with an inline view in the build side of the join that has a
@@ -296,7 +296,7 @@ where v.id = t1.year and t1.month = v.tinyint_col
 PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.month = t3.tinyint_col, t1.year = t2.id + t3.id
+|  hash predicates: t1.`month` = t3.tinyint_col, t1.`year` = t2.id + t3.id
 |  runtime filters: RF000 <- t3.tinyint_col, RF001 <- t2.id + t3.id
 |  row-size=112B cardinality=11.00K
 |
@@ -316,7 +316,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.month, RF001 -> t1.year
+   runtime filters: RF000 -> t1.`month`, RF001 -> t1.`year`
    row-size=95B cardinality=11.00K
 ====
 # Four-way join query with an inline view in the build side of the join where the
@@ -330,7 +330,7 @@ where t1.year = v.int_col and t1.year = v.id and t1.month = v.tinyint_col
 PLAN-ROOT SINK
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.int_col, t1.month = t4.tinyint_col
+|  hash predicates: t1.`year` = t2.int_col, t1.`month` = t4.tinyint_col
 |  runtime filters: RF000 <- t2.int_col, RF001 <- t4.tinyint_col
 |  row-size=117B cardinality=11.00K
 |
@@ -361,7 +361,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year, RF001 -> t1.month
+   runtime filters: RF000 -> t1.`year`, RF001 -> t1.`month`
    row-size=95B cardinality=11.00K
 ====
 # Four-way join query between base tables in a star schema
@@ -373,7 +373,7 @@ where t1.year = t2.id and t1.year = t3.int_col and t1.year = t4.tinyint_col and
 PLAN-ROOT SINK
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t4.tinyint_col
+|  hash predicates: t1.`year` = t4.tinyint_col
 |  runtime filters: RF000 <- t4.tinyint_col
 |  row-size=311B cardinality=11.00K
 |
@@ -383,7 +383,7 @@ PLAN-ROOT SINK
 |     row-size=72B cardinality=0
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t3.int_col
+|  hash predicates: t1.`year` = t3.int_col
 |  runtime filters: RF002 <- t3.int_col
 |  row-size=239B cardinality=11.00K
 |
@@ -394,7 +394,7 @@ PLAN-ROOT SINK
 |     row-size=72B cardinality=0
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.id
+|  hash predicates: t1.`year` = t2.id
 |  runtime filters: RF004 <- t2.id
 |  row-size=167B cardinality=11.00K
 |
@@ -406,7 +406,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year, RF002 -> t1.year, RF004 -> t1.year
+   runtime filters: RF000 -> t1.`year`, RF002 -> t1.`year`, RF004 -> t1.`year`
    row-size=95B cardinality=11.00K
 ====
 # Five-way cyclic join query
@@ -418,7 +418,7 @@ where t1.year = t2.id and t2.int_col = t3.tinyint_col and t3.month = t4.bigint_c
 PLAN-ROOT SINK
 |
 08:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.month = t5.id, t4.smallint_col = t5.smallint_col
+|  hash predicates: t1.`month` = t5.id, t4.smallint_col = t5.smallint_col
 |  runtime filters: RF000 <- t5.id, RF001 <- t5.smallint_col
 |  row-size=400B cardinality=11.00K
 |
@@ -427,7 +427,7 @@ PLAN-ROOT SINK
 |     row-size=72B cardinality=0
 |
 07:HASH JOIN [INNER JOIN]
-|  hash predicates: t3.month = t4.bigint_col
+|  hash predicates: t3.`month` = t4.bigint_col
 |  runtime filters: RF004 <- t4.bigint_col
 |  row-size=328B cardinality=11.00K
 |
@@ -443,11 +443,11 @@ PLAN-ROOT SINK
 |
 |--02:SCAN HDFS [functional.alltypessmall t3]
 |     partitions=4/4 files=4 size=6.32KB
-|     runtime filters: RF004 -> t3.month
+|     runtime filters: RF004 -> t3.`month`
 |     row-size=89B cardinality=100
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.id
+|  hash predicates: t1.`year` = t2.id
 |  runtime filters: RF008 <- t2.id
 |  row-size=167B cardinality=11.00K
 |
@@ -458,7 +458,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.month, RF008 -> t1.year
+   runtime filters: RF000 -> t1.`month`, RF008 -> t1.`year`
    row-size=95B cardinality=11.00K
 ====
 # Two-way left outer join query; no runtime filters should be generated from the
@@ -470,7 +470,7 @@ where t2.id = 1
 PLAN-ROOT SINK
 |
 02:HASH JOIN [LEFT OUTER JOIN]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  other predicates: t2.id = 1
 |  row-size=167B cardinality=11.00K
 |
@@ -492,8 +492,8 @@ where t2.id = 2 and t1.month = t2.tinyint_col
 PLAN-ROOT SINK
 |
 02:HASH JOIN [LEFT OUTER JOIN]
-|  hash predicates: t1.year = t2.int_col
-|  other predicates: t2.id = 2, t1.month = t2.tinyint_col
+|  hash predicates: t1.`year` = t2.int_col
+|  other predicates: t2.id = 2, t1.`month` = t2.tinyint_col
 |  runtime filters: RF000 <- t2.tinyint_col
 |  row-size=167B cardinality=11.00K
 |
@@ -504,7 +504,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.month
+   runtime filters: RF000 -> t1.`month`
    row-size=95B cardinality=11.00K
 ====
 # Multi-way join query with outer joins
@@ -518,7 +518,7 @@ where t2.id = 1 and t3.int_col = 1 and t4.bool_col = true and t5.bool_col = fals
 PLAN-ROOT SINK
 |
 08:HASH JOIN [RIGHT OUTER JOIN]
-|  hash predicates: t1.year = t5.smallint_col
+|  hash predicates: t1.`year` = t5.smallint_col
 |  other predicates: t2.id = 1, t3.int_col = 1, t4.bool_col = TRUE
 |  runtime filters: RF000 <- t5.smallint_col
 |  row-size=383B cardinality=11.00K
@@ -529,7 +529,7 @@ PLAN-ROOT SINK
 |     row-size=72B cardinality=0
 |
 07:HASH JOIN [FULL OUTER JOIN]
-|  hash predicates: t1.year = t4.tinyint_col
+|  hash predicates: t1.`year` = t4.tinyint_col
 |  row-size=311B cardinality=11.00K
 |
 |--03:SCAN HDFS [functional.alltypesnopart t4]
@@ -538,7 +538,7 @@ PLAN-ROOT SINK
 |     row-size=72B cardinality=0
 |
 06:HASH JOIN [LEFT OUTER JOIN]
-|  hash predicates: t1.year = t3.id
+|  hash predicates: t1.`year` = t3.id
 |  row-size=239B cardinality=11.00K
 |
 |--02:SCAN HDFS [functional.alltypesnopart t3]
@@ -548,7 +548,7 @@ PLAN-ROOT SINK
 |     row-size=72B cardinality=0
 |
 05:HASH JOIN [LEFT OUTER JOIN]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  row-size=167B cardinality=11.00K
 |
 |--01:SCAN HDFS [functional.alltypesnopart t2]
@@ -559,7 +559,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year
+   runtime filters: RF000 -> t1.`year`
    row-size=95B cardinality=11.00K
 ====
 # Two-way right outer join query where a runtime filter can be pushed to the nullable
@@ -572,9 +572,9 @@ where t2.id = 10 and t1.month = t2.tinyint_col and t1.int_col = 1
 PLAN-ROOT SINK
 |
 02:HASH JOIN [RIGHT OUTER JOIN]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  other join predicates: t2.int_col = 10
-|  other predicates: t1.int_col = 1, t1.month = t2.tinyint_col
+|  other predicates: t1.int_col = 1, t1.`month` = t2.tinyint_col
 |  runtime filters: RF000 <- t2.int_col, RF001 <- t2.tinyint_col
 |  row-size=167B cardinality=0
 |
@@ -584,10 +584,10 @@ PLAN-ROOT SINK
 |     row-size=72B cardinality=0
 |
 00:SCAN HDFS [functional.alltypesagg t1]
-   partition predicates: t1.month = 1, t1.year = 10
+   partition predicates: t1.`month` = 1, t1.year = 10
    partitions=0/11 files=0 size=0B
    predicates: t1.int_col = 1
-   runtime filters: RF000 -> t1.year, RF001 -> t1.month
+   runtime filters: RF000 -> t1.`year`, RF001 -> t1.`month`
    row-size=95B cardinality=0
 ====
 # Three-way join query with semi joins
@@ -599,7 +599,7 @@ where t3.id = 1
 PLAN-ROOT SINK
 |
 04:HASH JOIN [RIGHT SEMI JOIN]
-|  hash predicates: t1.month = t3.tinyint_col
+|  hash predicates: t1.`month` = t3.tinyint_col
 |  runtime filters: RF000 <- t3.tinyint_col
 |  row-size=72B cardinality=0
 |
@@ -609,7 +609,7 @@ PLAN-ROOT SINK
 |     row-size=72B cardinality=0
 |
 03:HASH JOIN [LEFT SEMI JOIN]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  runtime filters: RF002 <- t2.int_col
 |  row-size=95B cardinality=11.00K
 |
@@ -619,7 +619,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.month, RF002 -> t1.year
+   runtime filters: RF000 -> t1.`month`, RF002 -> t1.`year`
    row-size=95B cardinality=11.00K
 ====
 # Query with a subquery that is converted to a null-aware left anti join
@@ -630,7 +630,7 @@ and t1.int_col < 10
 PLAN-ROOT SINK
 |
 02:HASH JOIN [NULL AWARE LEFT ANTI JOIN]
-|  hash predicates: t1.year = id
+|  hash predicates: t1.`year` = id
 |  row-size=95B cardinality=1.10K
 |
 |--01:SCAN HDFS [functional.alltypesnopart]
@@ -653,7 +653,7 @@ where v1.year = v2.id
 PLAN-ROOT SINK
 |
 03:HASH JOIN [INNER JOIN]
-|  hash predicates: year = id
+|  hash predicates: `year` = id
 |  runtime filters: RF000 <- id
 |  row-size=21B cardinality=1
 |
@@ -664,7 +664,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
-|  group by: year
+|  group by: `year`
 |  row-size=12B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg t1]
@@ -692,7 +692,7 @@ PLAN-ROOT SINK
 |
 01:AGGREGATE [FINALIZE]
 |  output: count(*)
-|  group by: year
+|  group by: `year`
 |  row-size=12B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg t1]
@@ -712,7 +712,7 @@ where v2.year = t3.smallint_col and t3.id = 1
 PLAN-ROOT SINK
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: v1.year = t3.smallint_col
+|  hash predicates: v1.`year` = t3.smallint_col
 |  runtime filters: RF000 <- t3.smallint_col
 |  row-size=88B cardinality=1
 |
@@ -723,11 +723,11 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(*)
-|  group by: year, t2.int_col
+|  group by: `year`, t2.int_col
 |  row-size=16B cardinality=1
 |
 03:HASH JOIN [INNER JOIN]
-|  hash predicates: month = t2.int_col
+|  hash predicates: `month` = t2.int_col
 |  runtime filters: RF002 <- t2.int_col
 |  row-size=12B cardinality=1
 |
@@ -736,7 +736,7 @@ PLAN-ROOT SINK
 |     row-size=4B cardinality=0
 |
 01:AGGREGATE [FINALIZE]
-|  group by: year, month
+|  group by: `year`, `month`
 |  row-size=8B cardinality=1
 |
 00:SCAN HDFS [functional.alltypesagg t1]
@@ -755,8 +755,8 @@ where b.int_col < 10
 PLAN-ROOT SINK
 |
 07:HASH JOIN [INNER JOIN]
-|  hash predicates: year = c.year
-|  runtime filters: RF000 <- c.year
+|  hash predicates: `year` = c.`year`
+|  runtime filters: RF000 <- c.`year`
 |  row-size=28B cardinality=58.40K
 |
 |--04:SCAN HDFS [functional.alltypestiny c]
@@ -764,8 +764,8 @@ PLAN-ROOT SINK
 |     row-size=4B cardinality=8
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: year = b.year
-|  runtime filters: RF002 <- b.year
+|  hash predicates: `year` = b.`year`
+|  runtime filters: RF002 <- b.`year`
 |  row-size=24B cardinality=14.60K
 |
 |--03:SCAN HDFS [functional.alltypestiny b]
@@ -775,8 +775,8 @@ PLAN-ROOT SINK
 |     row-size=8B cardinality=1
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: year = a.year
-|  runtime filters: RF004 <- a.year
+|  hash predicates: `year` = a.`year`
+|  runtime filters: RF004 <- a.`year`
 |  row-size=16B cardinality=29.20K
 |
 |--02:SCAN HDFS [functional.alltypestiny a]
@@ -785,7 +785,7 @@ PLAN-ROOT SINK
 |     row-size=4B cardinality=8
 |
 01:AGGREGATE [FINALIZE]
-|  group by: id, year, month
+|  group by: id, `year`, `month`
 |  row-size=12B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
@@ -870,8 +870,8 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: month = b.month
-|  runtime filters: RF000 <- b.month
+|  hash predicates: month = b.`month`
+|  runtime filters: RF000 <- b.`month`
 |  row-size=12B cardinality=21.90K
 |
 |--04:SCAN HDFS [functional.alltypessmall b]
@@ -916,8 +916,8 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: month = b.month
-|  runtime filters: RF000 <- b.month
+|  hash predicates: month = b.`month`
+|  runtime filters: RF000 <- b.`month`
 |  row-size=16B cardinality=216
 |
 |--05:SCAN HDFS [functional.alltypessmall b]
@@ -961,7 +961,7 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=18.30K
 |
 |--06:HASH JOIN [INNER JOIN]
-|  |  hash predicates: t3.month = t4.smallint_col
+|  |  hash predicates: t3.`month` = t4.smallint_col
 |  |  runtime filters: RF002 <- t4.smallint_col
 |  |  row-size=15B cardinality=7.30K
 |  |
@@ -972,11 +972,11 @@ PLAN-ROOT SINK
 |  |
 |  04:SCAN HDFS [functional.alltypes t3]
 |     partitions=24/24 files=24 size=478.45KB
-|     runtime filters: RF002 -> t3.month
+|     runtime filters: RF002 -> t3.`month`
 |     row-size=8B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.int_col
+|  hash predicates: t1.`year` = t2.int_col
 |  runtime filters: RF000 <- t2.int_col
 |  row-size=13B cardinality=11.00K
 |
@@ -987,7 +987,7 @@ PLAN-ROOT SINK
 |
 01:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year
+   runtime filters: RF000 -> t1.`year`
    row-size=4B cardinality=11.00K
 ====
 # Query with UNION ALL operator on the rhs of a join node
@@ -1051,7 +1051,7 @@ PLAN-ROOT SINK
 |
 04:ANALYTIC
 |  functions: count(id)
-|  partition by: year
+|  partition by: `year`
 |  order by: month DESC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  row-size=20B cardinality=11.00K
@@ -1061,7 +1061,7 @@ PLAN-ROOT SINK
 |  row-size=12B cardinality=11.00K
 |
 02:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.id
+|  hash predicates: t1.`year` = t2.id
 |  runtime filters: RF000 <- t2.id
 |  row-size=12B cardinality=11.00K
 |
@@ -1071,7 +1071,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.year
+   runtime filters: RF000 -> t1.`year`
    row-size=8B cardinality=11.00K
 ====
 # Two-way join query with an analytic function on the probe side
@@ -1095,7 +1095,7 @@ PLAN-ROOT SINK
 |
 02:ANALYTIC
 |  functions: sum(int_col)
-|  partition by: year
+|  partition by: `year`
 |  order by: id ASC
 |  window: RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
 |  row-size=20B cardinality=3.65K
@@ -1147,7 +1147,7 @@ PLAN-ROOT SINK
 |     row-size=6B cardinality=0
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: t1.year = t2.id + t3.id + 1
+|  hash predicates: t1.`year` = t2.id + t3.id + 1
 |  runtime filters: RF002 <- t2.id + t3.id + 1
 |  row-size=104B cardinality=11.00K
 |
@@ -1167,7 +1167,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypesagg t1]
    partitions=11/11 files=11 size=814.73KB
-   runtime filters: RF000 -> t1.month, RF002 -> t1.year
+   runtime filters: RF000 -> t1.month, RF002 -> t1.`year`
    row-size=95B cardinality=11.00K
 ====
 # Multi-way join query where the slots of all the join predicates belong to the same
@@ -1398,12 +1398,12 @@ PLAN-ROOT SINK
 |  row-size=4B cardinality=2
 |
 04:HASH JOIN [RIGHT OUTER JOIN]
-|  hash predicates: if(TupleIsNull(), NULL, coalesce(int_col, 384)) = t1.month
-|  runtime filters: RF000 <- t1.month
+|  hash predicates: if(TupleIsNull(), NULL, coalesce(int_col, 384)) = t1.`month`
+|  runtime filters: RF000 <- t1.`month`
 |  row-size=12B cardinality=8
 |
 |--00:SCAN HDFS [functional.alltypestiny t1]
-|     partition predicates: t1.month IS NOT NULL
+|     partition predicates: t1.`month` IS NOT NULL
 |     partitions=4/4 files=4 size=460B
 |     row-size=8B cardinality=8
 |
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test b/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test
index 64c654b..b8e5b4f 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/runtime-filter-query-options.test
@@ -13,8 +13,8 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
-|  runtime filters: RF000 <- d.bool_col, RF001 <- d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
+|  runtime filters: RF000 <- d.`year`, RF001 <- d.bool_col
 |  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
@@ -22,8 +22,8 @@ PLAN-ROOT SINK
 |     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`, RF005 <- c.int_col
 |  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
@@ -41,7 +41,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF000 -> a.bool_col, RF001 -> a.year, RF004 -> a.int_col, RF005 -> a.month, RF008 -> a.id, RF009 -> a.date_string_col
+   runtime filters: RF000 -> a.`year`, RF001 -> a.bool_col, RF004 -> a.`month`, RF005 -> a.int_col, RF008 -> a.id, RF009 -> a.date_string_col
    row-size=37B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
@@ -57,21 +57,21 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
-|  runtime filters: RF000 <- d.bool_col, RF001 <- d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
+|  runtime filters: RF000 <- d.`year`, RF001 <- d.bool_col
 |  row-size=74B cardinality=16.21G
 |
-|--11:EXCHANGE [HASH(d.bool_col,d.year)]
+|--11:EXCHANGE [HASH(d.`year`,d.bool_col)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=5B cardinality=7.30K
 |
-10:EXCHANGE [HASH(a.bool_col,a.year)]
+10:EXCHANGE [HASH(a.`year`,a.bool_col)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`, RF005 <- c.int_col
 |  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
@@ -93,7 +93,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF000 -> a.bool_col, RF001 -> a.year, RF004 -> a.int_col, RF005 -> a.month, RF008 -> a.id, RF009 -> a.date_string_col
+   runtime filters: RF000 -> a.`year`, RF001 -> a.bool_col, RF004 -> a.`month`, RF005 -> a.int_col, RF008 -> a.id, RF009 -> a.date_string_col
    row-size=37B cardinality=7.30K
 ====
 # Keep only MAX_NUM_RUNTIME_FILTERS most selective filters, remove the rest.
@@ -121,20 +121,20 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
 |  row-size=74B cardinality=16.21G
 |
-|--11:EXCHANGE [HASH(d.bool_col,d.year)]
+|--11:EXCHANGE [HASH(d.`year`,d.bool_col)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=5B cardinality=7.30K
 |
-10:EXCHANGE [HASH(a.bool_col,a.year)]
+10:EXCHANGE [HASH(a.`year`,a.bool_col)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`, RF005 <- c.int_col
 |  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
@@ -156,7 +156,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.int_col, RF005 -> a.month
+   runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.`month`, RF005 -> a.int_col
    row-size=37B cardinality=7.30K
 ====
 # DISABLE_ROW_RUNTIME_FILTERING is set: only partition column filters are applied.
@@ -176,8 +176,8 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
-|  runtime filters: RF001 <- d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
+|  runtime filters: RF000 <- d.`year`
 |  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
@@ -185,8 +185,8 @@ PLAN-ROOT SINK
 |     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`
 |  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
@@ -203,7 +203,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF001 -> a.year, RF005 -> a.month
+   runtime filters: RF000 -> a.`year`, RF004 -> a.`month`
    row-size=37B cardinality=7.30K
 ====
 # DISABLE_ROW_RUNTIME_FILTERING is set and MAX_NUM_RUNTIME_FILTERS is set to 2: only the 2
@@ -225,8 +225,8 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
-|  runtime filters: RF001 <- d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
+|  runtime filters: RF000 <- d.`year`
 |  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
@@ -234,8 +234,8 @@ PLAN-ROOT SINK
 |     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`
 |  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
@@ -252,7 +252,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF001 -> a.year, RF005 -> a.month
+   runtime filters: RF000 -> a.`year`, RF004 -> a.`month`
    row-size=37B cardinality=7.30K
 ====
 # RUNTIME_FILTER_MODE is set to LOCAL: only local filters are applied
@@ -272,8 +272,8 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
-|  runtime filters: RF000 <- d.bool_col, RF001 <- d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
+|  runtime filters: RF000 <- d.`year`, RF001 <- d.bool_col
 |  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
@@ -281,8 +281,8 @@ PLAN-ROOT SINK
 |     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`, RF005 <- c.int_col
 |  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
@@ -300,7 +300,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF000 -> a.bool_col, RF001 -> a.year, RF004 -> a.int_col, RF005 -> a.month, RF008 -> a.id, RF009 -> a.date_string_col
+   runtime filters: RF000 -> a.`year`, RF001 -> a.bool_col, RF004 -> a.`month`, RF005 -> a.int_col, RF008 -> a.id, RF009 -> a.date_string_col
    row-size=37B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
@@ -316,20 +316,20 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
 |  row-size=74B cardinality=16.21G
 |
-|--11:EXCHANGE [HASH(d.bool_col,d.year)]
+|--11:EXCHANGE [HASH(d.`year`,d.bool_col)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=5B cardinality=7.30K
 |
-10:EXCHANGE [HASH(a.bool_col,a.year)]
+10:EXCHANGE [HASH(a.`year`,a.bool_col)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF004 <- c.int_col, RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`, RF005 <- c.int_col
 |  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
@@ -351,7 +351,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF004 -> a.int_col, RF005 -> a.month, RF008 -> a.id, RF009 -> a.date_string_col
+   runtime filters: RF004 -> a.`month`, RF005 -> a.int_col, RF008 -> a.id, RF009 -> a.date_string_col
    row-size=37B cardinality=7.30K
 ====
 # RUNTIME_FILTER_MODE is set to LOCAL and MAX_NUM_RUNTIME_FILTERS is set to 3: only 3
@@ -374,7 +374,7 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
 |  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
@@ -382,8 +382,8 @@ PLAN-ROOT SINK
 |     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF004 <- c.int_col
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`
 |  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
@@ -401,7 +401,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.int_col
+   runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.`month`
    row-size=37B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
@@ -417,20 +417,20 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
 |  row-size=74B cardinality=16.21G
 |
-|--11:EXCHANGE [HASH(d.bool_col,d.year)]
+|--11:EXCHANGE [HASH(d.`year`,d.bool_col)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=5B cardinality=7.30K
 |
-10:EXCHANGE [HASH(a.bool_col,a.year)]
+10:EXCHANGE [HASH(a.`year`,a.bool_col)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF004 <- c.int_col
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`
 |  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
@@ -452,7 +452,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.int_col
+   runtime filters: RF008 -> a.id, RF009 -> a.date_string_col, RF004 -> a.`month`
    row-size=37B cardinality=7.30K
 ====
 # DISABLE_ROW_RUNTIME_FILTERING is set and RUNTIME_FILTER_MODE is set to LOCAL: only local
@@ -474,8 +474,8 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
-|  runtime filters: RF001 <- d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
+|  runtime filters: RF000 <- d.`year`
 |  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
@@ -483,8 +483,8 @@ PLAN-ROOT SINK
 |     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`
 |  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
@@ -501,7 +501,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF001 -> a.year, RF005 -> a.month
+   runtime filters: RF000 -> a.`year`, RF004 -> a.`month`
    row-size=37B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
@@ -517,20 +517,20 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
 |  row-size=74B cardinality=16.21G
 |
-|--11:EXCHANGE [HASH(d.bool_col,d.year)]
+|--11:EXCHANGE [HASH(d.`year`,d.bool_col)]
 |  |
 |  03:SCAN HDFS [functional.alltypes d]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=5B cardinality=7.30K
 |
-10:EXCHANGE [HASH(a.bool_col,a.year)]
+10:EXCHANGE [HASH(a.`year`,a.bool_col)]
 |
 05:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
-|  runtime filters: RF005 <- c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
+|  runtime filters: RF004 <- c.`month`
 |  row-size=69B cardinality=4.44M
 |
 |--09:EXCHANGE [BROADCAST]
@@ -551,7 +551,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF005 -> a.month
+   runtime filters: RF004 -> a.`month`
    row-size=37B cardinality=7.30K
 ====
 # RUNTIME_FILTER_MODE is OFF: no filters are applied
@@ -571,7 +571,7 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
 |  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
@@ -579,7 +579,7 @@ PLAN-ROOT SINK
 |     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
 |  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
@@ -615,7 +615,7 @@ PLAN-ROOT SINK
 |  row-size=8B cardinality=1
 |
 06:HASH JOIN [INNER JOIN]
-|  hash predicates: a.bool_col = d.bool_col, a.year = d.year
+|  hash predicates: a.`year` = d.`year`, a.bool_col = d.bool_col
 |  row-size=74B cardinality=16.21G
 |
 |--03:SCAN HDFS [functional.alltypes d]
@@ -623,7 +623,7 @@ PLAN-ROOT SINK
 |     row-size=5B cardinality=7.30K
 |
 05:HASH JOIN [INNER JOIN]
-|  hash predicates: a.int_col = c.int_col, a.month = c.month
+|  hash predicates: a.`month` = c.`month`, a.int_col = c.int_col
 |  row-size=69B cardinality=4.44M
 |
 |--02:SCAN HDFS [functional.alltypes c]
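
For context while reading the expected-plan updates above: runtime-filter-query-options.test exercises three query options whose effects are visible in these hunks (filters dropped, renumbered, or restricted to partition columns). A minimal illustrative session, not part of this patch, that produces plans of this shape follows; the option names and tables are exactly those appearing in the test comments and plans above:

    -- Cap how many runtime filters the planner keeps (least selective dropped).
    SET MAX_NUM_RUNTIME_FILTERS=3;
    -- LOCAL applies only filters produced and consumed in the same fragment.
    SET RUNTIME_FILTER_MODE=LOCAL;
    -- Keep only filters that prune partitions, disabling row-level filtering.
    SET DISABLE_ROW_RUNTIME_FILTERING=true;
    SELECT count(*)
    FROM functional.alltypes a
    JOIN functional.alltypes d
      ON a.`year` = d.`year` AND a.bool_col = d.bool_col;
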
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test b/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test
index babc89d..aee03f5 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/shuffle-by-distinct-exprs.test
@@ -72,17 +72,17 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(int_col)
-|  group by: year
+|  group by: `year`
 |  row-size=12B cardinality=2
 |
 04:AGGREGATE
-|  group by: year, int_col
+|  group by: `year`, int_col
 |  row-size=8B cardinality=20
 |
-03:EXCHANGE [HASH(year)]
+03:EXCHANGE [HASH(`year`)]
 |
 01:AGGREGATE [STREAMING]
-|  group by: year, int_col
+|  group by: `year`, int_col
 |  row-size=8B cardinality=20
 |
 00:SCAN HDFS [functional.alltypes]
@@ -100,24 +100,24 @@ PLAN-ROOT SINK
 |
 06:AGGREGATE [FINALIZE]
 |  output: count:merge(int_col)
-|  group by: year
+|  group by: `year`
 |  row-size=12B cardinality=2
 |
-05:EXCHANGE [HASH(year)]
+05:EXCHANGE [HASH(`year`)]
 |
 02:AGGREGATE [STREAMING]
 |  output: count(int_col)
-|  group by: year
+|  group by: `year`
 |  row-size=12B cardinality=2
 |
 04:AGGREGATE
-|  group by: year, int_col
+|  group by: `year`, int_col
 |  row-size=8B cardinality=20
 |
-03:EXCHANGE [HASH(year,int_col)]
+03:EXCHANGE [HASH(`year`,int_col)]
 |
 01:AGGREGATE [STREAMING]
-|  group by: year, int_col
+|  group by: `year`, int_col
 |  row-size=8B cardinality=20
 |
 00:SCAN HDFS [functional.alltypes]
@@ -217,29 +217,29 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(a.int_col)
-|  group by: a.year
+|  group by: a.`year`
 |  row-size=12B cardinality=2
 |
 03:AGGREGATE
-|  group by: a.year, a.int_col
+|  group by: a.`year`, a.int_col
 |  row-size=8B cardinality=20
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.year = b.year
-|  runtime filters: RF000 <- b.year
+|  hash predicates: a.`year` = b.`year`
+|  runtime filters: RF000 <- b.`year`
 |  row-size=12B cardinality=26.64M
 |
-|--06:EXCHANGE [HASH(b.year)]
+|--06:EXCHANGE [HASH(b.`year`)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=4B cardinality=7.30K
 |
-05:EXCHANGE [HASH(a.year)]
+05:EXCHANGE [HASH(a.`year`)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF000 -> a.year
+   runtime filters: RF000 -> a.`year`
    row-size=8B cardinality=7.30K
 ====
 select count(distinct a.int_col) from functional.alltypes a inner join [shuffle]
@@ -254,42 +254,42 @@ PLAN-ROOT SINK
 |
 10:AGGREGATE [FINALIZE]
 |  output: count:merge(a.int_col)
-|  group by: a.year
+|  group by: a.`year`
 |  row-size=12B cardinality=2
 |
-09:EXCHANGE [HASH(a.year)]
+09:EXCHANGE [HASH(a.`year`)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(a.int_col)
-|  group by: a.year
+|  group by: a.`year`
 |  row-size=12B cardinality=2
 |
 08:AGGREGATE
-|  group by: a.year, a.int_col
+|  group by: a.`year`, a.int_col
 |  row-size=8B cardinality=20
 |
-07:EXCHANGE [HASH(a.year,a.int_col)]
+07:EXCHANGE [HASH(a.`year`,a.int_col)]
 |
 03:AGGREGATE [STREAMING]
-|  group by: a.year, a.int_col
+|  group by: a.`year`, a.int_col
 |  row-size=8B cardinality=20
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.year = b.year
-|  runtime filters: RF000 <- b.year
+|  hash predicates: a.`year` = b.`year`
+|  runtime filters: RF000 <- b.`year`
 |  row-size=12B cardinality=26.64M
 |
-|--06:EXCHANGE [HASH(b.year)]
+|--06:EXCHANGE [HASH(b.`year`)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=4B cardinality=7.30K
 |
-05:EXCHANGE [HASH(a.year)]
+05:EXCHANGE [HASH(a.`year`)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF000 -> a.year
+   runtime filters: RF000 -> a.`year`
    row-size=8B cardinality=7.30K
 ====
 # The input is partitioned by distinct exprs + grouping exprs
@@ -306,36 +306,36 @@ PLAN-ROOT SINK
 |
 08:AGGREGATE [FINALIZE]
 |  output: count:merge(a.int_col)
-|  group by: a.year
+|  group by: a.`year`
 |  row-size=12B cardinality=2
 |
-07:EXCHANGE [HASH(a.year)]
+07:EXCHANGE [HASH(a.`year`)]
 |
 04:AGGREGATE [STREAMING]
 |  output: count(a.int_col)
-|  group by: a.year
+|  group by: a.`year`
 |  row-size=12B cardinality=2
 |
 03:AGGREGATE
-|  group by: a.year, a.int_col
+|  group by: a.`year`, a.int_col
 |  row-size=8B cardinality=20
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.int_col = b.int_col, a.year = b.year
-|  runtime filters: RF000 <- b.int_col, RF001 <- b.year
+|  hash predicates: a.`year` = b.`year`, a.int_col = b.int_col
+|  runtime filters: RF000 <- b.`year`, RF001 <- b.int_col
 |  row-size=16B cardinality=5.33M
 |
-|--06:EXCHANGE [HASH(b.int_col,b.year)]
+|--06:EXCHANGE [HASH(b.`year`,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=8B cardinality=7.30K
 |
-05:EXCHANGE [HASH(a.int_col,a.year)]
+05:EXCHANGE [HASH(a.`year`,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF000 -> a.int_col, RF001 -> a.year
+   runtime filters: RF000 -> a.`year`, RF001 -> a.int_col
    row-size=8B cardinality=7.30K
 ====
 select count(distinct a.int_col) from functional.alltypes a inner join [shuffle]
@@ -351,34 +351,34 @@ PLAN-ROOT SINK
 |
 04:AGGREGATE [FINALIZE]
 |  output: count(a.int_col)
-|  group by: a.year
+|  group by: a.`year`
 |  row-size=12B cardinality=2
 |
 08:AGGREGATE
-|  group by: a.year, a.int_col
+|  group by: a.`year`, a.int_col
 |  row-size=8B cardinality=20
 |
-07:EXCHANGE [HASH(a.year)]
+07:EXCHANGE [HASH(a.`year`)]
 |
 03:AGGREGATE [STREAMING]
-|  group by: a.year, a.int_col
+|  group by: a.`year`, a.int_col
 |  row-size=8B cardinality=20
 |
 02:HASH JOIN [INNER JOIN, PARTITIONED]
-|  hash predicates: a.int_col = b.int_col, a.year = b.year
-|  runtime filters: RF000 <- b.int_col, RF001 <- b.year
+|  hash predicates: a.`year` = b.`year`, a.int_col = b.int_col
+|  runtime filters: RF000 <- b.`year`, RF001 <- b.int_col
 |  row-size=16B cardinality=5.33M
 |
-|--06:EXCHANGE [HASH(b.int_col,b.year)]
+|--06:EXCHANGE [HASH(b.`year`,b.int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypes b]
 |     partitions=24/24 files=24 size=478.45KB
 |     row-size=8B cardinality=7.30K
 |
-05:EXCHANGE [HASH(a.int_col,a.year)]
+05:EXCHANGE [HASH(a.`year`,a.int_col)]
 |
 00:SCAN HDFS [functional.alltypes a]
    partitions=24/24 files=24 size=478.45KB
-   runtime filters: RF000 -> a.int_col, RF001 -> a.year
+   runtime filters: RF000 -> a.`year`, RF001 -> a.int_col
    row-size=8B cardinality=7.30K
 ====
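
A note on the backticks threaded through all of these expected plans: year and month are reserved words in Hive, so SQL regenerated by the frontend now quotes them, while ordinary identifiers such as int_col stay bare. A small illustrative query, assuming nothing beyond the functional.alltypes table used throughout these tests:

    -- `year` and `month` collide with Hive keywords and are now backquoted;
    -- int_col is not a keyword, so it is left unquoted.
    SELECT `year`, `month`, count(distinct int_col)
    FROM functional.alltypes
    GROUP BY `year`, `month`;
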
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test b/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test
index e60e032..c0b8831 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/small-query-opt.test
@@ -325,7 +325,7 @@ PLAN-ROOT SINK
 |  row-size=5B cardinality=6
 |
 |--05:SCAN HDFS [functional.alltypestiny c]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=5B cardinality=2
 |
@@ -337,12 +337,12 @@ PLAN-ROOT SINK
 |  row-size=5B cardinality=4
 |
 |--02:SCAN HDFS [functional.alltypestiny b]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=5B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny a]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=5B cardinality=2
 ====
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test b/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test
index 5176491..7953e80 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/subquery-rewrite.test
@@ -1725,7 +1725,7 @@ where
 PLAN-ROOT SINK
 |
 03:HASH JOIN [RIGHT OUTER JOIN]
-|  hash predicates: tt1.month = t1.id
+|  hash predicates: tt1.`month` = t1.id
 |  other predicates: t1.id > zeroifnull(count(tt1.smallint_col))
 |  runtime filters: RF000 <- t1.id
 |  row-size=16B cardinality=8
@@ -1736,7 +1736,7 @@ PLAN-ROOT SINK
 |
 02:AGGREGATE [FINALIZE]
 |  output: count(tt1.smallint_col)
-|  group by: tt1.month
+|  group by: tt1.`month`
 |  row-size=12B cardinality=4
 |
 01:SCAN HDFS [functional.alltypestiny tt1]
@@ -2414,12 +2414,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=12
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -2455,12 +2455,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=12
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -2971,7 +2971,7 @@ PLAN-ROOT SINK
 |  |
 |  04:AGGREGATE [FINALIZE]
 |  |  output: max(allt.smallint_col)
-|  |  group by: ata.month
+|  |  group by: ata.`month`
 |  |  limit: 2
 |  |  row-size=6B cardinality=1
 |  |
@@ -2985,7 +2985,7 @@ PLAN-ROOT SINK
 |  |     row-size=6B cardinality=7.30K
 |  |
 |  02:SCAN HDFS [functional.alltypesagg ata]
-|     partition predicates: ata.month = 1
+|     partition predicates: ata.`month` = 1
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> ata.id
 |     row-size=8B cardinality=11.00K
@@ -3015,15 +3015,15 @@ PLAN-ROOT SINK
 |  |
 |  10:AGGREGATE [FINALIZE]
 |  |  output: max:merge(allt.smallint_col)
-|  |  group by: ata.month
+|  |  group by: ata.`month`
 |  |  limit: 2
 |  |  row-size=6B cardinality=1
 |  |
-|  09:EXCHANGE [HASH(ata.month)]
+|  09:EXCHANGE [HASH(ata.`month`)]
 |  |
 |  04:AGGREGATE [STREAMING]
 |  |  output: max(allt.smallint_col)
-|  |  group by: ata.month
+|  |  group by: ata.`month`
 |  |  row-size=6B cardinality=1
 |  |
 |  03:HASH JOIN [INNER JOIN, PARTITIONED]
@@ -3040,7 +3040,7 @@ PLAN-ROOT SINK
 |  07:EXCHANGE [HASH(ata.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypesagg ata]
-|     partition predicates: ata.month = 1
+|     partition predicates: ata.`month` = 1
 |     partitions=11/11 files=11 size=814.73KB
 |     runtime filters: RF002 -> ata.id
 |     row-size=8B cardinality=11.00K
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test b/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
index 9569998..5758fe8 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/tablesample.test
@@ -68,7 +68,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = CAST(2009 AS INT)
+   partition predicates: `year` = CAST(2009 AS INT)
    partitions=6/24 files=6 size=119.70KB
    stored statistics:
      table: rows=7300 size=478.45KB
@@ -127,7 +127,7 @@ PLAN-ROOT SINK
 |  mem-estimate=0B mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = CAST(2010 AS INT)
+   partition predicates: `year` = CAST(2010 AS INT)
    partitions=1/24 files=1 size=20.36KB
    stored statistics:
      table: rows=7300 size=478.45KB
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/union.test b/testdata/workloads/functional-planner/queries/PlannerTest/union.test
index fa557b6..e481793 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/union.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/union.test
@@ -11,12 +11,12 @@ where b.month = 1
 PLAN-ROOT SINK
 |
 04:HASH JOIN [INNER JOIN]
-|  hash predicates: month = b.month
-|  runtime filters: RF000 <- b.month
+|  hash predicates: month = b.`month`
+|  runtime filters: RF000 <- b.`month`
 |  row-size=97B cardinality=1.24K
 |
 |--03:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month = 1
+|     partition predicates: b.`month` = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     row-size=89B cardinality=25
 |
@@ -41,14 +41,14 @@ PLAN-ROOT SINK
 06:EXCHANGE [UNPARTITIONED]
 |
 04:HASH JOIN [INNER JOIN, BROADCAST]
-|  hash predicates: month = b.month
-|  runtime filters: RF000 <- b.month
+|  hash predicates: month = b.`month`
+|  runtime filters: RF000 <- b.`month`
 |  row-size=97B cardinality=1.24K
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  03:SCAN HDFS [functional.alltypessmall b]
-|     partition predicates: b.month = 1
+|     partition predicates: b.`month` = 1
 |     partitions=1/4 files=1 size=1.57KB
 |     row-size=89B cardinality=25
 |
@@ -82,17 +82,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -112,17 +112,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -140,7 +140,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=3
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     limit: 1
 |     row-size=89B cardinality=1
@@ -150,12 +150,12 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    limit: 1
    row-size=89B cardinality=1
@@ -177,7 +177,7 @@ PLAN-ROOT SINK
 |  |  limit: 1
 |  |
 |  04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     limit: 1
 |     row-size=89B cardinality=1
@@ -191,7 +191,7 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=1
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -199,7 +199,7 @@ PLAN-ROOT SINK
 |  limit: 1
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    limit: 1
    row-size=89B cardinality=1
@@ -222,17 +222,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -262,17 +262,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -293,12 +293,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -317,12 +317,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -347,12 +347,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -381,12 +381,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -416,22 +416,22 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -469,22 +469,22 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -517,22 +517,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--06:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -574,22 +574,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--06:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -622,22 +622,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=6
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 06:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -679,22 +679,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=6
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 06:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -712,17 +712,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -742,17 +742,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -770,17 +770,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -800,17 +800,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -832,17 +832,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -872,17 +872,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -904,17 +904,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -944,17 +944,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -973,7 +973,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -986,12 +986,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1011,7 +1011,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -1030,12 +1030,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1062,17 +1062,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1106,17 +1106,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1137,7 +1137,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--06:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -1150,17 +1150,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1182,7 +1182,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--06:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -1201,17 +1201,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1240,22 +1240,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--05:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1291,22 +1291,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--05:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1341,22 +1341,22 @@ PLAN-ROOT SINK
 |  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |  |     partition predicates: year = 2009, month = 2
+|  |  |     partition predicates: `year` = 2009, `month` = 2
 |  |  |     partitions=1/4 files=1 size=115B
 |  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  07:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1402,22 +1402,22 @@ PLAN-ROOT SINK
 |  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |  |     partition predicates: year = 2009, month = 2
+|  |  |     partition predicates: `year` = 2009, `month` = 2
 |  |  |     partitions=1/4 files=1 size=115B
 |  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  07:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1437,7 +1437,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=5
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -1450,12 +1450,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1475,7 +1475,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=5
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -1494,12 +1494,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1527,17 +1527,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1571,17 +1571,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1604,17 +1604,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1644,17 +1644,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1677,17 +1677,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1717,17 +1717,17 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1752,22 +1752,22 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1799,22 +1799,22 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1839,22 +1839,22 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1886,22 +1886,22 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=8
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -1925,7 +1925,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=5
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -1938,12 +1938,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -1973,7 +1973,7 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=5
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -1992,12 +1992,12 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=4
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 02:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2029,17 +2029,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -2083,17 +2083,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2115,27 +2115,27 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=10
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 3
+|     partition predicates: `year` = 2009, `month` = 3
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -2159,27 +2159,27 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=10
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 3
+|     partition predicates: `year` = 2009, `month` = 3
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2205,27 +2205,27 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=10
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 3
+|     partition predicates: `year` = 2009, `month` = 3
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -2259,27 +2259,27 @@ PLAN-ROOT SINK
 |  row-size=89B cardinality=10
 |
 |--05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 3
+|     partition predicates: `year` = 2009, `month` = 3
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--04:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2322,27 +2322,27 @@ PLAN-ROOT SINK
 |  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--07:SCAN HDFS [functional.alltypestiny]
-|  |  |     partition predicates: year = 2009, month = 3
+|  |  |     partition predicates: `year` = 2009, `month` = 3
 |  |  |     partitions=1/4 files=1 size=115B
 |  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  06:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -2404,27 +2404,27 @@ PLAN-ROOT SINK
 |  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--07:SCAN HDFS [functional.alltypestiny]
-|  |  |     partition predicates: year = 2009, month = 3
+|  |  |     partition predicates: `year` = 2009, `month` = 3
 |  |  |     partitions=1/4 files=1 size=115B
 |  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  06:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--04:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2463,27 +2463,27 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--06:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 3
+|  |     partition predicates: `year` = 2009, `month` = 3
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -2541,27 +2541,27 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--06:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 3
+|  |     partition predicates: `year` = 2009, `month` = 3
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  05:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2609,27 +2609,27 @@ PLAN-ROOT SINK
 |  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--06:SCAN HDFS [functional.alltypestiny]
-|  |  |     partition predicates: year = 2009, month = 3
+|  |  |     partition predicates: `year` = 2009, `month` = 3
 |  |  |     partitions=1/4 files=1 size=115B
 |  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  05:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -2642,22 +2642,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=2
 |  |
 |  |--14:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 5
+|  |     partition predicates: `year` = 2009, `month` = 5
 |  |     partitions=0/4 files=0 size=0B
 |  |     row-size=89B cardinality=0
 |  |
 |  13:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 4
+|     partition predicates: `year` = 2009, `month` = 4
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--11:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 4
+|     partition predicates: `year` = 2009, `month` = 4
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 10:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 3
+   partition predicates: `year` = 2009, `month` = 3
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -2716,27 +2716,27 @@ PLAN-ROOT SINK
 |  |  |  row-size=89B cardinality=4
 |  |  |
 |  |  |--06:SCAN HDFS [functional.alltypestiny]
-|  |  |     partition predicates: year = 2009, month = 3
+|  |  |     partition predicates: `year` = 2009, `month` = 3
 |  |  |     partitions=1/4 files=1 size=115B
 |  |  |     row-size=89B cardinality=2
 |  |  |
 |  |  05:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 2
+|  |     partition predicates: `year` = 2009, `month` = 2
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  |--02:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
@@ -2755,22 +2755,22 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=2
 |  |
 |  |--14:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 5
+|  |     partition predicates: `year` = 2009, `month` = 5
 |  |     partitions=0/4 files=0 size=0B
 |  |     row-size=89B cardinality=0
 |  |
 |  13:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 4
+|     partition predicates: `year` = 2009, `month` = 4
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 |--11:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 4
+|     partition predicates: `year` = 2009, `month` = 4
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 10:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 3
+   partition predicates: `year` = 2009, `month` = 3
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2798,17 +2798,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 04:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -2838,17 +2838,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 04:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2884,17 +2884,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ---- SCANRANGELOCATIONS
@@ -2944,17 +2944,17 @@ PLAN-ROOT SINK
 |  |  row-size=89B cardinality=4
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny]
-|  |     partition predicates: year = 2009, month = 1
+|  |     partition predicates: `year` = 2009, `month` = 1
 |  |     partitions=1/4 files=1 size=115B
 |  |     row-size=89B cardinality=2
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=89B cardinality=2
 |
 05:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=89B cardinality=2
 ====
@@ -2979,7 +2979,7 @@ PLAN-ROOT SINK
 |  |  row-size=13B cardinality=1
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
 |     row-size=5B cardinality=1
@@ -2990,7 +2990,7 @@ PLAN-ROOT SINK
 |  row-size=13B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
    row-size=5B cardinality=1
@@ -3021,7 +3021,7 @@ PLAN-ROOT SINK
 |  |  row-size=13B cardinality=1
 |  |
 |  03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
 |     row-size=5B cardinality=1
@@ -3039,7 +3039,7 @@ PLAN-ROOT SINK
 |  row-size=13B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/4 files=1 size=115B
    predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
    row-size=5B cardinality=1
@@ -3183,7 +3183,7 @@ PLAN-ROOT SINK
 |  row-size=4B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=4B cardinality=2
 |
@@ -3192,7 +3192,7 @@ PLAN-ROOT SINK
 |  row-size=4B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=4B cardinality=2
 ---- DISTRIBUTEDPLAN
@@ -3221,7 +3221,7 @@ PLAN-ROOT SINK
 |  row-size=4B cardinality=6
 |
 |--03:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, month = 1
+|     partition predicates: `year` = 2009, `month` = 1
 |     partitions=1/4 files=1 size=115B
 |     row-size=4B cardinality=2
 |
@@ -3236,7 +3236,7 @@ PLAN-ROOT SINK
 |  row-size=4B cardinality=2
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, month = 2
+   partition predicates: `year` = 2009, `month` = 2
    partitions=1/4 files=1 size=115B
    row-size=4B cardinality=2
 ====
@@ -3443,13 +3443,13 @@ PLAN-ROOT SINK
 |  row-size=9B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, functional.alltypestiny.month = 1
+|     partition predicates: `year` = 2009, functional.alltypestiny.month = 1
 |     partitions=1/4 files=1 size=115B
 |     predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
 |     row-size=9B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, functional.alltypestiny.month = 1
+   partition predicates: `year` = 2009, functional.alltypestiny.month = 1
    partitions=1/4 files=1 size=115B
    predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
    row-size=9B cardinality=1
@@ -3463,13 +3463,13 @@ PLAN-ROOT SINK
 |  row-size=9B cardinality=2
 |
 |--02:SCAN HDFS [functional.alltypestiny]
-|     partition predicates: year = 2009, functional.alltypestiny.month = 1
+|     partition predicates: `year` = 2009, functional.alltypestiny.month = 1
 |     partitions=1/4 files=1 size=115B
 |     predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
 |     row-size=9B cardinality=1
 |
 01:SCAN HDFS [functional.alltypestiny]
-   partition predicates: year = 2009, functional.alltypestiny.month = 1
+   partition predicates: `year` = 2009, functional.alltypestiny.month = 1
    partitions=1/4 files=1 size=115B
    predicates: functional.alltypestiny.int_col < 5, functional.alltypestiny.bool_col = FALSE
    row-size=9B cardinality=1
@@ -3557,7 +3557,7 @@ PLAN-ROOT SINK
 |  |  row-size=16B cardinality=10
 |  |
 |  02:SCAN HDFS [functional.alltypes]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/24 files=1 size=18.12KB
 |     row-size=12B cardinality=280
 |
@@ -3604,7 +3604,7 @@ PLAN-ROOT SINK
 |  |  row-size=16B cardinality=10
 |  |
 |  02:SCAN HDFS [functional.alltypes]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/24 files=1 size=18.12KB
 |     row-size=12B cardinality=280
 |
@@ -3716,7 +3716,7 @@ PLAN-ROOT SINK
 |  |  row-size=16B cardinality=10
 |  |
 |  06:SCAN HDFS [functional.alltypes]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/24 files=1 size=18.12KB
 |     row-size=12B cardinality=280
 |
@@ -3779,7 +3779,7 @@ PLAN-ROOT SINK
 |  |  row-size=16B cardinality=10
 |  |
 |  06:SCAN HDFS [functional.alltypes]
-|     partition predicates: year = 2009, month = 2
+|     partition predicates: `year` = 2009, `month` = 2
 |     partitions=1/24 files=1 size=18.12KB
 |     row-size=12B cardinality=280
 |
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/views.test b/testdata/workloads/functional-planner/queries/PlannerTest/views.test
index 0057e84..7e64613 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/views.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/views.test
@@ -544,7 +544,7 @@ select * from functional.alltypes_parens
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/24 files=1 size=19.95KB
    predicates: (int_col < 100 OR bool_col = FALSE)
    row-size=89B cardinality=31
@@ -554,7 +554,7 @@ PLAN-ROOT SINK
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
-   partition predicates: year = 2009, month = 1
+   partition predicates: `year` = 2009, `month` = 1
    partitions=1/24 files=1 size=19.95KB
    predicates: (int_col < 100 OR bool_col = FALSE)
    row-size=89B cardinality=31
diff --git a/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test b/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
index 1398988..c7e2781 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
@@ -83,7 +83,7 @@ row_regex:.*Max Per-Host Resource Reservation: Memory=.*
 row_regex:.*Per-Host Resource Estimates: Memory=.*
 'Codegen disabled by planner'
 row_regex:.*Analyzed query: SELECT id FROM test_stats_extrapolation_.*.alltypes WHERE.*
-'month IN (CAST(1 AS INT), CAST(2 AS INT), CAST(3 AS INT))'
+'`month` IN (CAST(1 AS INT), CAST(2 AS INT), CAST(3 AS INT))'
 ''
 'F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
 row_regex:.*mem-estimate=.* mem-reservation=.*
@@ -91,7 +91,7 @@ row_regex:.*mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
-'   partition predicates: month IN (CAST(1 AS INT), CAST(2 AS INT), CAST(3 AS INT))'
+'   partition predicates: `month` IN (CAST(1 AS INT), CAST(2 AS INT), CAST(3 AS INT))'
 row_regex:.*partitions=3/12 files=3 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=3650 size=.*
@@ -142,7 +142,7 @@ row_regex:.*Max Per-Host Resource Reservation: Memory=.*
 row_regex:.*Per-Host Resource Estimates: Memory=16MB.*
 'Codegen disabled by planner'
 row_regex:.*Analyzed query: SELECT id FROM test_stats_extrapolation_.*.alltypes WHERE.*
-'year = CAST(2010 AS INT)'
+'`year` = CAST(2010 AS INT)'
 ''
 'F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
 row_regex:.*Per-Host Resources: mem-estimate=.* mem-reservation=.*
@@ -150,7 +150,7 @@ row_regex:.*Per-Host Resources: mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
-'   partition predicates: year = CAST(2010 AS INT)'
+'   partition predicates: `year` = CAST(2010 AS INT)'
 row_regex:.*partitions=12/24 files=12 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=3650 size=.*
@@ -172,7 +172,7 @@ row_regex:.*Max Per-Host Resource Reservation: Memory=.*
 row_regex:.*Per-Host Resource Estimates: Memory=16MB.*
 'Codegen disabled by planner'
 row_regex:.*Analyzed query: SELECT id FROM test_stats_extrapolation_.*.alltypes WHERE.*
-'year = CAST(2010 AS INT)'
+'`year` = CAST(2010 AS INT)'
 ''
 'F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
 row_regex:.*Per-Host Resources: mem-estimate=.* mem-reservation=.*
@@ -180,7 +180,7 @@ row_regex:.*Per-Host Resources: mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
-'   partition predicates: year = CAST(2010 AS INT)'
+'   partition predicates: `year` = CAST(2010 AS INT)'
 row_regex:.*partitions=12/24 files=12 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=10950 size=.*


[impala] 03/05: IMPALA-7841 (Part 1): Refactor SelectStmt for easier debugging

Posted by mi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mikeb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit fe47b2352ea9fc13f2d51da8fa9d0f7c70037779
Author: Paul Rogers <pr...@cloudera.com>
AuthorDate: Thu Nov 8 15:12:11 2018 -0800

    IMPALA-7841 (Part 1): Refactor SelectStmt for easier debugging
    
    Builds on IMPALA-7808 with several additional refactorings:
    
    * IMPALA-7808 minimized code changes. This change cleans up the
      new functions, removing nested ifs and merging the "aggregation"
      function into the body of the new analyze() function.
    * Removed an unneeded analyzer argument.
    
    This is all refactoring: there is no functional change.
    
    Testing: Reran existing tests to ensure that functionality
    remained unchanged.
    
    Change-Id: I4f4fe3d3a1ab3170e294714dab066d40d169eff1
    Reviewed-on: http://gerrit.cloudera.org:8080/11915
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../java/org/apache/impala/analysis/Analyzer.java  |   8 +-
 .../apache/impala/analysis/CollectionTableRef.java |   2 +-
 .../org/apache/impala/analysis/InlineViewRef.java  |   2 +-
 .../java/org/apache/impala/analysis/QueryStmt.java |   2 +-
 .../org/apache/impala/analysis/SelectStmt.java     | 176 ++++++++++-----------
 .../java/org/apache/impala/analysis/Subquery.java  |   2 +-
 .../org/apache/impala/analysis/WithClause.java     |   2 +-
 7 files changed, 94 insertions(+), 100 deletions(-)
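
For readers skimming the diffs below, the main mechanical change is
replacing nested "if" blocks with early-return guard clauses and calling
the former analyzeAggregation() steps directly from analyze(). A minimal
sketch of the guard-clause pattern (hypothetical names, not Impala's
actual API):

class GuardClauseSketch {
  private String whereClause;

  // Before: the whole body nests inside a null check.
  void analyzeNested() {
    if (whereClause != null) {
      validate(whereClause);
      register(whereClause);
    }
  }

  // After: an early return flattens the body by one level, which is
  // easier to read and to step through in a debugger.
  void analyzeFlat() {
    if (whereClause == null) return;
    validate(whereClause);
    register(whereClause);
  }

  private void validate(String c) { /* no-op in this sketch */ }
  private void register(String c) { /* no-op in this sketch */ }
}

The same pattern appears in analyzeWhereClause(), analyzeGroupingExprs(),
and rewriteCountDistinct() below; the QueryStmt.java change simply drops
an Analyzer parameter that getCorrelatedTupleIds() did not use.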

diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index fbe4135..2d39b43 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -156,7 +156,7 @@ public class Analyzer {
   private boolean isSubquery_ = false;
 
   // Flag indicating whether this analyzer belongs to a WITH clause view.
-  private boolean isWithClause_ = false;
+  private boolean hasWithClause_ = false;
 
   // If set, when masked privilege requests are registered they will use this error
   // error message.
@@ -183,8 +183,8 @@ public class Analyzer {
   }
   public boolean setHasPlanHints() { return globalState_.hasPlanHints = true; }
   public boolean hasPlanHints() { return globalState_.hasPlanHints; }
-  public void setIsWithClause() { isWithClause_ = true; }
-  public boolean isWithClause() { return isWithClause_; }
+  public void setHasWithClause() { hasWithClause_ = true; }
+  public boolean hasWithClause() { return hasWithClause_; }
 
   // State shared between all objects of an Analyzer tree. We use LinkedHashMap and
   // LinkedHashSet where applicable to preserve the iteration order and make the class
@@ -403,7 +403,7 @@ public class Analyzer {
     authErrorMsg_ = parentAnalyzer.authErrorMsg_;
     maskPrivChecks_ = parentAnalyzer.maskPrivChecks_;
     enablePrivChecks_ = parentAnalyzer.enablePrivChecks_;
-    isWithClause_ = parentAnalyzer.isWithClause_;
+    hasWithClause_ = parentAnalyzer.hasWithClause_;
   }
 
   /**
diff --git a/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java b/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
index a6f66ab..87f38b7 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
@@ -81,7 +81,7 @@ public class CollectionTableRef extends TableRef {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     if (isAnalyzed_) return;
     desc_ = analyzer.registerTableRef(this);
-    if (isRelative() && !analyzer.isWithClause()) {
+    if (isRelative() && !analyzer.hasWithClause()) {
       SlotDescriptor parentSlotDesc = analyzer.registerSlotRef(resolvedPath_);
       parentSlotDesc.setItemTupleDesc(desc_);
       collectionExpr_ = new SlotRef(parentSlotDesc);
diff --git a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
index 5bc5675..5993235 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
@@ -164,7 +164,7 @@ public class InlineViewRef extends TableRef {
     inlineViewAnalyzer_.setUseHiveColLabels(
         isCatalogView ? true : analyzer.useHiveColLabels());
     queryStmt_.analyze(inlineViewAnalyzer_);
-    correlatedTupleIds_.addAll(queryStmt_.getCorrelatedTupleIds(inlineViewAnalyzer_));
+    correlatedTupleIds_.addAll(queryStmt_.getCorrelatedTupleIds());
     if (explicitColLabels_ != null) {
       Preconditions.checkState(
           explicitColLabels_.size() == queryStmt_.getColLabels().size());
diff --git a/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java b/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
index 4426026..a80608b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
@@ -171,7 +171,7 @@ public abstract class QueryStmt extends StatementBase {
    * (3) a mix of correlated table refs and table refs rooted at those refs
    *     (the statement is 'self-contained' with respect to correlation)
    */
-  public List<TupleId> getCorrelatedTupleIds(Analyzer analyzer)
+  public List<TupleId> getCorrelatedTupleIds()
       throws AnalysisException {
     // Correlated tuple ids of this stmt.
     List<TupleId> correlatedTupleIds = new ArrayList<>();
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
index 8c99714..d568429 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
@@ -223,9 +223,23 @@ public class SelectStmt extends QueryStmt {
       analyzeSelectClause();
       verifyResultExprs();
       analyzeWhereClause();
-
       createSortInfo(analyzer_);
-      analyzeAggregation();
+
+      // Analyze aggregation-relevant components of the select block (Group By
+      // clause, select list, Order By clause), substitute AVG with SUM/COUNT,
+      // create the AggregationInfo, including the agg output tuple, and transform
+      // all post-agg exprs given AggregationInfo's smap.
+      analyzeHavingClause();
+      if (checkForAggregates()) {
+        verifyAggSemantics();
+        analyzeGroupingExprs();
+        collectAggExprs();
+        rewriteCountDistinct();
+        buildAggregateExprs();
+        buildResultExprs();
+        verifyAggregation();
+      }
+
       createAnalyticInfo();
       if (evaluateOrderBy_) createSortTupleInfo(analyzer_);
 
@@ -243,24 +257,12 @@ public class SelectStmt extends QueryStmt {
       buildColumnLineageGraph();
     }
 
-    private void buildColumnLineageGraph() {
-      ColumnLineageGraph graph = analyzer_.getColumnLineageGraph();
-      if (multiAggInfo_ != null && multiAggInfo_.hasAggregateExprs()) {
-        graph.addDependencyPredicates(multiAggInfo_.getGroupingExprs());
-      }
-      if (sortInfo_ != null && hasLimit()) {
-        // When there is a LIMIT clause in conjunction with an ORDER BY, the
-        // ordering exprs must be added in the column lineage graph.
-        graph.addDependencyPredicates(sortInfo_.getSortExprs());
-      }
-    }
-
     private void analyzeSelectClause() throws AnalysisException {
       // Generate !empty() predicates to filter out empty collections.
       // Skip this step when analyzing a WITH-clause because CollectionTableRefs
       // do not register collection slots in their parent in that context
       // (see CollectionTableRef.analyze()).
-      if (!analyzer_.isWithClause()) registerIsNotEmptyPredicates();
+      if (!analyzer_.hasWithClause()) registerIsNotEmptyPredicates();
 
       // analyze plan hints from select list
       selectList_.analyzePlanHints(analyzer_);
@@ -340,20 +342,19 @@ public class SelectStmt extends QueryStmt {
     }
 
     private void analyzeWhereClause() throws AnalysisException {
-      if (whereClause_ != null) {
-        whereClause_.analyze(analyzer_);
-        if (whereClause_.contains(Expr.isAggregatePredicate())) {
-          throw new AnalysisException(
-              "aggregate function not allowed in WHERE clause");
-        }
-        whereClause_.checkReturnsBool("WHERE clause", false);
-        Expr e = whereClause_.findFirstOf(AnalyticExpr.class);
-        if (e != null) {
-          throw new AnalysisException(
-              "WHERE clause must not contain analytic expressions: " + e.toSql());
-        }
-        analyzer_.registerConjuncts(whereClause_, false);
+      if (whereClause_ == null) return;
+      whereClause_.analyze(analyzer_);
+      if (whereClause_.contains(Expr.isAggregatePredicate())) {
+        throw new AnalysisException(
+            "aggregate function not allowed in WHERE clause");
       }
+      whereClause_.checkReturnsBool("WHERE clause", false);
+      Expr e = whereClause_.findFirstOf(AnalyticExpr.class);
+      if (e != null) {
+        throw new AnalysisException(
+            "WHERE clause must not contain analytic expressions: " + e.toSql());
+      }
+      analyzer_.registerConjuncts(whereClause_, false);
     }
 
     /**
@@ -551,26 +552,6 @@ public class SelectStmt extends QueryStmt {
     }
 
     /**
-     * Analyze aggregation-relevant components of the select block (Group By clause,
-     * select list, Order By clause), substitute AVG with SUM/COUNT, create the
-     * AggregationInfo, including the agg output tuple, and transform all post-agg exprs
-     * given AggregationInfo's smap.
-     */
-    private void analyzeAggregation() throws AnalysisException {
-      analyzeHavingClause();
-      if (!checkForAggregates()) {
-        return;
-      }
-      verifyAggSemantics();
-      analyzeGroupingExprs();
-      collectAggExprs();
-      rewriteCountDistinct();
-      buildAggregateExprs();
-      buildResultExprs();
-      verifyAggregation();
-    }
-
-    /**
      * Analyze the HAVING clause. The HAVING clause is a predicate, not a list
      * (like GROUP BY or ORDER BY) and so cannot contain ordinals.
      *
@@ -660,28 +641,28 @@ public class SelectStmt extends QueryStmt {
     }
 
     private void analyzeGroupingExprs() throws AnalysisException {
-      // analyze grouping exprs
-      groupingExprsCopy_ = new ArrayList<>();
-      if (groupingExprs_ != null) {
-        // make a deep copy here, we don't want to modify the original
-        // exprs during analysis (in case we need to print them later)
-        groupingExprsCopy_ = Expr.cloneList(groupingExprs_);
-        substituteOrdinalsAndAliases(groupingExprsCopy_, "GROUP BY", analyzer_);
-
-        for (int i = 0; i < groupingExprsCopy_.size(); ++i) {
-          groupingExprsCopy_.get(i).analyze(analyzer_);
-          if (groupingExprsCopy_.get(i).contains(Expr.isAggregatePredicate())) {
-            // reference the original expr in the error msg
-            throw new AnalysisException(
-                "GROUP BY expression must not contain aggregate functions: "
-                    + groupingExprs_.get(i).toSql());
-          }
-          if (groupingExprsCopy_.get(i).contains(AnalyticExpr.class)) {
-            // reference the original expr in the error msg
-            throw new AnalysisException(
-                "GROUP BY expression must not contain analytic expressions: "
-                    + groupingExprsCopy_.get(i).toSql());
-          }
+      if (groupingExprs_ == null) {
+        groupingExprsCopy_ = new ArrayList<>();
+        return;
+      }
+      // make a deep copy here, we don't want to modify the original
+      // exprs during analysis (in case we need to print them later)
+      groupingExprsCopy_ = Expr.cloneList(groupingExprs_);
+      substituteOrdinalsAndAliases(groupingExprsCopy_, "GROUP BY", analyzer_);
+
+      for (int i = 0; i < groupingExprsCopy_.size(); ++i) {
+        groupingExprsCopy_.get(i).analyze(analyzer_);
+        if (groupingExprsCopy_.get(i).contains(Expr.isAggregatePredicate())) {
+          // reference the original expr in the error msg
+          throw new AnalysisException(
+              "GROUP BY expression must not contain aggregate functions: "
+                  + groupingExprs_.get(i).toSql());
+        }
+        if (groupingExprsCopy_.get(i).contains(AnalyticExpr.class)) {
+          // reference the original expr in the error msg
+          throw new AnalysisException(
+              "GROUP BY expression must not contain analytic expressions: "
+                  + groupingExprsCopy_.get(i).toSql());
         }
       }
     }
@@ -703,28 +684,29 @@ public class SelectStmt extends QueryStmt {
 
     private void rewriteCountDistinct() {
       // Optionally rewrite all count(distinct <expr>) into equivalent NDV() calls.
-      if (analyzer_.getQueryCtx().client_request.query_options.appx_count_distinct) {
-        ndvSmap_ = new ExprSubstitutionMap();
-        for (FunctionCallExpr aggExpr: aggExprs_) {
-          if (!aggExpr.isDistinct()
-              || !aggExpr.getFnName().getFunction().equals("count")
-              || aggExpr.getParams().size() != 1) {
-            continue;
-          }
-          FunctionCallExpr ndvFnCall =
-              new FunctionCallExpr("ndv", aggExpr.getParams().exprs());
-          ndvFnCall.analyzeNoThrow(analyzer_);
-          Preconditions.checkState(ndvFnCall.getType().equals(aggExpr.getType()));
-          ndvSmap_.put(aggExpr, ndvFnCall);
-        }
-        // Replace all count(distinct <expr>) with NDV(<expr>).
-        List<Expr> substAggExprs = Expr.substituteList(aggExprs_,
-            ndvSmap_, analyzer_, false);
-        aggExprs_.clear();
-        for (Expr aggExpr: substAggExprs) {
-          Preconditions.checkState(aggExpr instanceof FunctionCallExpr);
-          aggExprs_.add((FunctionCallExpr) aggExpr);
+      if (!analyzer_.getQueryCtx().client_request.query_options.appx_count_distinct) {
+        return;
+      }
+      ndvSmap_ = new ExprSubstitutionMap();
+      for (FunctionCallExpr aggExpr: aggExprs_) {
+        if (!aggExpr.isDistinct()
+            || !aggExpr.getFnName().getFunction().equals("count")
+            || aggExpr.getParams().size() != 1) {
+          continue;
         }
+        FunctionCallExpr ndvFnCall =
+            new FunctionCallExpr("ndv", aggExpr.getParams().exprs());
+        ndvFnCall.analyzeNoThrow(analyzer_);
+        Preconditions.checkState(ndvFnCall.getType().equals(aggExpr.getType()));
+        ndvSmap_.put(aggExpr, ndvFnCall);
+      }
+      // Replace all count(distinct <expr>) with NDV(<expr>).
+      List<Expr> substAggExprs = Expr.substituteList(aggExprs_,
+          ndvSmap_, analyzer_, false);
+      aggExprs_.clear();
+      for (Expr aggExpr: substAggExprs) {
+        Preconditions.checkState(aggExpr instanceof FunctionCallExpr);
+        aggExprs_.add((FunctionCallExpr) aggExpr);
       }
     }
 
@@ -939,6 +921,18 @@ public class SelectStmt extends QueryStmt {
         }
       }
     }
+
+    private void buildColumnLineageGraph() {
+      ColumnLineageGraph graph = analyzer_.getColumnLineageGraph();
+      if (multiAggInfo_ != null && multiAggInfo_.hasAggregateExprs()) {
+        graph.addDependencyPredicates(multiAggInfo_.getGroupingExprs());
+      }
+      if (sortInfo_ != null && hasLimit()) {
+        // When there is a LIMIT clause in conjunction with an ORDER BY, the
+        // ordering exprs must be added in the column lineage graph.
+        graph.addDependencyPredicates(sortInfo_.getSortExprs());
+      }
+    }
   }
 
   /**
diff --git a/fe/src/main/java/org/apache/impala/analysis/Subquery.java b/fe/src/main/java/org/apache/impala/analysis/Subquery.java
index 17ce30b..68d0355 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Subquery.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Subquery.java
@@ -79,7 +79,7 @@ public class Subquery extends Expr {
     analyzer_.setIsSubquery();
     stmt_.analyze(analyzer_);
     // Check whether the stmt_ contains an illegal mix of un/correlated table refs.
-    stmt_.getCorrelatedTupleIds(analyzer_);
+    stmt_.getCorrelatedTupleIds();
 
     // Set the subquery type based on the types of the exprs in the
     // result list of the associated SelectStmt.
diff --git a/fe/src/main/java/org/apache/impala/analysis/WithClause.java b/fe/src/main/java/org/apache/impala/analysis/WithClause.java
index 6c7cfd0..90a13d4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/WithClause.java
+++ b/fe/src/main/java/org/apache/impala/analysis/WithClause.java
@@ -75,7 +75,7 @@ public class WithClause extends StmtNode {
     // during analysis of the WITH clause. withClauseAnalyzer is a child of 'analyzer' so
     // that local views registered in parent blocks are visible here.
     Analyzer withClauseAnalyzer = Analyzer.createWithNewGlobalState(analyzer);
-    withClauseAnalyzer.setIsWithClause();
+    withClauseAnalyzer.setHasWithClause();
     if (analyzer.isExplain()) withClauseAnalyzer.setIsExplain();
     for (View view: views_) {
       Analyzer viewAnalyzer = new Analyzer(withClauseAnalyzer);