Posted to commits@impala.apache.org by jo...@apache.org on 2020/10/30 17:03:07 UTC

[impala] branch master updated (047906b -> 2357958)

This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git.


    from 047906b  IMPALA-10302: Enable logging at the INFO level for test_scanners_fuzz.py
     new f4ed07c  IMPALA-10206: Avoid MD5 Digest Authorization in FIPS approved mode
     new d164bf4  IMPALA-10294: Improvement to test_skew_reporting_in_runtime_profile
     new 3e06d60  IMPALA-10166 (part 1): ALTER TABLE for Iceberg tables
     new 2357958  IMPALA-10304: Fix log level and format for pytests

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 be/src/thirdparty/squeasel/squeasel.c              | 337 +--------------------
 be/src/util/webserver-test.cc                      |  16 +-
 be/src/util/webserver.cc                           |  23 +-
 bin/dump_breakpad_symbols.py                       |   3 +-
 bin/run-workload.py                                |   2 +-
 bin/start-impala-cluster.py                        |   4 +-
 .../analysis/AlterTableAddPartitionStmt.java       |   4 +
 .../impala/analysis/AlterTableAlterColStmt.java    |  11 +
 .../analysis/AlterTableDropPartitionStmt.java      |   4 +
 .../analysis/AlterTableRecoverPartitionsStmt.java  |   6 +
 .../analysis/AlterTableSetFileFormatStmt.java      |   6 +
 .../impala/analysis/AlterTableSetLocationStmt.java |   6 +
 .../analysis/AlterTableSetRowFormatStmt.java       |   7 +
 .../analysis/AlterTableSetTblProperties.java       |  23 +-
 .../org/apache/impala/analysis/AlterTableStmt.java |   5 -
 .../impala/catalog/iceberg/IcebergCatalog.java     |   7 +
 .../catalog/iceberg/IcebergHadoopCatalog.java      |  11 +
 .../catalog/iceberg/IcebergHadoopTables.java       |   7 +
 .../apache/impala/service/CatalogOpExecutor.java   |  79 +++++
 .../impala/service/IcebergCatalogOpExecutor.java   |  62 ++++
 .../java/org/apache/impala/util/IcebergUtil.java   |  73 +++++
 lib/python/impala_py_lib/helpers.py                |   1 -
 .../queries/QueryTest/iceberg-alter.test           |  96 ++++++
 .../queries/QueryTest/iceberg-negative.test        | 109 ++++++-
 tests/conftest.py                                  |   9 +
 tests/query_test/test_iceberg.py                   |   3 +
 tests/query_test/test_observability.py             |  20 +-
 tests/shell/util.py                                |   1 -
 tests/stress/query_retries_stress_runner.py        |   2 +-
 29 files changed, 559 insertions(+), 378 deletions(-)
 create mode 100644 testdata/workloads/functional-query/queries/QueryTest/iceberg-alter.test


[impala] 02/04: IMPALA-10294: Improvement to test_skew_reporting_in_runtime_profile

Posted by jo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit d164bf42f9fd8ec65db44a5904730587d2b961ec
Author: Qifan Chen <qc...@cloudera.com>
AuthorDate: Tue Oct 27 12:33:52 2020 -0400

    IMPALA-10294: Improvement to test_skew_reporting_in_runtime_profile
    
    This fix improves the skew reporting test by lowering the threshold
    to 0 and by handling the extreme case where no skew is found.
    
    Testing:
    1. Unit testing.
    
    Change-Id: I7a36551f2507d724891707d26b7394fbe3a5657b
    Reviewed-on: http://gerrit.cloudera.org:8080/16662
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 tests/query_test/test_observability.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
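
A sketch of the resulting check, for readers skimming the diff below (the
function name and standalone framing are invented; the regex patterns come
straight from the test): with report_skew_limit set to 0.0 a perfectly
balanced run may legitimately report no skew, so the detailed assertions
only run when the summary line is actually present in the runtime profile.

    import re

    def check_skew_details(runtime_profile):
        # Only assert on the per-node details when the summary line exists;
        # a balanced run under report_skew_limit=0.0 may report no skew.
        skews_found = r'skew\(s\) found at:.*HASH_JOIN.*HASH_JOIN.*HDFS_SCAN_NODE'
        if len(re.findall(skews_found, runtime_profile, re.M)) == 1:
            probe_rows_at_hj = r'HASH_JOIN_NODE.*\n.*Skew details: ProbeRows'
            assert len(re.findall(probe_rows_at_hj, runtime_profile, re.M)) == 2
            rows_read_at_scan = r'HDFS_SCAN_NODE.*\n.*Skew details: RowsRead'
            assert len(re.findall(rows_read_at_scan, runtime_profile, re.M)) == 1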

diff --git a/tests/query_test/test_observability.py b/tests/query_test/test_observability.py
index f5f1e7e..57d09f1 100644
--- a/tests/query_test/test_observability.py
+++ b/tests/query_test/test_observability.py
@@ -743,22 +743,22 @@ class TestObservability(ImpalaTestSuite):
             group by ca_state
             order by ca_state
             """
-    "Set up the skew threshold to 0.02"
-    query_opts = {'report_skew_limit': 0.02}
+    "Set up the skew threshold to 0.0"
+    query_opts = {'report_skew_limit': 0.0}
     results = self.execute_query(query, query_opts)
     assert results.success
 
-    "Expect to see the skew summary"
+    "When the skew summary is seen, look for the details"
     skews_found = 'skew\(s\) found at:.*HASH_JOIN.*HASH_JOIN.*HDFS_SCAN_NODE'
-    assert len(re.findall(skews_found, results.runtime_profile, re.M)) == 1
+    if len(re.findall(skews_found, results.runtime_profile, re.M)) == 1:
 
-    "Expect to see skew details twice at the hash join nodes."
-    probe_rows_at_hj = 'HASH_JOIN_NODE.*\n.*Skew details: ProbeRows'
-    assert len(re.findall(probe_rows_at_hj, results.runtime_profile, re.M)) == 2
+      "Expect to see skew details twice at the hash join nodes."
+      probe_rows_at_hj = 'HASH_JOIN_NODE.*\n.*Skew details: ProbeRows'
+      assert len(re.findall(probe_rows_at_hj, results.runtime_profile, re.M)) == 2
 
-    "Expect to see skew details once at the scan node."
-    probe_rows_at_hdfs_scan = 'HDFS_SCAN_NODE.*\n.*Skew details: RowsRead'
-    assert len(re.findall(probe_rows_at_hdfs_scan, results.runtime_profile, re.M)) == 1
+      "Expect to see skew details once at the scan node."
+      probe_rows_at_hdfs_scan = 'HDFS_SCAN_NODE.*\n.*Skew details: RowsRead'
+      assert len(re.findall(probe_rows_at_hdfs_scan, results.runtime_profile, re.M)) == 1
 
 class TestQueryStates(ImpalaTestSuite):
   """Test that the 'Query State' and 'Impala Query State' are set correctly in the


[impala] 04/04: IMPALA-10304: Fix log level and format for pytests

Posted by jo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 2357958e734b0da36b908030f8a3d45c048cf16f
Author: Joe McDonnell <jo...@cloudera.com>
AuthorDate: Thu Oct 29 14:36:07 2020 -0700

    IMPALA-10304: Fix log level and format for pytests
    
    Recent testing showed that the pytests are not
    respecting the log level and format set in
    conftest.py's configure_logging(). They use
    the default log level of WARNING and the
    default formatter.
    
    The issue is that logging.basicConfig() is only
    effective the first time it is called. The code
    in lib/python/impala_py_lib/helpers.py does a
    call to logging.basicConfig() at the global
    level, and conftest.py imports that file. This
    renders the call in configure_logging()
    ineffective.
    
    To avoid this type of confusion, logging.basicConfig()
    should only be called from main() functions, never at
    the global level of library code. This removes the call
    in lib/python/impala_py_lib (as it is not needed for a
    library without a main function). It also fixes up various
    other locations to move the logging.basicConfig() call
    into the main() function.
    
    Testing:
     - Ran the end to end tests and custom cluster tests
     - Confirmed the logging format
     - Added an assert in configure_logging() to test that
       the INFO log level is applied to the root logger.
    
    Change-Id: I5d91b7f910b3606c50bcba4579179a0bc8c20588
    Reviewed-on: http://gerrit.cloudera.org:8080/16679
    Reviewed-by: Tim Armstrong <ta...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 bin/dump_breakpad_symbols.py                | 3 +--
 bin/run-workload.py                         | 2 +-
 bin/start-impala-cluster.py                 | 4 ++--
 lib/python/impala_py_lib/helpers.py         | 1 -
 tests/conftest.py                           | 9 +++++++++
 tests/shell/util.py                         | 1 -
 tests/stress/query_retries_stress_runner.py | 2 +-
 7 files changed, 14 insertions(+), 8 deletions(-)
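
The underlying behavior is plain Python logging, shown here as a standalone
sketch (not Impala code): logging.basicConfig() only configures the root
logger if it has no handlers yet, so whichever call runs first (for example
at import time in a library module) wins, and later calls are silently
ignored.

    import logging

    # First call wins: it attaches a handler to the root logger, whose
    # level stays at the default of WARNING.
    logging.basicConfig()  # e.g. executed at import time by a library

    # A later call, like the one in conftest.py's configure_logging(),
    # is ignored because the root logger already has a handler.
    logging.basicConfig(level=logging.INFO, format='-- %(message)s')
    assert logging.getLogger().getEffectiveLevel() == logging.WARNING

    # Python 3.8+ offers force=True to reconfigure anyway; the fix taken
    # here is simpler: remove module-level basicConfig() calls from
    # library code and call it only from main().
    logging.basicConfig(level=logging.INFO, format='-- %(message)s',
                        force=True)
    assert logging.getLogger().getEffectiveLevel() == logging.INFO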

diff --git a/bin/dump_breakpad_symbols.py b/bin/dump_breakpad_symbols.py
index f263676..3a6f02e 100755
--- a/bin/dump_breakpad_symbols.py
+++ b/bin/dump_breakpad_symbols.py
@@ -66,8 +66,6 @@ import tempfile
 from argparse import ArgumentParser
 from collections import namedtuple
 
-logging.basicConfig(level=logging.INFO)
-
 BinarySymbolInfo = namedtuple('BinarySymbolInfo', 'path, debug_path')
 
 
@@ -283,6 +281,7 @@ def process_binary(dump_syms, binary, out_dir):
 
 
 def main():
+  logging.basicConfig(level=logging.INFO)
   args = parse_args()
   dump_syms = args.dump_syms or find_dump_syms_binary()
   assert dump_syms
diff --git a/bin/run-workload.py b/bin/run-workload.py
index d21dfe4..d406627 100755
--- a/bin/run-workload.py
+++ b/bin/run-workload.py
@@ -119,7 +119,6 @@ if options.get_password:
   options.password = getpass.getpass()
   options.get_password = None
 
-logging.basicConfig(level=logging.INFO, format='[%(name)s]: %(message)s')
 LOG = logging.getLogger('run-workload')
 
 
@@ -239,6 +238,7 @@ def _validate_options():
       raise RuntimeError("Impalads must be of the form host:port or host.")
 
 if __name__ == "__main__":
+  logging.basicConfig(level=logging.INFO, format='[%(name)s]: %(message)s')
   # Check for badly formed user options.
   _validate_options()
 
diff --git a/bin/start-impala-cluster.py b/bin/start-impala-cluster.py
index 452700c..0edee2c 100755
--- a/bin/start-impala-cluster.py
+++ b/bin/start-impala-cluster.py
@@ -42,8 +42,6 @@ from tests.common.impala_cluster import (ImpalaCluster, DEFAULT_BEESWAX_PORT,
     DEFAULT_CATALOGD_JVM_DEBUG_PORT, DEFAULT_IMPALAD_JVM_DEBUG_PORT,
     find_user_processes, run_daemon)
 
-logging.basicConfig(level=logging.ERROR, format="%(asctime)s %(threadName)s: %(message)s",
-    datefmt="%H:%M:%S")
 LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
 LOG.setLevel(level=logging.DEBUG)
 
@@ -718,6 +716,8 @@ def validate_options():
 
 
 if __name__ == "__main__":
+  logging.basicConfig(level=logging.ERROR, format="%(asctime)s %(threadName)s: %(message)s",
+    datefmt="%H:%M:%S")
   validate_options()
   if options.docker_network is None:
     cluster_ops = MiniClusterOperations()
diff --git a/lib/python/impala_py_lib/helpers.py b/lib/python/impala_py_lib/helpers.py
index a6c68d8..9631ba7 100644
--- a/lib/python/impala_py_lib/helpers.py
+++ b/lib/python/impala_py_lib/helpers.py
@@ -21,7 +21,6 @@ import os
 import re
 import subprocess
 
-logging.basicConfig()
 LOG = logging.getLogger('impala_lib_python_helpers')
 
 
diff --git a/tests/conftest.py b/tests/conftest.py
index f8c98f6..ac6a8c3 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -68,8 +68,17 @@ def configure_logging():
   # Use a "--" since most of our tests output SQL commands, and it's nice to
   # be able to copy-paste directly from the test output back into a shell to
   # try to reproduce a failure.
+  #
+  # This call only takes effect if it is the first call to logging.basicConfig().
+  # For example, if some other library calls logging.basicConfig() at the global
+  # level, then importing that library can render this call ineffective.
   logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
 
+  # Verify that the logging level is set to the correct value.
+  rootLoggerLevel = logging.getLogger().getEffectiveLevel()
+  print("rootLoggerLevel = {0}".format(logging.getLevelName(rootLoggerLevel)))
+  assert(rootLoggerLevel == logging.INFO)
+
 
 def pytest_addoption(parser):
   """Adds a new command line options to py.test"""
diff --git a/tests/shell/util.py b/tests/shell/util.py
index 5e6e9bc..b8e8a91 100755
--- a/tests/shell/util.py
+++ b/tests/shell/util.py
@@ -37,7 +37,6 @@ from tests.common.impala_service import ImpaladService
 from tests.common.impala_test_suite import (IMPALAD_BEESWAX_HOST_PORT,
     IMPALAD_HS2_HOST_PORT, IMPALAD_HS2_HTTP_HOST_PORT)
 
-logging.basicConfig()
 LOG = logging.getLogger('tests/shell/util.py')
 LOG.addHandler(logging.StreamHandler())
 
diff --git a/tests/stress/query_retries_stress_runner.py b/tests/stress/query_retries_stress_runner.py
index a2ec1e2..f0876e1 100755
--- a/tests/stress/query_retries_stress_runner.py
+++ b/tests/stress/query_retries_stress_runner.py
@@ -44,7 +44,6 @@ from tests.util.test_file_parser import load_tpc_queries
 
 IMPALA_HOME = os.environ["IMPALA_HOME"]
 
-logging.basicConfig(level=logging.INFO, format='[%(name)s][%(threadName)s]: %(message)s')
 LOG = logging.getLogger('query_retries_stress_test')
 
 
@@ -293,6 +292,7 @@ def parse_args(parser):
 
 
 def main():
+  logging.basicConfig(level=logging.INFO, format='[%(name)s][%(threadName)s]: %(message)s')
   # Parse the command line args.
   parser = ArgumentParser(description="""
 Runs a stress test for transparent query retries. Starts an impala cluster with a


[impala] 01/04: IMPALA-10206: Avoid MD5 Digest Authorization in FIPS approved mode

Posted by jo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit f4ed07c8eb673fcd2d569ca6474f27ef70ac26b5
Author: wzhou-code <wz...@cloudera.com>
AuthorDate: Thu Oct 22 11:42:21 2020 -0700

    IMPALA-10206: Avoid MD5 Digest Authorization in FIPS approved mode
    
    To comply with FIPS requirements, we should use the OpenSSL
    libraries for cryptographic hash functions instead of our own hash
    functions. This patch replaces the MD5 and SHA1 functions in the
    Squeasel web server with OpenSSL APIs. It also forces Digest
    Authorization off for the web server in FIPS approved mode, since
    Digest Authorization uses an MD5 hash and does not comply with
    FIPS 140-2.
    
    Testing:
     - Passed webserver-test.
     - Passed exhaustive tests.
     - Manually verified HTTP Digest Authorization could not be enabled
       by setting webserver_password_file on a FIPS enabled cluster.
    
    Change-Id: Ie075389b3ab65c612d64ba58e16a10b19bdf4d6f
    Reviewed-on: http://gerrit.cloudera.org:8080/16630
    Reviewed-by: Thomas Tauber-Marshall <tm...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 be/src/thirdparty/squeasel/squeasel.c | 337 ++--------------------------------
 be/src/util/webserver-test.cc         |  16 +-
 be/src/util/webserver.cc              |  23 ++-
 3 files changed, 38 insertions(+), 338 deletions(-)
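
As a hedged illustration of the same constraint outside C++ (Python's
hashlib sits on top of OpenSSL, and on a FIPS-enforcing build constructing
an MD5 object for security use typically raises ValueError, though the
exact behavior depends on the build): an MD5-based feature has to be
refused up front in FIPS mode, which is what the webserver change below
does for HTTP Digest Authorization.

    import hashlib

    def digest_auth_token(payload: bytes) -> str:
        # MD5 is not a FIPS-approved algorithm. On a FIPS-enforcing
        # OpenSSL build this constructor may raise ValueError, so an
        # MD5-based feature must be disabled up front rather than
        # failing in the middle of a request.
        try:
            return hashlib.md5(payload).hexdigest()
        except ValueError as e:
            raise RuntimeError(
                "MD5-based Digest auth is unavailable in FIPS mode") from e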

diff --git a/be/src/thirdparty/squeasel/squeasel.c b/be/src/thirdparty/squeasel/squeasel.c
index 41c5cfa..d716783 100644
--- a/be/src/thirdparty/squeasel/squeasel.c
+++ b/be/src/thirdparty/squeasel/squeasel.c
@@ -65,6 +65,12 @@
 #include <unistd.h>
 #include <dirent.h>
 #include <pthread.h>
+
+#include <openssl/crypto.h>
+#include <openssl/err.h>
+#include <openssl/md5.h>
+#include <openssl/sha.h>
+
 #if defined(__MACH__)
 #define SSL_LIB   "libssl.dylib"
 #define CRYPTO_LIB  "libcrypto.dylib"
@@ -1516,196 +1522,6 @@ static int is_big_endian(void) {
   return ((char *) &n)[0] == 0;
 }
 
-#ifndef HAVE_MD5
-typedef struct MD5Context {
-  uint32_t buf[4];
-  uint32_t bits[2];
-  unsigned char in[64];
-} MD5_CTX;
-
-static void byteReverse(unsigned char *buf, unsigned longs) {
-  uint32_t t;
-
-  // Forrest: MD5 expect LITTLE_ENDIAN, swap if BIG_ENDIAN
-  if (is_big_endian()) {
-    do {
-      t = (uint32_t) ((unsigned) buf[3] << 8 | buf[2]) << 16 |
-        ((unsigned) buf[1] << 8 | buf[0]);
-      * (uint32_t *) buf = t;
-      buf += 4;
-    } while (--longs);
-  }
-}
-
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-#define MD5STEP(f, w, x, y, z, data, s) \
-  ( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )
-
-// Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
-// initialization constants.
-static void MD5Init(MD5_CTX *ctx) {
-  ctx->buf[0] = 0x67452301;
-  ctx->buf[1] = 0xefcdab89;
-  ctx->buf[2] = 0x98badcfe;
-  ctx->buf[3] = 0x10325476;
-
-  ctx->bits[0] = 0;
-  ctx->bits[1] = 0;
-}
-
-static void MD5Transform(uint32_t buf[4], uint32_t const in[16]) {
-  register uint32_t a, b, c, d;
-
-  a = buf[0];
-  b = buf[1];
-  c = buf[2];
-  d = buf[3];
-
-  MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
-  MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
-  MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
-  MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
-  MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
-  MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
-  MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
-  MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
-  MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
-  MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
-  MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
-  MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
-  MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
-  MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
-  MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
-  MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
-  MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
-  MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
-  MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
-  MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
-  MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
-  MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
-  MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
-  MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
-  MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
-  MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
-  MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
-  MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
-  MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
-  MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
-  MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
-  MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
-  MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
-  MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
-  MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
-  MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
-  MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
-  MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
-  MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
-  MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
-  MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
-  MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
-  MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
-  MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
-  MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
-  MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
-  MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
-  MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
-  MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
-  MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
-  MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
-  MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
-  MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
-  MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
-  MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
-  MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
-  MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
-  MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
-  MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
-  MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
-  MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
-  MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
-  MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
-  MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
-  buf[0] += a;
-  buf[1] += b;
-  buf[2] += c;
-  buf[3] += d;
-}
-
-static void MD5Update(MD5_CTX *ctx, unsigned char const *buf, unsigned len) {
-  uint32_t t;
-
-  t = ctx->bits[0];
-  if ((ctx->bits[0] = t + ((uint32_t) len << 3)) < t)
-    ctx->bits[1]++;
-  ctx->bits[1] += len >> 29;
-
-  t = (t >> 3) & 0x3f;
-
-  if (t) {
-    unsigned char *p = (unsigned char *) ctx->in + t;
-
-    t = 64 - t;
-    if (len < t) {
-      memcpy(p, buf, len);
-      return;
-    }
-    memcpy(p, buf, t);
-    byteReverse(ctx->in, 16);
-    MD5Transform(ctx->buf, (uint32_t *) ctx->in);
-    buf += t;
-    len -= t;
-  }
-
-  while (len >= 64) {
-    memcpy(ctx->in, buf, 64);
-    byteReverse(ctx->in, 16);
-    MD5Transform(ctx->buf, (uint32_t *) ctx->in);
-    buf += 64;
-    len -= 64;
-  }
-
-  memcpy(ctx->in, buf, len);
-}
-
-static void MD5Final(unsigned char digest[16], MD5_CTX *ctx) {
-  unsigned count;
-  unsigned char *p;
-  uint32_t *a;
-
-  count = (ctx->bits[0] >> 3) & 0x3F;
-
-  p = ctx->in + count;
-  *p++ = 0x80;
-  count = 64 - 1 - count;
-  if (count < 8) {
-    memset(p, 0, count);
-    byteReverse(ctx->in, 16);
-    MD5Transform(ctx->buf, (uint32_t *) ctx->in);
-    memset(ctx->in, 0, 56);
-  } else {
-    memset(p, 0, count - 8);
-  }
-  byteReverse(ctx->in, 14);
-
-  a = (uint32_t *)ctx->in;
-  a[14] = ctx->bits[0];
-  a[15] = ctx->bits[1];
-
-  MD5Transform(ctx->buf, (uint32_t *) ctx->in);
-  byteReverse((unsigned char *) ctx->buf, 4);
-  memcpy(digest, ctx->buf, 16);
-  memset((char *) ctx, 0, sizeof(*ctx));
-}
-#endif // !HAVE_MD5
-
 // Stringify binary data. Output buffer must be twice as big as input,
 // because each byte takes 2 bytes in string representation
 static void bin2str(char *to, const unsigned char *p, size_t len) {
@@ -1725,15 +1541,15 @@ char *sq_md5(char buf[33], ...) {
   va_list ap;
   MD5_CTX ctx;
 
-  MD5Init(&ctx);
+  MD5_Init(&ctx);
 
   va_start(ap, buf);
   while ((p = va_arg(ap, const char *)) != NULL) {
-    MD5Update(&ctx, (const unsigned char *) p, (unsigned) strlen(p));
+    MD5_Update(&ctx, (const unsigned char *) p, (unsigned) strlen(p));
   }
   va_end(ap);
 
-  MD5Final(hash, &ctx);
+  MD5_Final(hash, &ctx);
   bin2str(buf, hash, sizeof(hash));
   return buf;
 }
@@ -3231,133 +3047,6 @@ static void send_options(struct sq_connection *conn) {
 
 #if defined(USE_WEBSOCKET)
 
-// START OF SHA-1 code
-// Copyright(c) By Steve Reid <st...@edmweb.com>
-#define SHA1HANDSOFF
-#if defined(__sun)
-#include "solarisfixes.h"
-#endif
-
-union char64long16 { unsigned char c[64]; uint32_t l[16]; };
-
-#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
-
-static uint32_t blk0(union char64long16 *block, int i) {
-  // Forrest: SHA expect BIG_ENDIAN, swap if LITTLE_ENDIAN
-  if (!is_big_endian()) {
-    block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) |
-      (rol(block->l[i], 8) & 0x00FF00FF);
-  }
-  return block->l[i];
-}
-
-#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
-    ^block->l[(i+2)&15]^block->l[i&15],1))
-#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(block, i)+0x5A827999+rol(v,5);w=rol(w,30);
-#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
-#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
-#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
-#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
-
-typedef struct {
-    uint32_t state[5];
-    uint32_t count[2];
-    unsigned char buffer[64];
-} SHA1_CTX;
-
-static void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]) {
-  uint32_t a, b, c, d, e;
-  union char64long16 block[1];
-
-  memcpy(block, buffer, 64);
-  a = state[0];
-  b = state[1];
-  c = state[2];
-  d = state[3];
-  e = state[4];
-  R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
-  R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
-  R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
-  R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
-  R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
-  R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
-  R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
-  R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
-  R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
-  R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
-  R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
-  R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
-  R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
-  R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
-  R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
-  R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
-  R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
-  R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
-  R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
-  R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
-  state[0] += a;
-  state[1] += b;
-  state[2] += c;
-  state[3] += d;
-  state[4] += e;
-  a = b = c = d = e = 0;
-  memset(block, '\0', sizeof(block));
-}
-
-static void SHA1Init(SHA1_CTX* context) {
-  context->state[0] = 0x67452301;
-  context->state[1] = 0xEFCDAB89;
-  context->state[2] = 0x98BADCFE;
-  context->state[3] = 0x10325476;
-  context->state[4] = 0xC3D2E1F0;
-  context->count[0] = context->count[1] = 0;
-}
-
-static void SHA1Update(SHA1_CTX* context, const unsigned char* data,
-                       uint32_t len) {
-  uint32_t i, j;
-
-  j = context->count[0];
-  if ((context->count[0] += len << 3) < j)
-    context->count[1]++;
-  context->count[1] += (len>>29);
-  j = (j >> 3) & 63;
-  if ((j + len) > 63) {
-    memcpy(&context->buffer[j], data, (i = 64-j));
-    SHA1Transform(context->state, context->buffer);
-    for ( ; i + 63 < len; i += 64) {
-      SHA1Transform(context->state, &data[i]);
-    }
-    j = 0;
-  }
-  else i = 0;
-  memcpy(&context->buffer[j], &data[i], len - i);
-}
-
-static void SHA1Final(unsigned char digest[20], SHA1_CTX* context) {
-  unsigned i;
-  unsigned char finalcount[8], c;
-
-  for (i = 0; i < 8; i++) {
-    finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)]
-                                     >> ((3-(i & 3)) * 8) ) & 255);
-  }
-  c = 0200;
-  SHA1Update(context, &c, 1);
-  while ((context->count[0] & 504) != 448) {
-    c = 0000;
-    SHA1Update(context, &c, 1);
-  }
-  SHA1Update(context, finalcount, 8);
-  for (i = 0; i < 20; i++) {
-    digest[i] = (unsigned char)
-      ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
-  }
-  memset(context, '\0', sizeof(*context));
-  memset(&finalcount, '\0', sizeof(finalcount));
-}
-// END OF SHA1 CODE
-
 static void base64_encode(const unsigned char *src, int src_len, char *dst) {
   static const char *b64 =
     "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
@@ -3386,13 +3075,13 @@ static void base64_encode(const unsigned char *src, int src_len, char *dst) {
 static void send_websocket_handshake(struct sq_connection *conn) {
   static const char *magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
   char buf[100], sha[20], b64_sha[sizeof(sha) * 2];
-  SHA1_CTX sha_ctx;
+  SHA_CTX sha_ctx;
 
   sq_snprintf(conn, buf, sizeof(buf), "%s%s",
               sq_get_header(conn, "Sec-WebSocket-Key"), magic);
-  SHA1Init(&sha_ctx);
-  SHA1Update(&sha_ctx, (unsigned char *) buf, strlen(buf));
-  SHA1Final((unsigned char *) sha, &sha_ctx);
+  SHA1_Init(&sha_ctx);
+  SHA1_Update(&sha_ctx, (unsigned char *) buf, strlen(buf));
+  SHA1_Final((unsigned char *) sha, &sha_ctx);
   base64_encode((unsigned char *) sha, sizeof(sha), b64_sha);
   sq_printf(conn, "%s%s%s",
             "HTTP/1.1 101 Switching Protocols\r\n"
diff --git a/be/src/util/webserver-test.cc b/be/src/util/webserver-test.cc
index bf18670..6b06444 100644
--- a/be/src/util/webserver-test.cc
+++ b/be/src/util/webserver-test.cc
@@ -21,6 +21,7 @@
 #include <boost/filesystem.hpp>
 #include <boost/lexical_cast.hpp>
 #include <gutil/strings/substitute.h>
+#include <openssl/crypto.h>
 #include <openssl/ssl.h>
 
 #include "common/init.h"
@@ -442,15 +443,20 @@ TEST(Webserver, StartWithPasswordFileTest) {
 
   MetricGroup metrics("webserver-test");
   Webserver webserver("", FLAGS_webserver_port, &metrics);
-  ASSERT_OK(webserver.Start());
+  if (FIPS_mode()) {
+    ASSERT_FALSE(webserver.Start().ok());
+  } else {
+    ASSERT_OK(webserver.Start());
 
-  // Don't expect HTTP requests to work without a password
-  stringstream contents;
-  ASSERT_ERROR_MSG(HttpGet("localhost", FLAGS_webserver_port, "/", &contents),
-      "Unexpected status code: 401");
+    // Don't expect HTTP requests to work without a password
+    stringstream contents;
+    ASSERT_ERROR_MSG(HttpGet("localhost", FLAGS_webserver_port, "/", &contents),
+        "Unexpected status code: 401");
+  }
 }
 
 TEST(Webserver, StartWithMissingPasswordFileTest) {
+  if (FIPS_mode()) return;
   stringstream password_file;
   password_file << getenv("IMPALA_HOME") << "/be/src/testutil/doesntexist";
   auto password =
diff --git a/be/src/util/webserver.cc b/be/src/util/webserver.cc
index 75a1331..3e32025 100644
--- a/be/src/util/webserver.cc
+++ b/be/src/util/webserver.cc
@@ -397,16 +397,21 @@ Status Webserver::Start() {
   }
 
   if (!FLAGS_webserver_password_file.empty()) {
-    // Squeasel doesn't log anything if it can't stat the password file (but will if it
-    // can't open it, which it tries to do during a request)
-    if (!exists(FLAGS_webserver_password_file)) {
-      stringstream ss;
-      ss << "Webserver: Password file does not exist: " << FLAGS_webserver_password_file;
-      return Status(ss.str());
+    if (FIPS_mode()) {
+      return Status("HTTP digest authorization is not supported in FIPS approved mode.");
+    } else {
+      // Squeasel doesn't log anything if it can't stat the password file (but will if it
+      // can't open it, which it tries to do during a request)
+      if (!exists(FLAGS_webserver_password_file)) {
+        stringstream ss;
+        ss << "Webserver: Password file does not exist: "
+           << FLAGS_webserver_password_file;
+        return Status(ss.str());
+      }
+      LOG(INFO) << "Webserver: Password file is " << FLAGS_webserver_password_file;
+      options.push_back("global_auth_file");
+      options.push_back(FLAGS_webserver_password_file.c_str());
     }
-    LOG(INFO) << "Webserver: Password file is " << FLAGS_webserver_password_file;
-    options.push_back("global_auth_file");
-    options.push_back(FLAGS_webserver_password_file.c_str());
   }
 
   if (auth_mode_ == AuthMode::SPNEGO) {


[impala] 03/04: IMPALA-10166 (part 1): ALTER TABLE for Iceberg tables

Posted by jo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 3e06d600c2dcb2c9bcdc4f52cd27cd5d180a900b
Author: skyyws <sk...@163.com>
AuthorDate: Sat Oct 10 15:14:08 2020 +0800

    IMPALA-10166 (part 1): ALTER TABLE for Iceberg tables
    
    This patch mainly implements ALTER TABLE for Iceberg
    tables. We currently support these statements:
      * ADD COLUMNS
      * RENAME TABLE
      * SET TBL_PROPERTIES
      * SET OWNER
    We forbid DROP COLUMN/REPLACE COLUMNS/ALTER COLUMN in this
    patch, since these statements may make Iceberg tables unreadable.
    We may support column resolution by field id in the near future;
    after that, we will support DROP COLUMN/REPLACE COLUMNS/ALTER
    COLUMN for Iceberg tables.
    
    A few things still need attention:
    1. RENAME TABLE is not supported for HadoopCatalog/HadoopTables,
    even though we already implement the 'RENAME TABLE' statement, so
    we only rename the table in the Hive Metastore for external
    tables.
    2. We cannot ADD/DROP PARTITION yet since there is no API for that
    in Iceberg, but related work is already in progress in Iceberg.
    
    Testing:
    - Iceberg table alter test in test_iceberg.py
    - Iceberg table negative test in test_scanners.py
    - Rename tables in iceberg-negative.test
    
    Change-Id: I5104cc47c7b42dacdb52983f503cd263135d6bfc
    Reviewed-on: http://gerrit.cloudera.org:8080/16606
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../analysis/AlterTableAddPartitionStmt.java       |   4 +
 .../impala/analysis/AlterTableAlterColStmt.java    |  11 +++
 .../analysis/AlterTableDropPartitionStmt.java      |   4 +
 .../analysis/AlterTableRecoverPartitionsStmt.java  |   6 ++
 .../analysis/AlterTableSetFileFormatStmt.java      |   6 ++
 .../impala/analysis/AlterTableSetLocationStmt.java |   6 ++
 .../analysis/AlterTableSetRowFormatStmt.java       |   7 ++
 .../analysis/AlterTableSetTblProperties.java       |  23 ++++-
 .../org/apache/impala/analysis/AlterTableStmt.java |   5 -
 .../impala/catalog/iceberg/IcebergCatalog.java     |   7 ++
 .../catalog/iceberg/IcebergHadoopCatalog.java      |  11 +++
 .../catalog/iceberg/IcebergHadoopTables.java       |   7 ++
 .../apache/impala/service/CatalogOpExecutor.java   |  79 +++++++++++++++
 .../impala/service/IcebergCatalogOpExecutor.java   |  62 ++++++++++++
 .../java/org/apache/impala/util/IcebergUtil.java   |  73 ++++++++++++++
 .../queries/QueryTest/iceberg-alter.test           |  96 ++++++++++++++++++
 .../queries/QueryTest/iceberg-negative.test        | 109 ++++++++++++++++++---
 tests/query_test/test_iceberg.py                   |   3 +
 18 files changed, 497 insertions(+), 22 deletions(-)
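
To make the user-facing scope concrete, here is a hedged sketch of the
newly allowed statements driven from Python via impyla; the database,
table, column, and user names are invented for illustration, and the
connection assumes a local coordinator on the default HS2 port.

    from impala.dbapi import connect

    # Assumes an impalad from a local dev cluster on the default HS2 port.
    conn = connect(host='localhost', port=21050)
    cur = conn.cursor()

    # Statements this patch newly supports for Iceberg tables:
    cur.execute("ALTER TABLE ice_db.ice_tbl ADD COLUMNS (event_ts TIMESTAMP)")
    cur.execute("ALTER TABLE ice_db.ice_tbl SET TBLPROPERTIES ('note'='demo')")
    cur.execute("ALTER TABLE ice_db.ice_tbl SET OWNER USER alice")
    # RENAME TABLE is supported as well; see the HadoopCatalog/HadoopTables
    # caveat in the commit message above.
    cur.execute("ALTER TABLE ice_db.ice_tbl RENAME TO ice_db.ice_tbl_v2")

    # Still rejected for Iceberg tables: DROP COLUMN, REPLACE COLUMNS,
    # ALTER/CHANGE COLUMN, ADD/DROP PARTITION, RECOVER PARTITIONS,
    # SET FILEFORMAT, SET LOCATION, SET ROW FORMAT.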

diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
index 41b0308..efdd7e1 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
@@ -20,6 +20,7 @@ package org.apache.impala.analysis;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Joiner;
 
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
@@ -84,6 +85,9 @@ public class AlterTableAddPartitionStmt extends AlterTableStmt {
     if (table instanceof FeKuduTable) {
       throw new AnalysisException("ALTER TABLE ADD PARTITION is not supported for " +
           "Kudu tables: " + table.getTableName());
+    } else if (table instanceof FeIcebergTable) {
+      throw new AnalysisException("ALTER TABLE ADD PARTITION is not supported for " +
+          "Iceberg tables: " + table.getTableName());
     }
     Set<String> partitionSpecs = new HashSet<>();
     for (PartitionDef p: partitions_) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
index 5e6b5cd..6454e37 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.FeHBaseTable;
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.KuduColumn;
@@ -174,5 +175,15 @@ public class AlterTableAlterColStmt extends AlterTableStmt {
             "Altering the nullability of a column is not supported.");
       }
     }
+
+    if (t instanceof FeIcebergTable) {
+      // We cannot update column from primitive type to complex type or
+      // from complex type to primitive type
+      if (t.getColumn(colName_).getType().isComplexType() ||
+          newColDef_.getType().isComplexType()) {
+        throw new AnalysisException(String.format("ALTER TABLE CHANGE COLUMN " +
+            "is not supported for complex types in Iceberg tables."));
+      }
+    }
   }
 }
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
index 3cb9ac9..cd52a29 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
@@ -18,6 +18,7 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
@@ -78,6 +79,9 @@ public class AlterTableDropPartitionStmt extends AlterTableStmt {
     if (table instanceof FeKuduTable) {
       throw new AnalysisException("ALTER TABLE DROP PARTITION is not supported for " +
           "Kudu tables: " + partitionSet_.toSql());
+    } else if (table instanceof FeIcebergTable) {
+      throw new AnalysisException("ALTER TABLE DROP PARTITION is not supported for " +
+          "Iceberg tables: " + table.getFullName());
     }
     if (!ifExists_) partitionSet_.setPartitionShouldExist();
     partitionSet_.setPrivilegeRequirement(Privilege.ALTER);
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
index d04f042..fe2b617 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
@@ -18,6 +18,7 @@
 package org.apache.impala.analysis;
 
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableType;
@@ -48,6 +49,11 @@ public class AlterTableRecoverPartitionsStmt extends AlterTableStmt {
           "must target an HDFS table: " + tableName_);
     }
 
+    if (table_ instanceof FeIcebergTable) {
+      throw new AnalysisException("ALTER TABLE RECOVER PARTITIONS is not supported " +
+          "on Iceberg tables: " + table_.getFullName());
+    }
+
     // Make sure the target table is partitioned.
     if (table_.getMetaStoreTable().getPartitionKeysSize() == 0) {
       throw new AnalysisException("Table is not partitioned: " + tableName_);
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
index b88216c..a45da95 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
@@ -17,6 +17,7 @@
 
 package org.apache.impala.analysis;
 
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
@@ -60,5 +61,10 @@ public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
       throw new AnalysisException("ALTER TABLE SET FILEFORMAT is not supported " +
           "on Kudu tables: " + tbl.getFullName());
     }
+
+    if (tbl instanceof FeIcebergTable) {
+      throw new AnalysisException("ALTER TABLE SET FILEFORMAT is not supported " +
+          "on Iceberg tables: " + tbl.getFullName());
+    }
   }
 }
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
index cb46493..01898d1 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsPartition;
@@ -77,6 +78,11 @@ public class AlterTableSetLocationStmt extends AlterTableSetStmt {
 
     FeTable table = getTargetTable();
     Preconditions.checkNotNull(table);
+    if (table instanceof FeIcebergTable) {
+      throw new AnalysisException("ALTER TABLE SET LOCATION is not supported on Iceberg "
+          + "tables: " + table.getFullName());
+    }
+
     if (table instanceof FeFsTable) {
       FeFsTable hdfsTable = (FeFsTable) table;
       if (getPartitionSet() != null) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
index a8c9fea..aab5012 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java
@@ -19,6 +19,7 @@ package org.apache.impala.analysis;
 
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.RowFormat;
@@ -62,6 +63,12 @@ public class AlterTableSetRowFormatStmt extends AlterTableSetStmt {
       throw new AnalysisException(String.format("ALTER TABLE SET ROW FORMAT is only " +
           "supported on HDFS tables. Conflicting table: %1$s", tbl.getFullName()));
     }
+
+    if (tbl instanceof FeIcebergTable) {
+      throw new AnalysisException("ALTER TABLE SET ROWFORMAT is not supported " +
+          "on Iceberg tables: " + tbl.getFullName());
+    }
+
     if (partitionSet_ != null) {
       for (FeFsPartition partition: partitionSet_.getPartitions()) {
         if (partition.getFileFormat() != HdfsFileFormat.TEXT &&
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
index dce7fae..634c65b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
@@ -27,8 +27,10 @@ import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
 import org.apache.impala.authorization.AuthorizationConfig;
 import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeHBaseTable;
+import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.IcebergTable;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.Pair;
@@ -93,7 +95,11 @@ public class AlterTableSetTblProperties extends AlterTableSetStmt {
           hive_metastoreConstants.META_TABLE_STORAGE));
     }
 
-    if (getTargetTable() instanceof FeKuduTable) analyzeKuduTable(analyzer);
+    if (getTargetTable() instanceof FeKuduTable) {
+      analyzeKuduTable(analyzer);
+    } else if (getTargetTable() instanceof FeIcebergTable) {
+      analyzeIcebergTable(analyzer);
+    }
 
     // Check avro schema when it is set in avro.schema.url or avro.schema.literal to
     // avoid potential metadata corruption (see IMPALA-2042).
@@ -139,6 +145,21 @@ public class AlterTableSetTblProperties extends AlterTableSetStmt {
     }
   }
 
+  private void analyzeIcebergTable(Analyzer analyzer) throws AnalysisException {
+    //Cannot set these properties related to metadata
+    icebergPropertyCheck(IcebergTable.ICEBERG_FILE_FORMAT);
+    icebergPropertyCheck(IcebergTable.ICEBERG_CATALOG);
+    icebergPropertyCheck(IcebergTable.ICEBERG_CATALOG_LOCATION);
+    icebergPropertyCheck(IcebergTable.ICEBERG_TABLE_IDENTIFIER);
+  }
+
+  private void icebergPropertyCheck(String property) throws AnalysisException {
+    if (tblProperties_.containsKey(property)) {
+      throw new AnalysisException(String.format("Changing the '%s' table property is " +
+          "not supported for Iceberg table.", property));
+    }
+  }
+
   /**
    * Check that Avro schema provided in avro.schema.url or avro.schema.literal is valid
    * Json and contains only supported Impala types. If both properties are set, then
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
index d4c3b13..249987e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -21,7 +21,6 @@ import java.util.List;
 
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.FeDataSourceTable;
-import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TAlterTableParams;
@@ -89,10 +88,6 @@ public abstract class AlterTableStmt extends StatementBase {
       throw new AnalysisException(String.format(
           "ALTER TABLE not allowed on a nested collection: %s", tableName_));
     }
-    if (tableRef.getTable() instanceof FeIcebergTable) {
-      throw new AnalysisException(String.format(
-          "ALTER TABLE not allowed on iceberg table: %s", tableName_));
-    }
     Preconditions.checkState(tableRef instanceof BaseTableRef);
     table_ = tableRef.getTable();
     analyzer.checkTableCapability(table_, Analyzer.OperationType.WRITE);
diff --git a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCatalog.java b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCatalog.java
index d12a123..31a45fa 100644
--- a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCatalog.java
@@ -62,4 +62,11 @@ public interface IcebergCatalog {
    * Return true if the table was dropped, false if the table did not exist
    */
   boolean dropTable(FeIcebergTable feTable, boolean purge);
+
+  /**
+   * Renames Iceberg table.
+   * For HadoopTables, Iceberg does not supported 'renameTable' method
+   * For HadoopCatalog, Iceberg implement 'renameTable' method with Exception threw
+   */
+  void renameTable(FeIcebergTable feTable, TableIdentifier newTableId);
 }
diff --git a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergHadoopCatalog.java b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergHadoopCatalog.java
index f1177ea..411dd52 100644
--- a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergHadoopCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergHadoopCatalog.java
@@ -107,4 +107,15 @@ public class IcebergHadoopCatalog implements IcebergCatalog {
     TableIdentifier tableId = IcebergUtil.getIcebergTableIdentifier(feTable);
     return hadoopCatalog.dropTable(tableId, purge);
   }
+
+  @Override
+  public void renameTable(FeIcebergTable feTable, TableIdentifier newTableId) {
+    TableIdentifier oldTableId = IcebergUtil.getIcebergTableIdentifier(feTable);
+    try {
+      hadoopCatalog.renameTable(oldTableId, newTableId);
+    } catch (UnsupportedOperationException e) {
+      throw new UnsupportedOperationException(
+          "Cannot rename Iceberg tables that use 'hadoop.catalog' as catalog.");
+    }
+  }
 }
diff --git a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergHadoopTables.java b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergHadoopTables.java
index 30dd658..4ebf1a7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergHadoopTables.java
+++ b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergHadoopTables.java
@@ -121,4 +121,11 @@ public class IcebergHadoopTables implements IcebergCatalog {
     }
     return true;
   }
+
+  @Override
+  public void renameTable(FeIcebergTable feTable, TableIdentifier newTableId) {
+    // HadoopTables no renameTable method in Iceberg
+    throw new UnsupportedOperationException(
+        "Cannot rename Iceberg tables that use 'hadoop.tables' as catalog.");
+  }
 }
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 2c30bdd..97a8d69 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -685,6 +685,10 @@ public class CatalogOpExecutor {
       if (tbl instanceof KuduTable && altersKuduTable(params.getAlter_type())) {
         alterKuduTable(params, response, (KuduTable) tbl, newCatalogVersion);
         return;
+      } else if (tbl instanceof IcebergTable &&
+          altersIcebergTable(params.getAlter_type())) {
+        alterIcebergTable(params, response, (IcebergTable) tbl, newCatalogVersion);
+        return;
       }
       switch (params.getAlter_type()) {
         case ADD_COLUMNS:
@@ -932,6 +936,56 @@ public class CatalogOpExecutor {
   }
 
   /**
+   * Returns true if the given alteration type changes the underlying table stored in
+   * Iceberg in addition to the HMS table.
+   */
+  private boolean altersIcebergTable(TAlterTableType type) {
+    return type == TAlterTableType.ADD_COLUMNS
+        || type == TAlterTableType.REPLACE_COLUMNS
+        || type == TAlterTableType.DROP_COLUMN
+        || type == TAlterTableType.ALTER_COLUMN;
+  }
+
+  /**
+   * Executes the ALTER TABLE command for a Iceberg table and reloads its metadata.
+   */
+  private void alterIcebergTable(TAlterTableParams params, TDdlExecResponse response,
+      IcebergTable tbl, long newCatalogVersion) throws ImpalaException {
+    Preconditions.checkState(tbl.getLock().isHeldByCurrentThread());
+    switch (params.getAlter_type()) {
+      case ADD_COLUMNS:
+        TAlterTableAddColsParams addColParams = params.getAdd_cols_params();
+        IcebergCatalogOpExecutor.addColumn(tbl, addColParams.getColumns());
+        addSummary(response, "Column(s) have been added.");
+        break;
+      case REPLACE_COLUMNS:
+        //TODO: we need support resolve column by field id at first, and then
+        // support this statement
+      case DROP_COLUMN:
+        //TODO: we need support resolve column by field id at first, and then
+        // support this statement
+        //TAlterTableDropColParams dropColParams = params.getDrop_col_params();
+        //IcebergCatalogOpExecutor.dropColumn(tbl, dropColParams.getCol_name());
+        //addSummary(response, "Column has been dropped.");
+      case ALTER_COLUMN:
+        //TODO: we need support resolve column by field id at first, and then
+        // support this statement
+        //TAlterTableAlterColParams alterColParams = params.getAlter_col_params();
+        //IcebergCatalogOpExecutor.alterColumn(tbl, alterColParams.getCol_name(),
+        //    alterColParams.getNew_col_def());
+        //addSummary(response, "Column has been altered.");
+      default:
+        throw new UnsupportedOperationException(
+            "Unsupported ALTER TABLE operation for Iceberg tables: " +
+            params.getAlter_type());
+    }
+
+    loadTableMetadata(tbl, newCatalogVersion, true, true, null, "ALTER Iceberg TABLE " +
+        params.getAlter_type().name());
+    addTableToCatalogUpdate(tbl, response.result);
+  }
+
+  /**
    * Loads the metadata of a table 'tbl' and assigns a new catalog version.
    * 'reloadFileMetadata', 'reloadTableSchema', and 'partitionsToUpdate'
    * are used only for HdfsTables and control which metadata to reload.
@@ -3265,6 +3319,13 @@ public class CatalogOpExecutor {
           isKuduHmsIntegrationEnabled);
     }
 
+    // If oldTbl is a synchronized Iceberg table, rename the underlying Iceberg table.
+    boolean isSynchronizedIcebergTable = (oldTbl instanceof IcebergTable) &&
+        IcebergTable.isSynchronizedTable(msTbl);
+    if (isSynchronizedIcebergTable) {
+      renameManagedIcebergTable((IcebergTable) oldTbl, msTbl, newTableName);
+    }
+
     // Always updates the HMS metadata for non-Kudu tables. For Kudu tables, when
     // Kudu is not integrated with the Hive Metastore or if this is an external table,
     // Kudu will not automatically update the HMS metadata, we have to do it
@@ -3321,6 +3382,24 @@ public class CatalogOpExecutor {
   }
 
   /**
+   * Renames the underlying Iceberg table for the given managed table. If the new Iceberg
+   * table name is the same as the old Iceberg table name, this method does nothing.
+   */
+  private void renameManagedIcebergTable(IcebergTable oldTbl,
+      org.apache.hadoop.hive.metastore.api.Table msTbl,
+      TableName newTableName) throws ImpalaRuntimeException {
+    TableIdentifier tableId = TableIdentifier.of(newTableName.getDb(),
+        newTableName.getTbl());
+    IcebergCatalogOpExecutor.renameTable(oldTbl, tableId);
+
+    if (msTbl.getParameters().get(IcebergTable.ICEBERG_TABLE_IDENTIFIER) != null) {
+      // We need update table identifier for HadoopCatalog managed table if exists.
+      msTbl.getParameters().put(IcebergTable.ICEBERG_TABLE_IDENTIFIER,
+          tableId.toString());
+    }
+  }
+
+  /**
    * Changes the file format for the given table or partitions. This is a metadata only
    * operation, existing table data will not be converted to the new format. Returns
    * true if the file metadata to be reloaded.
diff --git a/fe/src/main/java/org/apache/impala/service/IcebergCatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/IcebergCatalogOpExecutor.java
index e347f49..4ab4739 100644
--- a/fe/src/main/java/org/apache/impala/service/IcebergCatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/IcebergCatalogOpExecutor.java
@@ -25,6 +25,7 @@ import org.apache.iceberg.AppendFiles;
 import org.apache.iceberg.BaseTable;
 import org.apache.iceberg.DataFile;
 import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.UpdateSchema;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.Table;
@@ -99,6 +100,67 @@ public class IcebergCatalogOpExecutor {
   }
 
   /**
+   * Adds a column to an existing Iceberg table.
+   */
+  public static void addColumn(FeIcebergTable feTable, List<TColumn> columns)
+      throws TableLoadingException, ImpalaRuntimeException {
+    UpdateSchema schema = IcebergUtil.getIcebergUpdateSchema(feTable);
+    for (TColumn column : columns) {
+      org.apache.iceberg.types.Type type =
+          IcebergUtil.fromImpalaColumnType(column.getColumnType());
+      schema.addColumn(column.getColumnName(), type, column.getComment());
+    }
+    schema.commit();
+  }
+
+  /**
+   * Updates the column from Iceberg table.
+   * Iceberg only supports these type conversions:
+   *   INTEGER -> LONG
+   *   FLOAT -> DOUBLE
+   *   DECIMAL(s1,p1) -> DECIMAL(s1,p2), same scale, p1<=p2
+   */
+  public static void alterColumn(FeIcebergTable feTable, String colName, TColumn newCol)
+      throws TableLoadingException, ImpalaRuntimeException {
+    UpdateSchema schema = IcebergUtil.getIcebergUpdateSchema(feTable);
+    org.apache.iceberg.types.Type type =
+        IcebergUtil.fromImpalaColumnType(newCol.getColumnType());
+    // Cannot change a column to complex type
+    Preconditions.checkState(type.isPrimitiveType());
+    schema.updateColumn(colName, type.asPrimitiveType());
+
+    // Rename column if newCol name and oldCol name are different
+    if (!colName.equals(newCol.getColumnName())) {
+      schema.renameColumn(colName, newCol.getColumnName());
+    }
+
+    // Update column comment if not empty
+    if (newCol.getComment() != null && !newCol.getComment().isEmpty()) {
+      schema.updateColumnDoc(colName, newCol.getComment());
+    }
+    schema.commit();
+  }
+
+  /**
+   * Drops a column from a Iceberg table.
+   */
+  public static void dropColumn(FeIcebergTable feTable, String colName)
+      throws TableLoadingException, ImpalaRuntimeException {
+    UpdateSchema schema = IcebergUtil.getIcebergUpdateSchema(feTable);
+    schema.deleteColumn(colName);
+    schema.commit();
+  }
+
+  /**
+   * Rename Iceberg table
+   */
+  public static void renameTable(FeIcebergTable feTable, TableIdentifier tableId)
+      throws ImpalaRuntimeException{
+    IcebergCatalog catalog = IcebergUtil.getIcebergCatalog(feTable);
+    catalog.renameTable(feTable, tableId);
+  }
+
+  /**
    * Transform a StructField to Iceberg NestedField
    */
   private static Types.NestedField createIcebergNestedField(StructField structField)
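
The type promotions listed in alterColumn()'s comment map directly onto Iceberg's
UpdateSchema API. Below is a minimal sketch of that underlying API, not part of this
patch; the class name, the 'icebergTable' handle and the column names are placeholders.

    import org.apache.iceberg.Table;
    import org.apache.iceberg.types.Types;

    public class UpdateSchemaSketch {
      // Widens columns of an existing Iceberg table using only legal promotions.
      static void widenColumns(Table icebergTable) {
        icebergTable.updateSchema()
            .updateColumn("id", Types.LongType.get())            // INTEGER -> LONG
            .updateColumn("price", Types.DecimalType.of(10, 1))  // DECIMAL(8,1) -> DECIMAL(10,1)
            .commit();
        // An illegal change (e.g. LONG -> INTEGER, or altering the scale) is rejected
        // by the Iceberg API instead of being committed.
      }
    }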
diff --git a/fe/src/main/java/org/apache/impala/util/IcebergUtil.java b/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
index c504060..efd4f87 100644
--- a/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
@@ -28,6 +28,7 @@ import com.google.common.hash.Hashing;
 
 import org.apache.impala.common.Pair;
 import org.apache.iceberg.BaseTable;
+import org.apache.iceberg.UpdateSchema;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.DataFile;
 import org.apache.iceberg.FileScanTask;
@@ -57,6 +58,7 @@ import org.apache.impala.catalog.iceberg.IcebergHadoopCatalog;
 import org.apache.impala.catalog.iceberg.IcebergHadoopTables;
 import org.apache.impala.catalog.iceberg.IcebergCatalog;
 import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TColumnType;
 import org.apache.impala.thrift.TCreateTableParams;
 import org.apache.impala.thrift.THdfsFileFormat;
 import org.apache.impala.thrift.TIcebergCatalog;
@@ -153,6 +155,15 @@ public class IcebergUtil {
   }
 
   /**
+   * Returns the Iceberg UpdateSchema for 'feTable'. UpdateSchema is used to evolve the
+   * Iceberg table schema.
+   */
+  public static UpdateSchema getIcebergUpdateSchema(FeIcebergTable feTable)
+      throws TableLoadingException, ImpalaRuntimeException {
+    return getIcebergCatalog(feTable).loadTable(feTable).updateSchema();
+  }
+
+  /**
    * Builds an Iceberg PartitionSpec from the given parameters. All partition columns
    * come from source columns; this is different from HDFS tables.
    */
@@ -385,6 +396,68 @@ public class IcebergUtil {
   }
 
   /**
+   * Gets the Iceberg type for an Impala column type.
+   */
+  public static org.apache.iceberg.types.Type fromImpalaColumnType(
+      TColumnType columnType) throws ImpalaRuntimeException {
+    return fromImpalaType(Type.fromThrift(columnType));
+  }
+
+  /**
+   * Transforms an Impala type to an Iceberg type.
+   */
+  public static org.apache.iceberg.types.Type fromImpalaType(Type t)
+      throws ImpalaRuntimeException {
+    if (t.isScalarType()) {
+      ScalarType st = (ScalarType) t;
+      switch (st.getPrimitiveType()) {
+        case BOOLEAN:
+          return Types.BooleanType.get();
+        case INT:
+          return Types.IntegerType.get();
+        case BIGINT:
+          return Types.LongType.get();
+        case FLOAT:
+          return Types.FloatType.get();
+        case DOUBLE:
+          return Types.DoubleType.get();
+        case STRING:
+          return Types.StringType.get();
+        case DATE:
+          return Types.DateType.get();
+        case BINARY:
+          return Types.BinaryType.get();
+        case TIMESTAMP:
+          return Types.TimestampType.withoutZone();
+        case DECIMAL:
+          return Types.DecimalType.of(st.decimalPrecision(), st.decimalScale());
+        default:
+          throw new ImpalaRuntimeException(String.format(
+              "Type %s is not supported in Iceberg", t.toSql()));
+      }
+    } else if (t.isArrayType()) {
+      ArrayType at = (ArrayType) t;
+      return Types.ListType.ofRequired(1, fromImpalaType(at.getItemType()));
+    } else if (t.isMapType()) {
+      MapType mt = (MapType) t;
+      return Types.MapType.ofRequired(1, 2,
+          fromImpalaType(mt.getKeyType()), fromImpalaType(mt.getValueType()));
+    } else if (t.isStructType()) {
+      StructType st = (StructType) t;
+      List<Types.NestedField> icebergFields = new ArrayList<>();
+      int id = 1;
+      for (StructField field : st.getFields()) {
+        icebergFields.add(Types.NestedField.required(id++, field.getName(),
+            fromImpalaType(field.getType()), field.getComment()));
+      }
+      return Types.StructType.of(icebergFields);
+    } else {
+      throw new ImpalaRuntimeException(String.format(
+          "Type %s is not supported in Iceberg", t.toSql()));
+    }
+  }
+
+  /**
    * Transform iceberg type to impala type
    */
   public static Type toImpalaType(org.apache.iceberg.types.Type t)
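
For reference, a small sketch of what fromImpalaType() is expected to return for a
couple of scalar types. The class below is illustrative only and not part of the patch.

    import org.apache.iceberg.types.Types;
    import org.apache.impala.catalog.ScalarType;
    import org.apache.impala.catalog.Type;
    import org.apache.impala.util.IcebergUtil;

    public class FromImpalaTypeSketch {
      public static void main(String[] args) throws Exception {
        // Impala INT maps to Iceberg's 32-bit IntegerType.
        System.out.println(IcebergUtil.fromImpalaType(Type.INT));
        // DECIMAL keeps its precision and scale.
        System.out.println(IcebergUtil.fromImpalaType(ScalarType.createDecimalType(8, 1))
            .equals(Types.DecimalType.of(8, 1)));
      }
    }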
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-alter.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-alter.test
new file mode 100644
index 0000000..21557cb
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-alter.test
@@ -0,0 +1,96 @@
+====
+---- QUERY
+CREATE TABLE iceberg_test1(
+  level STRING
+)
+STORED AS ICEBERG
+TBLPROPERTIES('iceberg.catalog'='hadoop.tables');
+ALTER TABLE iceberg_test1 ADD COLUMNS(event_time TIMESTAMP, register_time DATE);
+ALTER TABLE iceberg_test1 ADD COLUMNS(message STRING, price DECIMAL(8,1));
+ALTER TABLE iceberg_test1 ADD COLUMNS(map_test MAP <STRING, array <STRING>>, struct_test STRUCT <f1: BIGINT, f2: BIGINT>);
+DESCRIBE iceberg_test1;
+---- RESULTS
+'level','string',''
+'event_time','timestamp',''
+'register_time','date',''
+'message','string',''
+'price','decimal(8,1)',''
+'map_test','map<string,array<string>>',''
+'struct_test','struct<\n  f1:bigint,\n  f2:bigint\n>',''
+---- TYPES
+STRING,STRING,STRING
+====
+---- QUERY
+ALTER TABLE iceberg_test1 set TBLPROPERTIES('fake_key'='fake_value');
+DESCRIBE FORMATTED iceberg_test1;
+---- RESULTS: VERIFY_IS_SUBSET
+'','fake_key            ','fake_value          '
+---- TYPES
+string, string, string
+====
+---- QUERY
+ALTER TABLE iceberg_test1 set OWNER USER fake_user;
+DESCRIBE FORMATTED iceberg_test1;
+---- RESULTS: VERIFY_IS_SUBSET
+'OwnerType:          ','USER                ','NULL'
+'Owner:              ','fake_user           ','NULL'
+---- TYPES
+string, string, string
+====
+---- QUERY
+ALTER TABLE iceberg_test1 set OWNER ROLE fake_role;
+DESCRIBE FORMATTED iceberg_test1;
+---- RESULTS: VERIFY_IS_SUBSET
+'OwnerType:          ','ROLE                ','NULL'
+'Owner:              ','fake_role           ','NULL'
+---- TYPES
+string, string, string
+====
+---- QUERY
+CREATE TABLE iceberg_test2(
+  level STRING
+)
+STORED AS ICEBERG
+TBLPROPERTIES('iceberg.catalog'='hadoop.catalog',
+'iceberg.catalog_location'='/$DATABASE/hadoop_catalog_test');
+ALTER TABLE iceberg_test2 ADD COLUMNS(event_time TIMESTAMP, register_time DATE);
+ALTER TABLE iceberg_test2 ADD COLUMNS(message STRING, price DECIMAL(8,1));
+ALTER TABLE iceberg_test2 ADD COLUMNS(map_test MAP <STRING, array <STRING>>, struct_test STRUCT <f1: BIGINT, f2: BIGINT>);
+DESCRIBE iceberg_test2;
+---- RESULTS
+'level','string',''
+'event_time','timestamp',''
+'register_time','date',''
+'message','string',''
+'price','decimal(8,1)',''
+'map_test','map<string,array<string>>',''
+'struct_test','struct<\n  f1:bigint,\n  f2:bigint\n>',''
+---- TYPES
+STRING,STRING,STRING
+====
+---- QUERY
+ALTER TABLE iceberg_test2 set TBLPROPERTIES('test_key'='test_value');
+DESCRIBE FORMATTED iceberg_test2;
+---- RESULTS: VERIFY_IS_SUBSET
+'','test_key            ','test_value          '
+---- TYPES
+string, string, string
+====
+---- QUERY
+ALTER TABLE iceberg_test2 set OWNER USER fake_user;
+DESCRIBE FORMATTED iceberg_test2;
+---- RESULTS: VERIFY_IS_SUBSET
+'OwnerType:          ','USER                ','NULL'
+'Owner:              ','fake_user           ','NULL'
+---- TYPES
+string, string, string
+====
+---- QUERY
+ALTER TABLE iceberg_test2 set OWNER ROLE fake_role;
+DESCRIBE FORMATTED iceberg_test2;
+---- RESULTS: VERIFY_IS_SUBSET
+'OwnerType:          ','ROLE                ','NULL'
+'Owner:              ','fake_role           ','NULL'
+---- TYPES
+string, string, string
+====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
index 2e2e79b..6f3dda5 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
@@ -1,12 +1,12 @@
 ====
 ---- QUERY
-CREATE TABLE iceberg_test1
+CREATE TABLE iceberg_test
 STORED AS ICEBERG;
 ---- CATCH
 AnalysisException: Table requires at least 1 column for managed iceberg table.
 ====
 ---- QUERY
-CREATE TABLE iceberg_test2(
+CREATE TABLE iceberg_test(
   level STRING
 )
 PARTITION BY SPEC
@@ -19,21 +19,16 @@ STORED AS ICEBERG;
 AnalysisException: Cannot find source column: event_time
 ====
 ---- QUERY
-CREATE TABLE iceberg_test3(
+CREATE TABLE iceberg_table_hadoop_tables(
   level STRING
 )
 STORED AS ICEBERG
 TBLPROPERTIES('iceberg.catalog'='hadoop.tables');
 ====
 ---- QUERY
-TRUNCATE iceberg_test3
+TRUNCATE iceberg_table_hadoop_tables
 ---- CATCH
-AnalysisException: TRUNCATE TABLE not supported on iceberg table: $DATABASE.iceberg_test3
-====
----- QUERY
-ALTER TABLE iceberg_test3 ADD COLUMN event_time TIMESTAMP
----- CATCH
-AnalysisException: ALTER TABLE not allowed on iceberg table: iceberg_test3
+AnalysisException: TRUNCATE TABLE not supported on iceberg table: $DATABASE.iceberg_table_hadoop_tables
 ====
 ---- QUERY
 # iceberg_non_partitioned is not partitioned
@@ -42,7 +37,7 @@ SHOW PARTITIONS functional_parquet.iceberg_non_partitioned
 AnalysisException: Table is not partitioned: functional_parquet.iceberg_non_partitioned
 ====
 ---- QUERY
-CREATE TABLE iceberg_test4(
+CREATE TABLE iceberg_table_hadoop_catalog(
   level STRING
 )
 STORED AS ICEBERG
@@ -52,7 +47,7 @@ TBLPROPERTIES('iceberg.catalog'='hadoop.catalog');
 AnalysisException: Location cannot be set for Iceberg table with 'hadoop.catalog'.
 ====
 ---- QUERY
-CREATE TABLE iceberg_test5(
+CREATE TABLE iceberg_table_hadoop_catalog(
   level STRING
 )
 STORED AS ICEBERG
@@ -61,7 +56,7 @@ TBLPROPERTIES('iceberg.catalog'='hadoop.catalog');
 AnalysisException: Table property 'iceberg.catalog_location' is necessary for Iceberg table with 'hadoop.catalog'.
 ====
 ---- QUERY
-CREATE EXTERNAL TABLE iceberg_test6
+CREATE EXTERNAL TABLE iceberg_external_table_hadoop_catalog
 STORED AS ICEBERG
 LOCATION '/test-warehouse/$DATABASE/hadoop_catalog_test/iceberg_test'
 TBLPROPERTIES('iceberg.catalog_location'='/test-warehouse/fake_table', 'iceberg.table_identifier'='fake_db.fake_table');
@@ -69,17 +64,17 @@ TBLPROPERTIES('iceberg.catalog_location'='/test-warehouse/fake_table', 'iceberg.
 AnalysisException: Location cannot be set for Iceberg table with 'hadoop.catalog'.
 ====
 ---- QUERY
-CREATE EXTERNAL TABLE iceberg_test7
+CREATE EXTERNAL TABLE iceberg_table_hadoop_catalog
 STORED AS ICEBERG
 TBLPROPERTIES('iceberg.table_identifier'='fake_db.fake_table');
 ---- CATCH
 AnalysisException: Table property 'iceberg.catalog_location' is necessary for Iceberg table with 'hadoop.catalog'.
 ====
 ---- QUERY
-CREATE EXTERNAL TABLE iceberg_test8
+CREATE EXTERNAL TABLE fake_iceberg_table_hadoop_catalog
 STORED AS ICEBERG
 TBLPROPERTIES('iceberg.catalog_location'='/test-warehouse/fake_table', 'iceberg.table_identifier'='fake_db.fake_table');
-SHOW CREATE TABLE iceberg_test8;
+SHOW CREATE TABLE fake_iceberg_table_hadoop_catalog;
 ---- CATCH
 row_regex:.*CAUSED BY: TableLoadingException: Table does not exist: fake_db.fake_table*
 ====
@@ -118,3 +113,85 @@ INSERT INTO iceberg_partitioned_insert SELECT * FROM iceberg_partitioned_insert;
 ---- CATCH
 AnalysisException: Impala cannot write partitioned Iceberg tables.
 ====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_tables RENAME TO iceberg_table_hadoop_tables_new;
+---- CATCH
+UnsupportedOperationException: Cannot rename Iceberg tables that use 'hadoop.tables' as catalog.
+====
+---- QUERY
+CREATE TABLE iceberg_table_hadoop_catalog(
+  level STRING,
+  event_time TIMESTAMP
+)
+STORED AS ICEBERG
+TBLPROPERTIES('iceberg.catalog'='hadoop.catalog',
+'iceberg.catalog_location'='/$DATABASE/hadoop_catalog_test');
+ALTER TABLE iceberg_table_hadoop_catalog RENAME TO iceberg_table_hadoop_catalog_new;
+---- CATCH
+UnsupportedOperationException: Cannot rename Iceberg tables that use 'hadoop.catalog' as catalog.
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('iceberg.file_format'='orc');
+---- CATCH
+AnalysisException: Changing the 'iceberg.file_format' table property is not supported for Iceberg table.
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('iceberg.catalog'='hadoop.tables');
+---- CATCH
+AnalysisException: Changing the 'iceberg.catalog' table property is not supported for Iceberg table.
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('iceberg.catalog_location'='/fake_location');
+---- CATCH
+AnalysisException: Changing the 'iceberg.catalog_location' table property is not supported for Iceberg table.
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog set TBLPROPERTIES('iceberg.table_identifier'='fake_db.fake_table');
+---- CATCH
+AnalysisException: Changing the 'iceberg.table_identifier' table property is not supported for Iceberg table.
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog set FILEFORMAT PARQUET;
+---- CATCH
+AnalysisException: ALTER TABLE SET FILEFORMAT is not supported on Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog SET ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
+---- CATCH
+AnalysisException: ALTER TABLE SET ROWFORMAT is not supported on Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog SET LOCATION '/fake_location';
+---- CATCH
+AnalysisException: ALTER TABLE SET LOCATION is not supported on Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog ADD PARTITION(fake_col='fake_value');
+---- CATCH
+AnalysisException: ALTER TABLE ADD PARTITION is not supported for Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog DROP PARTITION(fake_col='fake_value');
+---- CATCH
+AnalysisException: ALTER TABLE DROP PARTITION is not supported for Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog RECOVER PARTITIONS;
+---- CATCH
+AnalysisException: ALTER TABLE RECOVER PARTITIONS is not supported on Iceberg tables: $DATABASE.iceberg_table_hadoop_catalog
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog DROP COLUMN level;
+---- CATCH
+UnsupportedOperationException: Unsupported ALTER TABLE operation for Iceberg tables: DROP_COLUMN
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog CHANGE COLUMN level level1 STRING;
+---- CATCH
+UnsupportedOperationException: Unsupported ALTER TABLE operation for Iceberg tables: ALTER_COLUMN
+====
+---- QUERY
+ALTER TABLE iceberg_table_hadoop_catalog REPLACE COLUMNS(level INT, register_time DATE);
+---- CATCH
+UnsupportedOperationException: Unsupported ALTER TABLE operation for Iceberg tables: REPLACE_COLUMNS
+====
diff --git a/tests/query_test/test_iceberg.py b/tests/query_test/test_iceberg.py
index ad39a19..5b772b3 100644
--- a/tests/query_test/test_iceberg.py
+++ b/tests/query_test/test_iceberg.py
@@ -38,6 +38,9 @@ class TestIcebergTable(ImpalaTestSuite):
   def test_create_iceberg_tables(self, vector, unique_database):
     self.run_test_case('QueryTest/iceberg-create', vector, use_db=unique_database)
 
+  def test_alter_iceberg_tables(self, vector, unique_database):
+    self.run_test_case('QueryTest/iceberg-alter', vector, use_db=unique_database)
+
   @SkipIf.not_hdfs
   def test_drop_incomplete_table(self, vector, unique_database):
     """Test DROP TABLE when the underlying directory is deleted. In that case table