Posted to commits@hive.apache.org by lp...@apache.org on 2022/06/03 06:27:07 UTC

[hive] branch master updated: HIVE-26228: Implement Iceberg table rollback feature (#3287) (Laszlo Pinter, reviewed by Adam Szita and Peter Vary)

This is an automated email from the ASF dual-hosted git repository.

lpinter pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new d237a30728 HIVE-26228: Implement Iceberg table rollback feature (#3287) (Laszlo Pinter, reviewed by Adam Szita and Peter Vary)
d237a30728 is described below

commit d237a307286ee0431a9a9ad148896041b5a13ebd
Author: László Pintér <47...@users.noreply.github.com>
AuthorDate: Fri Jun 3 08:26:58 2022 +0200

    HIVE-26228: Implement Iceberg table rollback feature (#3287) (Laszlo Pinter, reviewed by Adam Szita and Peter Vary)
---
 .../src/test/results/negative/hbase_ddl.q.out      |   2 +-
 .../iceberg/mr/hive/HiveIcebergMetaHook.java       |   2 +-
 .../iceberg/mr/hive/HiveIcebergStorageHandler.java |  22 ++++
 .../apache/iceberg/mr/hive/IcebergTableUtil.java   |  20 ++++
 .../iceberg/mr/hive/HiveIcebergTestUtils.java      |  22 ++++
 .../iceberg/mr/hive/TestHiveIcebergRollback.java   | 133 +++++++++++++++++++++
 .../iceberg/mr/hive/TestHiveIcebergTimeTravel.java |  63 +++-------
 .../org/apache/iceberg/mr/hive/TestTables.java     |  30 +++++
 .../test/queries/positive/rollback_iceberg_table.q |   9 ++
 .../results/positive/rollback_iceberg_table.q.out  |  30 +++++
 .../hadoop/hive/ql/parse/AlterClauseParser.g       |   8 ++
 .../org/apache/hadoop/hive/ql/parse/HiveParser.g   |   1 +
 .../ddl/table/AbstractBaseAlterTableAnalyzer.java  |   6 +-
 .../hadoop/hive/ql/ddl/table/AlterTableType.java   |   3 +-
 .../table/execute/AlterTableExecuteAnalyzer.java   |  85 +++++++++++++
 .../ddl/table/execute/AlterTableExecuteDesc.java   |  58 +++++++++
 .../table/execute/AlterTableExecuteOperation.java  |  40 +++++++
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   |  11 +-
 .../hive/ql/metadata/HiveStorageHandler.java       |   8 ++
 .../hive/ql/parse/AlterTableExecuteSpec.java       |  94 +++++++++++++++
 .../apache/hadoop/hive/ql/plan/HiveOperation.java  |   1 +
 .../authorization/plugin/HiveOperationType.java    |   1 +
 .../plugin/sqlstd/Operation2Privilege.java         |   2 +
 .../results/clientnegative/alter_non_native.q.out  |   2 +-
 24 files changed, 595 insertions(+), 58 deletions(-)
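
The patch introduces ALTER TABLE ... EXECUTE ROLLBACK for Iceberg tables, accepting either
a snapshot id (Number) or a timestamp (StringLiteral); for Hive Catalog tables a rollback
can itself be reverted by restoring the previous metadata location. A minimal sketch in the
style of the new tests below, assuming the test harness from this patch (shell, testTables);
the table name "source" and the timestamp are illustrative:

    // Assumes the HiveIcebergStorageHandlerWithEngineBase harness added/used in this patch.
    Table table = testTables.createTableWithVersions(shell, "source",
        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
    // Roll back to an explicit snapshot id (VERSION based):
    shell.executeStatement("ALTER TABLE source EXECUTE ROLLBACK(" +
        table.history().get(0).snapshotId() + ")");
    // Roll back to the last snapshot before a timestamp (TIME based):
    shell.executeStatement("ALTER TABLE source EXECUTE ROLLBACK('2022-05-12 00:00:00')");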

diff --git a/hbase-handler/src/test/results/negative/hbase_ddl.q.out b/hbase-handler/src/test/results/negative/hbase_ddl.q.out
index e4c146b8a7..60d7279814 100644
--- a/hbase-handler/src/test/results/negative/hbase_ddl.q.out
+++ b/hbase-handler/src/test/results/negative/hbase_ddl.q.out
@@ -26,4 +26,4 @@ key                 	int                 	It is a column key
 value               	string              	It is the column string value
 	 	 
 #### A masked pattern was here ####
-FAILED: SemanticException [Error 10134]: ALTER TABLE can only be used for [ADDPROPS, DROPPROPS, ADDCOLS] to a non-native table  hbase_table_1
+FAILED: SemanticException [Error 10134]: ALTER TABLE can only be used for [ADDPROPS, DROPPROPS, ADDCOLS, EXECUTE] to a non-native table  hbase_table_1
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
index 13142ce144..c8f652d577 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
@@ -108,7 +108,7 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
   static final EnumSet<AlterTableType> SUPPORTED_ALTER_OPS = EnumSet.of(
       AlterTableType.ADDCOLS, AlterTableType.REPLACE_COLUMNS, AlterTableType.RENAME_COLUMN,
       AlterTableType.ADDPROPS, AlterTableType.DROPPROPS, AlterTableType.SETPARTITIONSPEC,
-      AlterTableType.UPDATE_COLUMNS);
+      AlterTableType.UPDATE_COLUMNS, AlterTableType.EXECUTE);
   private static final List<String> MIGRATION_ALLOWED_SOURCE_FORMATS = ImmutableList.of(
       FileFormat.PARQUET.name().toLowerCase(),
       FileFormat.ORC.name().toLowerCase(),
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
index a6b5c6deba..c693c941e4 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
+import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.PartitionTransformSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
@@ -451,6 +452,27 @@ public class HiveIcebergStorageHandler implements HiveStoragePredicateHandler, H
     return true;
   }
 
+  @Override
+  public void executeOperation(org.apache.hadoop.hive.ql.metadata.Table hmsTable, AlterTableExecuteSpec executeSpec) {
+    switch (executeSpec.getOperationType()) {
+      case ROLLBACK:
+        TableDesc tableDesc = Utilities.getTableDesc(hmsTable);
+        Table icebergTable = IcebergTableUtil.getTable(conf, tableDesc.getProperties());
+        LOG.info("Executing rollback operation on iceberg table. If you would like to revert rollback you could " +
+              "try altering the metadata location to the current metadata location by executing the following query:" +
+              "ALTER TABLE {}.{} SET TBLPROPERTIES('metadata_location'='{}'). This operation is supported for Hive " +
+              "Catalog tables.", hmsTable.getDbName(), hmsTable.getTableName(),
+            ((BaseTable) icebergTable).operations().current().metadataFileLocation());
+        AlterTableExecuteSpec.RollbackSpec rollbackSpec =
+            (AlterTableExecuteSpec.RollbackSpec) executeSpec.getOperationParams();
+        IcebergTableUtil.rollback(icebergTable, rollbackSpec.getRollbackType(), rollbackSpec.getParam());
+        break;
+      default:
+        throw new UnsupportedOperationException(
+            String.format("Operation type %s is not supported", executeSpec.getOperationType().name()));
+    }
+  }
+
   @Override
   public boolean isValidMetadataTable(String metaTableName) {
     return IcebergMetadataTables.isValidMetaTable(metaTableName);
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
index 6e2c01a72a..6e471f7be3 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
@@ -24,8 +24,10 @@ import java.util.Properties;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.PartitionTransformSpec;
 import org.apache.hadoop.hive.ql.session.SessionStateUtil;
+import org.apache.iceberg.ManageSnapshots;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.Table;
@@ -184,4 +186,22 @@ public class IcebergTableUtil {
   public static boolean isBucketed(Table table) {
     return table.spec().fields().stream().anyMatch(f -> f.transform().toString().startsWith("bucket["));
   }
+
+  /**
+   * Rolls an Iceberg table's data back to a specific snapshot: either one identified by id, or the last one before a given timestamp.
+   * @param table the iceberg table
+   * @param type the type of the rollback; either time based or version based
+   * @param value parameter of the rollback; either a timestamp in millis or a snapshot id
+   */
+  public static void rollback(Table table, AlterTableExecuteSpec.RollbackSpec.RollbackType type, Long value) {
+    ManageSnapshots manageSnapshots = table.manageSnapshots();
+    if (type == AlterTableExecuteSpec.RollbackSpec.RollbackType.TIME) {
+      LOG.debug("Trying to rollback iceberg table to snapshot before timestamp {}", value);
+      manageSnapshots.rollbackToTime(value);
+    } else {
+      LOG.debug("Trying to rollback iceberg table to snapshot ID {}", value);
+      manageSnapshots.rollbackTo(value);
+    }
+    manageSnapshots.commit();
+  }
 }
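
The helper above wraps Iceberg's ManageSnapshots API; note that the single Long parameter
carries either a snapshot id or epoch millis, disambiguated by RollbackType. A standalone
sketch of the same calls, assuming an already-loaded org.apache.iceberg.Table and valid
inputs:

    // Equivalent ManageSnapshots usage; nothing changes until commit().
    static void rollbackSketch(org.apache.iceberg.Table table, long snapshotId, long timestampMillis) {
      org.apache.iceberg.ManageSnapshots ms = table.manageSnapshots();
      ms.rollbackTo(snapshotId);             // VERSION: must be an ancestor of the current state
      // ms.rollbackToTime(timestampMillis); // TIME: last snapshot before the timestamp
      ms.commit();                           // the rollback takes effect only on commit()
    }
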
diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergTestUtils.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergTestUtils.java
index 79e3dfee9e..5e217acc82 100644
--- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergTestUtils.java
+++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergTestUtils.java
@@ -27,6 +27,7 @@ import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.sql.Timestamp;
+import java.text.SimpleDateFormat;
 import java.time.LocalDate;
 import java.time.LocalDateTime;
 import java.time.LocalTime;
@@ -35,6 +36,7 @@ import java.time.ZoneId;
 import java.time.ZoneOffset;
 import java.util.Arrays;
 import java.util.Comparator;
+import java.util.Date;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -59,6 +61,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobID;
 import org.apache.iceberg.DeleteFile;
 import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.HistoryEntry;
 import org.apache.iceberg.PartitionKey;
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.Table;
@@ -379,4 +382,23 @@ public class HiveIcebergTestUtils {
     return posWriter.toDeleteFile();
   }
 
+  /**
+   * Get a timestamp string which we can use in queries. The timestamp will fall after the given snapshot
+   * and before the next one, if any.
+   * @param table The table which we want to query
+   * @param snapshotPosition The position of the last snapshot we want to see in the query results
+   * @return The timestamp which we can use in the queries
+   */
+  public static String timestampAfterSnapshot(Table table, int snapshotPosition) {
+    List<HistoryEntry> history = table.history();
+    long snapshotTime = history.get(snapshotPosition).timestampMillis();
+    long time = snapshotTime + 100;
+    if (history.size() > snapshotPosition + 1) {
+      time = snapshotTime + ((history.get(snapshotPosition + 1).timestampMillis() - snapshotTime) / 2);
+    }
+
+    SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS000000");
+    return simpleDateFormat.format(new Date(time));
+  }
+
 }
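
A worked example of the midpoint logic above, with illustrative timestamps: for snapshots
written at 1000 ms and 2000 ms, timestampAfterSnapshot(table, 0) formats 1500 ms, landing
strictly between the two snapshots; the last snapshot in the history simply gets its own
timestamp plus 100 ms:

    // Illustrative arithmetic only; the real values come from table.history().
    long snapshotTime = 1000L;   // history.get(pos).timestampMillis()
    long nextTime = 2000L;       // history.get(pos + 1).timestampMillis()
    long midpoint = snapshotTime + ((nextTime - snapshotTime) / 2);  // 1500
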
diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergRollback.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergRollback.java
new file mode 100644
index 0000000000..52a3cccd72
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergRollback.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.mr.hive;
+
+
+import java.io.IOException;
+import org.apache.iceberg.AssertHelpers;
+import org.apache.iceberg.BaseTable;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Test;
+
+
+/**
+ * Tests covering the Iceberg table rollback feature.
+ */
+public class TestHiveIcebergRollback extends HiveIcebergStorageHandlerWithEngineBase {
+
+  @Test
+  public void testRollbackToTimestamp() throws IOException, InterruptedException {
+    TableIdentifier identifier = TableIdentifier.of("default", "source");
+    Table table = testTables.createTableWithVersions(shell, identifier.name(),
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 3);
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK('" +
+        HiveIcebergTestUtils.timestampAfterSnapshot(table, 2) + "')");
+    Assert.assertEquals(5, shell.executeStatement("SELECT * FROM " + identifier.name()).size());
+    Assert.assertEquals(3, table.history().size());
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK('" +
+        HiveIcebergTestUtils.timestampAfterSnapshot(table, 1) + "')");
+    Assert.assertEquals(4, shell.executeStatement("SELECT * FROM " + identifier.name()).size());
+    table.refresh();
+    Assert.assertEquals(4, table.history().size());
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK('" +
+        HiveIcebergTestUtils.timestampAfterSnapshot(table, 0) + "')");
+    Assert.assertEquals(3, shell.executeStatement("SELECT * FROM " + identifier.name()).size());
+    table.refresh();
+    Assert.assertEquals(5, table.history().size());
+  }
+
+  @Test
+  public void testRollbackToVersion() throws IOException, InterruptedException {
+    TableIdentifier identifier = TableIdentifier.of("default", "source");
+    Table table = testTables.createTableWithVersions(shell, identifier.name(),
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 3);
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK(" +
+        table.history().get(2).snapshotId() + ")");
+    Assert.assertEquals(5, shell.executeStatement("SELECT * FROM " + identifier.name()).size());
+    table.refresh();
+    Assert.assertEquals(3, table.history().size());
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK(" +
+        table.history().get(1).snapshotId() + ")");
+    Assert.assertEquals(4, shell.executeStatement("SELECT * FROM " + identifier.name()).size());
+    table.refresh();
+    Assert.assertEquals(4, table.history().size());
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK(" +
+        table.history().get(0).snapshotId() + ")");
+    Assert.assertEquals(3, shell.executeStatement("SELECT * FROM " + identifier.name()).size());
+    table.refresh();
+    Assert.assertEquals(5, table.history().size());
+  }
+
+  @Test
+  public void testRevertRollback() throws IOException, InterruptedException {
+    Assume.assumeTrue("Rollback revert is only supported for tables from Hive Catalog",
+        testTableType.equals(TestTables.TestTableType.HIVE_CATALOG));
+    TableIdentifier identifier = TableIdentifier.of("default", "source");
+    Table table = testTables.createTableWithVersions(shell, identifier.name(),
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
+    String metadataLocationBeforeRollback = ((BaseTable) table).operations().current().metadataFileLocation();
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK(" +
+        table.history().get(0).snapshotId() + ")");
+    Assert.assertEquals(3, shell.executeStatement("SELECT * FROM " + identifier.name()).size());
+    table.refresh();
+    Assert.assertEquals(3, table.history().size());
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " SET TBLPROPERTIES('metadata_location'='" +
+        metadataLocationBeforeRollback + "')");
+    Assert.assertEquals(4, shell.executeStatement("SELECT * FROM " + identifier.name()).size());
+    table.refresh();
+    Assert.assertEquals(2, table.history().size());
+  }
+
+  @Test
+  public void testInvalidRollbackToTimestamp() throws IOException, InterruptedException {
+    TableIdentifier identifier = TableIdentifier.of("default", "source");
+    testTables.createTableWithVersions(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+        fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
+    AssertHelpers.assertThrows("should throw exception", IllegalArgumentException.class,
+        "Cannot roll back, no valid snapshot older than", () -> {
+        shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK('1970-01-01 00:00:00')");
+        });
+  }
+
+  @Test
+  public void testInvalidRollbackToVersion() throws IOException, InterruptedException {
+    TableIdentifier identifier = TableIdentifier.of("default", "source");
+    Table table = testTables.createTableWithVersions(shell, identifier.name(),
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
+    AssertHelpers.assertThrows("should throw exception", IllegalArgumentException.class,
+        "Cannot roll back to unknown snapshot id", () -> {
+        shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK(1111)");
+        });
+    shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK(" +
+        table.history().get(0).snapshotId() + ")");
+    AssertHelpers.assertThrows("should throw exception", IllegalArgumentException.class,
+        "Cannot roll back to snapshot, not an ancestor of the current state", () -> {
+        shell.executeStatement("ALTER TABLE " + identifier.name() + " EXECUTE ROLLBACK(" +
+            table.history().get(1).snapshotId() + ")");
+        });
+  }
+}
diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergTimeTravel.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergTimeTravel.java
index e2aac33d82..233f87a857 100644
--- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergTimeTravel.java
+++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergTimeTravel.java
@@ -20,8 +20,6 @@
 package org.apache.iceberg.mr.hive;
 
 import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
 import java.util.List;
 import org.apache.iceberg.AssertHelpers;
 import org.apache.iceberg.HistoryEntry;
@@ -29,6 +27,8 @@ import org.apache.iceberg.Table;
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.apache.iceberg.mr.hive.HiveIcebergTestUtils.timestampAfterSnapshot;
+
 /**
  * Tests covering the time travel feature, aka reading from a table as of a certain snapshot.
  */
@@ -36,7 +36,9 @@ public class TestHiveIcebergTimeTravel extends HiveIcebergStorageHandlerWithEngi
 
   @Test
   public void testSelectAsOfTimestamp() throws IOException, InterruptedException {
-    Table table = prepareTableWithVersions(2);
+    Table table = testTables.createTableWithVersions(shell, "customers",
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+        fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
 
     List<Object[]> rows = shell.executeStatement(
         "SELECT * FROM customers FOR SYSTEM_TIME AS OF '" + timestampAfterSnapshot(table, 0) + "'");
@@ -56,7 +58,9 @@ public class TestHiveIcebergTimeTravel extends HiveIcebergStorageHandlerWithEngi
 
   @Test
   public void testSelectAsOfVersion() throws IOException, InterruptedException {
-    Table table = prepareTableWithVersions(2);
+    Table table = testTables.createTableWithVersions(shell, "customers",
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+        fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
 
     HistoryEntry first = table.history().get(0);
     List<Object[]> rows =
@@ -77,7 +81,9 @@ public class TestHiveIcebergTimeTravel extends HiveIcebergStorageHandlerWithEngi
 
   @Test
   public void testCTASAsOfVersionAndTimestamp() throws IOException, InterruptedException {
-    Table table = prepareTableWithVersions(3);
+    Table table = testTables.createTableWithVersions(shell, "customers",
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 3);
 
     shell.executeStatement("CREATE TABLE customers2 AS SELECT * FROM customers FOR SYSTEM_VERSION AS OF " +
         table.history().get(0).snapshotId());
@@ -106,7 +112,9 @@ public class TestHiveIcebergTimeTravel extends HiveIcebergStorageHandlerWithEngi
 
   @Test
   public void testAsOfWithJoins() throws IOException, InterruptedException {
-    Table table = prepareTableWithVersions(4);
+    Table table = testTables.createTableWithVersions(shell, "customers",
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 4);
 
     List<Object[]> rows = shell.executeStatement("SELECT * FROM " +
         "customers FOR SYSTEM_TIME AS OF '" + timestampAfterSnapshot(table, 0) + "' fv, " +
@@ -136,47 +144,4 @@ public class TestHiveIcebergTimeTravel extends HiveIcebergStorageHandlerWithEngi
 
     Assert.assertEquals(8, rows.size());
   }
-
-  /**
-   * Creates the 'customers' table with the default records and creates extra snapshots by inserting one more line
-   * into the table.
-   * @param versions The number of history elements we want to create
-   * @return The table created
-   * @throws IOException When there is a problem during table creation
-   * @throws InterruptedException When there is a problem during adding new data to the table
-   */
-  private Table prepareTableWithVersions(int versions) throws IOException, InterruptedException {
-    Table table = testTables.createTable(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
-        fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
-
-    for (int i = 0; i < versions - 1; ++i) {
-      // Just wait a little so we definitely will not have the same timestamp for the snapshots
-      Thread.sleep(100);
-      shell.executeStatement("INSERT INTO customers values(" +
-          (i + HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS.size()) + ",'Alice','Green_" + i + "')");
-    }
-
-    table.refresh();
-
-    return table;
-  }
-
-  /**
-   * Get the timestamp string which we can use in the queries. The timestamp will be after the given snapshot
-   * and before the next one
-   * @param table The table which we want to query
-   * @param snapshotPosition The position of the last snapshot we want to see in the query results
-   * @return The timestamp which we can use in the queries
-   */
-  private String timestampAfterSnapshot(Table table, int snapshotPosition) {
-    List<HistoryEntry> history = table.history();
-    long snapshotTime = history.get(snapshotPosition).timestampMillis();
-    long time = snapshotTime + 100;
-    if (history.size() > snapshotPosition + 1) {
-      time = snapshotTime + ((history.get(snapshotPosition + 1).timestampMillis() - snapshotTime) / 2);
-    }
-
-    SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS000000");
-    return simpleDateFormat.format(new Date(time));
-  }
 }
diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestTables.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestTables.java
index 91c70dde62..f2b58d776c 100644
--- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestTables.java
+++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestTables.java
@@ -169,6 +169,36 @@ abstract class TestTables {
     return createTable(shell, tableName, schema, fileFormat, records, 1);
   }
 
+
+  /**
+   * Creates a non partitioned Hive test table. Creates the Iceberg table/data and creates the corresponding Hive
+   * table as well when needed. The table will be in the 'default' database, populated with the provided List of
+   * {@link Record}s, and extra snapshots are created by inserting one more row per additional version.
+   * @param shell The HiveShell used for Hive table creation
+   * @param tableName The name of the test table
+   * @param schema The schema used for the table creation
+   * @param fileFormat The file format used for writing the data
+   * @param records The records with which the table is populated
+   * @param versions The number of history elements we want to create
+   * @return The created table
+   * @throws IOException If there is an error writing data
+   */
+  public Table createTableWithVersions(TestHiveShell shell, String tableName, Schema schema, FileFormat fileFormat,
+      List<Record> records, int versions) throws IOException, InterruptedException {
+    Table table = createTable(shell, tableName, schema, fileFormat, records);
+
+    for (int i = 0; i < versions - 1; ++i) {
+      // Just wait a little so we definitely will not have the same timestamp for the snapshots
+      Thread.sleep(100);
+      shell.executeStatement("INSERT INTO " + tableName + " values(" +
+          (i + records.size()) + ",'Alice','Green_" + i + "')");
+    }
+
+    table.refresh();
+
+    return table;
+  }
+
   /**
    * Creates a non partitioned Hive test table. Creates the Iceberg table/data and creates the corresponding Hive
    * table as well when needed. The table will be in the 'default' database. The table will be populated with the
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/rollback_iceberg_table.q b/iceberg/iceberg-handler/src/test/queries/positive/rollback_iceberg_table.q
new file mode 100644
index 0000000000..0db54dd38b
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/positive/rollback_iceberg_table.q
@@ -0,0 +1,9 @@
+-- Mask the totalSize value as it can have slight variability, causing test flakiness
+--! qt:replace:/(\s+totalSize\s+)\S+(\s+)/$1#Masked#$2/
+-- Mask random uuid
+--! qt:replace:/(\s+uuid\s+)\S+(\s*)/$1#Masked#$2/
+
+drop table if exists tbl_ice;
+create external table tbl_ice(a int, b string, c int) stored by iceberg stored as parquet;
+explain alter table tbl_ice execute rollback(11111);
+explain alter table tbl_ice execute rollback('2022-05-12 00:00:00');
\ No newline at end of file
diff --git a/iceberg/iceberg-handler/src/test/results/positive/rollback_iceberg_table.q.out b/iceberg/iceberg-handler/src/test/results/positive/rollback_iceberg_table.q.out
new file mode 100644
index 0000000000..394859ffe6
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/results/positive/rollback_iceberg_table.q.out
@@ -0,0 +1,30 @@
+PREHOOK: query: drop table if exists tbl_ice
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists tbl_ice
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create external table tbl_ice(a int, b string, c int) stored by iceberg stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_ice
+POSTHOOK: query: create external table tbl_ice(a int, b string, c int) stored by iceberg stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_ice
+PREHOOK: query: explain alter table tbl_ice execute rollback(11111)
+PREHOOK: type: ALTERTABLE_EXECUTE
+PREHOOK: Input: default@tbl_ice
+POSTHOOK: query: explain alter table tbl_ice execute rollback(11111)
+POSTHOOK: type: ALTERTABLE_EXECUTE
+POSTHOOK: Input: default@tbl_ice
+Stage-0
+  Execute operation{"table name:":"default.tbl_ice","spec:":"AlterTableExecuteSpec{operationType=ROLLBACK, operationParams=RollbackSpec{rollbackType=VERSION, param=11111}}"}
+
+PREHOOK: query: explain alter table tbl_ice execute rollback('2022-05-12 00:00:00')
+PREHOOK: type: ALTERTABLE_EXECUTE
+PREHOOK: Input: default@tbl_ice
+POSTHOOK: query: explain alter table tbl_ice execute rollback('2022-05-12 00:00:00')
+POSTHOOK: type: ALTERTABLE_EXECUTE
+POSTHOOK: Input: default@tbl_ice
+Stage-0
+  Execute operation{"table name:":"default.tbl_ice","spec:":"AlterTableExecuteSpec{operationType=ROLLBACK, operationParams=RollbackSpec{rollbackType=TIME, param=1652338800000}}"}
+
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
index 321eb1af81..a89055d760 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
@@ -73,6 +73,7 @@ alterTableStatementSuffix
     | partitionSpec alterTblPartitionStatementSuffix[true] -> alterTblPartitionStatementSuffix partitionSpec
     | alterStatementSuffixSetOwner
     | alterStatementSuffixSetPartSpec
+    | alterStatementSuffixExecute
     ;
 
 alterTblPartitionStatementSuffix[boolean partition]
@@ -452,6 +453,13 @@ alterStatementSuffixSetPartSpec
     -> ^(TOK_ALTERTABLE_SETPARTSPEC $spec)
     ;
 
+alterStatementSuffixExecute
+@init { gParent.pushMsg("alter table execute", state); }
+@after { gParent.popMsg(state); }
+    : KW_EXECUTE KW_ROLLBACK LPAREN (rollbackParam=(StringLiteral | Number)) RPAREN
+    -> ^(TOK_ALTERTABLE_EXECUTE KW_ROLLBACK $rollbackParam)
+    ;
+
 fileFormat
 @init { gParent.pushMsg("file format specification", state); }
 @after { gParent.popMsg(state); }
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 9a2c485360..25bd5a259f 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -215,6 +215,7 @@ TOK_ALTERTABLE_ADDCONSTRAINT;
 TOK_ALTERTABLE_UPDATECOLUMNS;
 TOK_ALTERTABLE_OWNER;
 TOK_ALTERTABLE_SETPARTSPEC;
+TOK_ALTERTABLE_EXECUTE;
 TOK_MSCK;
 TOK_SHOWDATABASES;
 TOK_SHOWDATACONNECTORS;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java
index 03aa02293b..17f9fec4d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java
@@ -159,12 +159,10 @@ public abstract class AbstractBaseAlterTableAnalyzer extends BaseSemanticAnalyze
         throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg());
       }
     }
-    if (table.isNonNative() && table.getStorageHandler() != null) {
-      if (!table.getStorageHandler().isAllowedAlterOperation(op) ||
-          (op == AlterTableType.SETPARTITIONSPEC && !table.getStorageHandler().supportsPartitionTransform())) {
+    if (table.isNonNative() && table.getStorageHandler() != null &&
+        !table.getStorageHandler().isAllowedAlterOperation(op)) {
         throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.format(
             AlterTableType.NON_NATIVE_TABLE_ALLOWED.toString(), table.getTableName()));
-      }
     }
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableType.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableType.java
index 9b1de08956..df5ba186b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableType.java
@@ -39,6 +39,7 @@ public enum AlterTableType {
   RENAMEPARTITION("rename partition"), // Note: used in RenamePartitionDesc, not here.
   ALTERPARTITION("alter partition"), // Note: this is never used in AlterTableDesc.
   SETPARTITIONSPEC("set partition spec"),
+  EXECUTE("execute"),
   // constraint
   ADD_CONSTRAINT("add constraint"),
   DROP_CONSTRAINT("drop constraint"),
@@ -78,7 +79,7 @@ public enum AlterTableType {
   }
 
   public static final List<AlterTableType> NON_NATIVE_TABLE_ALLOWED =
-      ImmutableList.of(ADDPROPS, DROPPROPS, ADDCOLS);
+      ImmutableList.of(ADDPROPS, DROPPROPS, ADDCOLS, EXECUTE);
 
   public static final Set<AlterTableType> SUPPORT_PARTIAL_PARTITION_SPEC =
       ImmutableSet.of(ADDCOLS, REPLACE_COLUMNS, RENAME_COLUMN, ADDPROPS, DROPPROPS, SET_SERDE,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteAnalyzer.java
new file mode 100644
index 0000000000..6c4471dc60
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteAnalyzer.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.execute;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.common.type.TimestampTZUtil;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+import java.time.ZoneId;
+import java.util.Map;
+
+import static org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec.ExecuteOperationType.ROLLBACK;
+import static org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec.RollbackSpec.RollbackType.TIME;
+import static org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec.RollbackSpec.RollbackType.VERSION;
+
+/**
+ * Analyzer for ALTER TABLE ... EXECUTE commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERTABLE_EXECUTE)
+public class AlterTableExecuteAnalyzer extends AbstractAlterTableAnalyzer {
+
+  public AlterTableExecuteAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+    Table table = getTable(tableName);
+    // the first child must be the execute operation type
+    ASTNode executeCommandType = (ASTNode) command.getChild(0);
+    validateAlterTableType(table, AlterTableType.EXECUTE, false);
+    inputs.add(new ReadEntity(table));
+    AlterTableExecuteDesc desc = null;
+    if (HiveParser.KW_ROLLBACK == executeCommandType.getType()) {
+      AlterTableExecuteSpec<AlterTableExecuteSpec.RollbackSpec> spec;
+      // the second child must be the rollback parameter
+      ASTNode child = (ASTNode) command.getChild(1);
+
+      if (child.getType() == HiveParser.StringLiteral) {
+        ZoneId timeZone = SessionState.get() == null ? new HiveConf().getLocalTimeZone() : SessionState.get().getConf()
+            .getLocalTimeZone();
+        TimestampTZ time = TimestampTZUtil.parse(PlanUtils.stripQuotes(child.getText()), timeZone);
+        spec = new AlterTableExecuteSpec(ROLLBACK, new AlterTableExecuteSpec.RollbackSpec(TIME, time.toEpochMilli()));
+      } else {
+        spec = new AlterTableExecuteSpec(ROLLBACK, new AlterTableExecuteSpec.RollbackSpec(VERSION,
+            Long.valueOf(child.getText())));
+      }
+      desc = new AlterTableExecuteDesc(tableName, partitionSpec, spec);
+    }
+
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+  }
+}
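
For the TIME-based branch above, TimestampTZUtil converts the quoted literal to epoch
millis in the session's local zone. For example, the qtest output earlier in this patch
shows param=1652338800000 for '2022-05-12 00:00:00'; that matches a US/Pacific local zone
(an assumption about the qtest environment), i.e. 2022-05-12 07:00:00 UTC:

    // Minimal sketch of the TIME parameter conversion; the zone is an assumption.
    import java.time.ZoneId;
    import org.apache.hadoop.hive.common.type.TimestampTZ;
    import org.apache.hadoop.hive.common.type.TimestampTZUtil;

    public class RollbackTimestampSketch {
      public static void main(String[] args) {
        TimestampTZ time = TimestampTZUtil.parse("2022-05-12 00:00:00", ZoneId.of("US/Pacific"));
        System.out.println(time.toEpochMilli());  // 1652338800000
      }
    }
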
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteDesc.java
new file mode 100644
index 0000000000..ba5ddf55d9
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteDesc.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.ddl.table.execute;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+import java.util.Map;
+
+/**
+ * DDL task description for ALTER TABLE ... EXECUTE commands.
+ */
+@Explain(displayName = "Execute operation", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterTableExecuteDesc extends AbstractAlterTableDesc {
+  private static final long serialVersionUID = 1L;
+
+  private final AlterTableExecuteSpec executeSpec;
+
+  public AlterTableExecuteDesc(TableName tableName, Map<String, String> partitionSpec, AlterTableExecuteSpec executeSpec)
+      throws SemanticException {
+    super(AlterTableType.EXECUTE, tableName, partitionSpec, null, false, false, null);
+    this.executeSpec = executeSpec;
+  }
+
+  public AlterTableExecuteSpec getExecuteSpec() {
+    return executeSpec;
+  }
+
+  @Explain(displayName = "spec", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getExplainOutput() {
+    return executeSpec.toString();
+  }
+
+  @Override
+  public boolean mayNeedWriteId() {
+    return false;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteOperation.java
new file mode 100644
index 0000000000..e36234c7ef
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/execute/AlterTableExecuteOperation.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.execute;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.Table;
+
+/**
+ * Operation process of an ALTER TABLE ... EXECUTE command.
+ */
+public class AlterTableExecuteOperation extends DDLOperation<AlterTableExecuteDesc> {
+
+  public AlterTableExecuteOperation(DDLOperationContext context, AlterTableExecuteDesc desc) {
+    super(context, desc);
+  }
+
+  @Override
+  public int execute() throws Exception {
+    Table table = context.getDb().getTable(desc.getFullTableName());
+    context.getDb().alterTableExecuteOperation(table, desc.getExecuteSpec());
+    return 0;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index d2617ca006..719c3edf24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -211,6 +211,7 @@ import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
@@ -223,7 +224,6 @@ import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
@@ -6505,4 +6505,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
       throw new HiveException(e);
     }
   }
+
+  public void alterTableExecuteOperation(Table table, AlterTableExecuteSpec executeSpec) throws HiveException {
+    try {
+      HiveStorageHandler storageHandler = createStorageHandler(table.getTTable());
+      storageHandler.executeOperation(table, executeSpec);
+    } catch (MetaException e) {
+      throw new HiveException(e);
+    }
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
index 5dac8c0661..b4b3cfca9d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.ql.Context.Operation;
 import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.PartitionTransformSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
@@ -462,4 +463,11 @@ public interface HiveStorageHandler extends Configurable {
    */
   default void validateSinkDesc(FileSinkDesc sinkDesc) throws SemanticException {
   }
+
+  /**
+   * Executes an operation at the storage handler level.
+   * @param table the target table
+   * @param executeSpec operation specification
+   */
+  default void executeOperation(org.apache.hadoop.hive.ql.metadata.Table table, AlterTableExecuteSpec executeSpec) {
+  }
 }
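
End to end, the new command flows from AlterTableExecuteAnalyzer through
AlterTableExecuteOperation and Hive.alterTableExecuteOperation() into this hook; the
default above is a no-op, so only handlers that override it (the Iceberg handler earlier
in this patch) act on EXECUTE. A hypothetical non-Iceberg handler could opt in like this
(sketch; the class and its rollback logic are illustrative):

    import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;

    public class MyStorageHandler extends org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler {
      @Override
      public void executeOperation(org.apache.hadoop.hive.ql.metadata.Table table,
          AlterTableExecuteSpec executeSpec) {
        switch (executeSpec.getOperationType()) {
          case ROLLBACK:
            // interpret executeSpec.getOperationParams() and roll the table back
            break;
          default:
            throw new UnsupportedOperationException(
                String.format("Operation type %s is not supported", executeSpec.getOperationType().name()));
        }
      }
    }
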
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableExecuteSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableExecuteSpec.java
new file mode 100644
index 0000000000..5480b090c3
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableExecuteSpec.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Execute operation specification. It stores the type of the operation and its parameters.
+ * The following operations are supported:
+ * <ul>
+ *   <li>Rollback</li>
+ * </ul>
+ * @param <T> Value object class storing the operation-specific parameters
+ */
+public class AlterTableExecuteSpec<T> {
+
+  public enum ExecuteOperationType {
+    ROLLBACK
+  }
+
+  private final ExecuteOperationType operationType;
+  private final T operationParams;
+
+  public AlterTableExecuteSpec(ExecuteOperationType type, T value) {
+    this.operationType = type;
+    this.operationParams = value;
+  }
+
+  public ExecuteOperationType getOperationType() {
+    return operationType;
+  }
+
+  public T getOperationParams() {
+    return operationParams;
+  }
+
+  @Override
+  public String toString() {
+    return MoreObjects.toStringHelper(this).add("operationType", operationType.name())
+        .add("operationParams", operationParams).toString();
+  }
+
+  /**
+   * Value object class that stores the rollback operation's parameters:
+   * <ul>
+   *   <li>Rollback type: either version based or time based</li>
+   *   <li>Rollback value: either a snapshot id or a timestamp in milliseconds</li>
+   * </ul>
+   */
+  public static class RollbackSpec {
+
+    public enum RollbackType {
+      VERSION, TIME
+    }
+
+    private final RollbackType rollbackType;
+    private final Long param;
+
+    public RollbackSpec(RollbackType rollbackType, Long param) {
+      this.rollbackType = rollbackType;
+      this.param = param;
+    }
+
+    public RollbackType getRollbackType() {
+      return rollbackType;
+    }
+
+    public Long getParam() {
+      return param;
+    }
+
+    @Override
+    public String toString() {
+      return MoreObjects.toStringHelper(this).add("rollbackType", rollbackType.name())
+          .add("param", param).toString();
+    }
+  }
+}
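
The toString() above is exactly what EXPLAIN renders (see rollback_iceberg_table.q.out
earlier in this patch). The VERSION case from that qtest can be reconstructed as follows
(sketch):

    import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
    import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec.RollbackSpec;

    public class ExecuteSpecSketch {
      public static void main(String[] args) {
        AlterTableExecuteSpec<RollbackSpec> spec = new AlterTableExecuteSpec<>(
            AlterTableExecuteSpec.ExecuteOperationType.ROLLBACK,
            new RollbackSpec(RollbackSpec.RollbackType.VERSION, 11111L));
        // AlterTableExecuteSpec{operationType=ROLLBACK, operationParams=RollbackSpec{rollbackType=VERSION, param=11111}}
        System.out.println(spec);
      }
    }
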
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index 0c72ed7d04..03c40e68b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -76,6 +76,7 @@ public enum HiveOperation {
       new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_OWNER("ALTERTABLE_OWNER", HiveParser.TOK_ALTERTABLE_OWNER, null, null),
   ALTERTABLE_SETPARTSPEC("ALTERTABLE_SETPARTSPEC", HiveParser.TOK_ALTERTABLE_SETPARTSPEC, null, null),
+  ALTERTABLE_EXECUTE("ALTERTABLE_EXECUTE", HiveParser.TOK_ALTERTABLE_EXECUTE, null, null),
   ALTERTABLE_SERIALIZER("ALTERTABLE_SERIALIZER", HiveParser.TOK_ALTERTABLE_SERIALIZER,
       new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERPARTITION_SERIALIZER("ALTERPARTITION_SERIALIZER", HiveParser.TOK_ALTERPARTITION_SERIALIZER,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
index d903b9a551..52023affd9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
@@ -59,6 +59,7 @@ public enum HiveOperationType {
   ALTERTABLE_PROPERTIES,
   ALTERTABLE_OWNER,
   ALTERTABLE_SETPARTSPEC,
+  ALTERTABLE_EXECUTE,
   ALTERTABLE_SERIALIZER,
   ALTERTABLE_PARTCOLTYPE,
   ALTERTABLE_DROPCONSTRAINT,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
index 354f4f0e04..dcd9d9aac7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
@@ -190,6 +190,8 @@ public class Operation2Privilege {
         PrivRequirement.newIOPrivRequirement(OWNER_PRIV_AR, OWNER_PRIV_AR));
     op2Priv.put(HiveOperationType.ALTERTABLE_SETPARTSPEC,
         PrivRequirement.newIOPrivRequirement(OWNER_PRIV_AR, OWNER_PRIV_AR));
+    op2Priv.put(HiveOperationType.ALTERTABLE_EXECUTE,
+        PrivRequirement.newIOPrivRequirement(OWNER_PRIV_AR, OWNER_PRIV_AR));
     op2Priv.put(HiveOperationType.ALTERTABLE_SERIALIZER,
         PrivRequirement.newIOPrivRequirement(OWNER_PRIV_AR, OWNER_PRIV_AR));
     op2Priv.put(HiveOperationType.ALTERTABLE_PARTCOLTYPE,
diff --git a/ql/src/test/results/clientnegative/alter_non_native.q.out b/ql/src/test/results/clientnegative/alter_non_native.q.out
index bd8fb4fbf5..922d2147f4 100644
--- a/ql/src/test/results/clientnegative/alter_non_native.q.out
+++ b/ql/src/test/results/clientnegative/alter_non_native.q.out
@@ -8,4 +8,4 @@ STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@non_native1
-FAILED: SemanticException [Error 10134]: ALTER TABLE can only be used for [ADDPROPS, DROPPROPS, ADDCOLS] to a non-native table  non_native1
+FAILED: SemanticException [Error 10134]: ALTER TABLE can only be used for [ADDPROPS, DROPPROPS, ADDCOLS, EXECUTE] to a non-native table  non_native1