Posted to commits@hive.apache.org by ay...@apache.org on 2022/08/03 05:32:08 UTC

[hive] branch master updated: HIVE-24484: Upgrade Hadoop to 3.3.1 And Tez to 0.10.2 (#3279). (Ayush Saxena, reviewed by Laszlo Bodor, Zoltan Haindrich and Steve Loughran)

This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 8e0a1ec4b5c HIVE-24484: Upgrade Hadoop to 3.3.1 And Tez to 0.10.2 (#3279). (Ayush Saxena, reviewed by Laszlo Bodor, Zoltan Haindrich and Steve Loughran)
8e0a1ec4b5c is described below

commit 8e0a1ec4b5c6bdf92d5063d2a66df241899566f2
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Tue Aug 2 22:32:02 2022 -0700

    HIVE-24484: Upgrade Hadoop to 3.3.1 And Tez to 0.10.2 (#3279). (Ayush Saxena, reviewed by Laszlo Bodor, Zoltan Haindrich and Steve Loughran)
---
 common/pom.xml                                     |  6 +-
 data/conf/hive-site.xml                            | 16 ++++
 .../mapreduce/TestHCatMultiOutputFormat.java       | 29 ++++---
 .../apache/hive/hcatalog/api/TestHCatClient.java   | 20 +++--
 .../ql/parse/BaseReplicationAcrossInstances.java   |  8 +-
 .../parse/TestReplicationOnHDFSEncryptedZones.java | 99 +++++++---------------
 .../hive/ql/parse/TestReplicationScenarios.java    | 30 ++++---
 .../java/org/apache/hive/jdbc/TestJdbcDriver2.java | 14 +--
 .../cli/operation/OperationLoggingAPITestBase.java |  4 +-
 itests/pom.xml                                     |  6 ++
 itests/qtest-druid/pom.xml                         |  2 +-
 itests/qtest/pom.xml                               | 18 ++++
 llap-common/pom.xml                                |  1 -
 llap-server/pom.xml                                |  1 -
 .../hive/llap/daemon/impl/ContainerRunnerImpl.java |  4 +-
 .../hive/llap/daemon/impl/LlapTaskReporter.java    |  2 +-
 .../hive/llap/daemon/impl/TaskRunnerCallable.java  |  2 +-
 .../hive/llap/io/api/impl/LlapInputFormat.java     |  2 +
 .../hive/llap/tezplugins/LlapTaskCommunicator.java | 10 +--
 .../llap/tezplugins/LlapTaskSchedulerService.java  |  8 +-
 .../llap/tezplugins/TestLlapTaskCommunicator.java  |  1 +
 pom.xml                                            | 22 ++++-
 .../apache/hadoop/hive/ql/exec/FetchOperator.java  | 10 ++-
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java    |  7 ++
 .../hadoop/hive/ql/io/RecordReaderWrapper.java     | 11 ++-
 .../StorageBasedAuthorizationProvider.java         |  3 +-
 .../plugin/metastore/HiveMetaStoreAuthorizer.java  |  2 +
 .../clientpositive/acid_table_directories_test.q   |  2 +
 .../encryption_load_data_to_encrypted_tables.q     |  3 +-
 .../apache/hadoop/hive/shims/Hadoop23Shims.java    |  6 ++
 .../DefaultIncompatibleTableChangeHandler.java     |  2 +-
 .../apache/hadoop/hive/metastore/HMSHandler.java   |  6 +-
 .../hadoop/hive/metastore/HiveMetaStore.java       |  2 +-
 standalone-metastore/pom.xml                       |  8 +-
 storage-api/pom.xml                                |  2 +-
 .../hadoop/hive/common/ValidReadTxnList.java       |  4 +-
 streaming/pom.xml                                  |  5 ++
 .../org/apache/hive/streaming/TestStreaming.java   | 12 +--
 38 files changed, 236 insertions(+), 154 deletions(-)

diff --git a/common/pom.xml b/common/pom.xml
index c0fdaa70f0b..b64d57fe5fa 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -81,7 +81,6 @@
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-http</artifactId>
-      <version>${jetty.version}</version>
     </dependency>
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
@@ -195,6 +194,11 @@
       <artifactId>tez-api</artifactId>
       <version>${tez.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.fusesource.jansi</groupId>
+      <artifactId>jansi</artifactId>
+      <version>${jansi.version}</version>
+    </dependency>
     <!-- test inter-project -->
     <dependency>
       <groupId>com.google.code.tempus-fugit</groupId>
diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml
index 7681932c404..e8a3798749e 100644
--- a/data/conf/hive-site.xml
+++ b/data/conf/hive-site.xml
@@ -409,4 +409,20 @@
   <name>hive.txn.xlock.ctas</name>
   <value>false</value>
 </property>
+
+  <property>
+    <!-- Set large for tests. This acts as an artificial LIMIT. See HIVE-24484 for details -->
+    <name>hive.server2.thrift.resultset.max.fetch.size</name>
+    <value>1000000</value>
+  </property>
+
+  <property>
+    <name>hive.server2.webui.max.threads</name>
+    <value>4</value>
+  </property>
+
+  <property>
+    <name>hive.async.cleanup.service.thread.count</name>
+    <value>4</value>
+  </property>
 </configuration>
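
For readers skimming the new test-only properties above, a minimal sketch (illustrative only, not part of the commit) of setting the same fetch-size cap programmatically; it relies on the plain Hadoop Configuration API, and the value simply mirrors the hive-site.xml entry:

    import org.apache.hadoop.conf.Configuration;

    public final class FetchSizeSketch {
      // Build a Configuration carrying the large test-only fetch-size cap.
      // 1,000,000 mirrors the hive-site.xml value above and acts as an
      // artificial LIMIT on rows returned per fetch in tests.
      public static Configuration withTestFetchSize() {
        Configuration conf = new Configuration();
        conf.setInt("hive.server2.thrift.resultset.max.fetch.size", 1000000);
        return conf;
      }
    }
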
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
index 93d1418afad..e601992fc40 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
@@ -279,6 +280,10 @@ public class TestHCatMultiOutputFormat {
     infoList.add(OutputJobInfo.create("default", tableNames[1], partitionValues));
     infoList.add(OutputJobInfo.create("default", tableNames[2], partitionValues));
 
+    // There are tests that check file permissions (which are manually set)
+    // Disable NN ACLs so that the manually set permissions are observed
+    hiveConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
+
     Job job = new Job(hiveConf, "SampleJob");
 
     job.setMapperClass(MyMapper.class);
@@ -315,18 +320,18 @@ public class TestHCatMultiOutputFormat {
 
     // Check permisssion on partition dirs and files created
     for (int i = 0; i < tableNames.length; i++) {
-      Path partitionFile = new Path(warehousedir + "/" + tableNames[i]
-        + "/ds=1/cluster=ag/part-m-00000");
-      FileSystem fs = partitionFile.getFileSystem(mrConf);
-      Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-        fs.getFileStatus(partitionFile).getPermission(),
-        new FsPermission(tablePerms[i]));
-      Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-        fs.getFileStatus(partitionFile.getParent()).getPermission(),
-        new FsPermission(tablePerms[i]));
-      Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-        fs.getFileStatus(partitionFile.getParent().getParent()).getPermission(),
-        new FsPermission(tablePerms[i]));
+      final Path partitionFile = new Path(warehousedir + "/" + tableNames[i] + "/ds=1/cluster=ag/part-m-00000");
+
+      final FileSystem fs = partitionFile.getFileSystem(mrConf);
+
+      Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct [" + partitionFile + "]",
+          new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile).getPermission());
+      Assert.assertEquals(
+          "File permissions of table " + tableNames[i] + " is not correct [" + partitionFile.getParent() + "]",
+          new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile.getParent()).getPermission());
+      Assert.assertEquals(
+          "File permissions of table " + tableNames[i] + " is not correct [" + partitionFile.getParent().getParent() + "]",
+          new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile.getParent().getParent()).getPermission());
 
     }
     LOG.info("File permissions verified");
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
index 5b43323a62a..0420c506136 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
@@ -37,11 +37,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
 import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
@@ -114,16 +116,16 @@ public class TestHCatClient {
       return;
     }
 
-    // Set proxy user privilege and initialize the global state of ProxyUsers
-    Configuration conf = new Configuration();
-    conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
-    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+    Configuration conf = MetastoreConf.newMetastoreConf();
 
-    System.setProperty(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS.varname,
-        DbNotificationListener.class.getName()); // turn on db notification listener on metastore
-    System.setProperty(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
-            JSONMessageEncoder.class.getName());
-    msPort = MetaStoreTestUtils.startMetaStoreWithRetry();
+    // Disable proxy authorization white-list for testing
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
+
+    // turn on db notification listener on metastore
+    MetastoreConf.setClass(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS, DbNotificationListener.class, TransactionalMetaStoreEventListener.class);
+    MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY, JSONMessageEncoder.class, MessageEncoder.class);
+
+    msPort = MetaStoreTestUtils.startMetaStoreWithRetry(conf);
     securityManager = System.getSecurityManager();
     System.setSecurityManager(new NoExitSecurityManager());
     Policy.setPolicy(new DerbyPolicy());
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
index f5f8dbdd898..f486fc35a05 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
@@ -56,7 +56,6 @@ public class BaseReplicationAcrossInstances {
       throws Exception {
     conf = new HiveConf(clazz);
     conf.set("dfs.client.use.datanode.hostname", "true");
-    conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
     conf.set("hive.repl.cmrootdir", "/tmp/");
     conf.set("dfs.namenode.acls.enabled", "true");
     MiniDFSCluster miniDFSCluster =
@@ -64,6 +63,8 @@ public class BaseReplicationAcrossInstances {
     Map<String, String> localOverrides = new HashMap<String, String>() {{
       put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString());
       put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
+      // Disable proxy authorization white-list for testing
+      put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
     }};
     localOverrides.putAll(overrides);
     setFullyQualifiedReplicaExternalTableBase(miniDFSCluster.getFileSystem());
@@ -88,7 +89,6 @@ public class BaseReplicationAcrossInstances {
     replicaConf = new HiveConf(clazz);
     replicaConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, replicaBaseDir);
     replicaConf.set("dfs.client.use.datanode.hostname", "true");
-    replicaConf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
     MiniDFSCluster miniReplicaDFSCluster =
             new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
 
@@ -97,7 +97,6 @@ public class BaseReplicationAcrossInstances {
     conf = new HiveConf(clazz);
     conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, primaryBaseDir);
     conf.set("dfs.client.use.datanode.hostname", "true");
-    conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
     MiniDFSCluster miniPrimaryDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
 
     // Setup primary warehouse.
@@ -106,6 +105,8 @@ public class BaseReplicationAcrossInstances {
     localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
     localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
     localOverrides.put("fs.defaultFS", miniPrimaryDFSCluster.getFileSystem().getUri().toString());
+    // Disable proxy authorization white-list for testing
+    localOverrides.put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
     localOverrides.putAll(primaryOverrides);
     primary = new WarehouseInstance(LOG, miniPrimaryDFSCluster, localOverrides);
 
@@ -114,6 +115,7 @@ public class BaseReplicationAcrossInstances {
     localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
     localOverrides.put("fs.defaultFS", miniReplicaDFSCluster.getFileSystem().getUri().toString());
     localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
+    localOverrides.put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
     localOverrides.putAll(replicaOverrides);
     replica = new WarehouseInstance(LOG, miniReplicaDFSCluster, localOverrides);
   }
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
index c33188ff163..731eb9c6bd7 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
@@ -28,7 +28,6 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
@@ -60,8 +59,8 @@ public class TestReplicationOnHDFSEncryptedZones {
   @BeforeClass
   public static void beforeClassSetup() throws Exception {
     System.setProperty("jceks.key.serialFilter", "java.lang.Enum;java.security.KeyRep;" +
-            "java.security.KeyRep$Type;javax.crypto.spec.SecretKeySpec;" +
-            "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;!*");
+        "java.security.KeyRep$Type;javax.crypto.spec.SecretKeySpec;" +
+        "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;!*");
     conf = new Configuration();
     conf.set("dfs.client.use.datanode.hostname", "true");
     conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
@@ -95,7 +94,7 @@ public class TestReplicationOnHDFSEncryptedZones {
     primaryDbName = testName.getMethodName() + "_" + +System.currentTimeMillis();
     replicatedDbName = "replicated_" + primaryDbName;
     primary.run("create database " + primaryDbName + " WITH DBPROPERTIES ( '" +
-            SOURCE_OF_REPLICATION + "' = '1,2,3')");
+        SOURCE_OF_REPLICATION + "' = '1,2,3')");
   }
 
   @Test
@@ -109,74 +108,38 @@ public class TestReplicationOnHDFSEncryptedZones {
     replicaConf.setBoolean("dfs.namenode.delegation.token.always-use", true);
 
     MiniDFSCluster miniReplicaDFSCluster =
-            new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
+        new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
     replicaConf.setBoolean(METASTORE_AGGREGATE_STATS_CACHE_ENABLED.varname, false);
 
     DFSTestUtil.createKey("test_key123", miniReplicaDFSCluster, replicaConf);
 
     WarehouseInstance replica = new WarehouseInstance(LOG, miniReplicaDFSCluster,
-            new HashMap<String, String>() {{
-              put(HiveConf.ConfVars.HIVE_IN_TEST.varname, "false");
-              put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false");
-              put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname,
-                      UserGroupInformation.getCurrentUser().getUserName());
-              put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir);
-            }}, "test_key123");
-
-    List<String> dumpWithClause = Arrays.asList(
-            "'hive.repl.add.raw.reserved.namespace'='true'",
-            "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
-                    + replica.externalTableWarehouseRoot + "'",
-            "'distcp.options.skipcrccheck'=''",
-            "'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
-            "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
-                    + UserGroupInformation.getCurrentUser().getUserName() +"'");
-    WarehouseInstance.Tuple tuple =
-            primary.run("use " + primaryDbName)
-                    .run("create table encrypted_table (id int, value string)")
-                    .run("insert into table encrypted_table values (1,'value1')")
-                    .run("insert into table encrypted_table values (2,'value2')")
-                    .dump(primaryDbName, dumpWithClause);
-
-    replica
-            .run("repl load " + primaryDbName + " into " + replicatedDbName
-                    + " with('hive.repl.add.raw.reserved.namespace'='true', "
-                    + "'hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
-                    + "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
-            .run("use " + replicatedDbName)
-            .run("repl status " + replicatedDbName)
-            .verifyResult(tuple.lastReplicationId);
-
-    try {
-      replica
-              .run("select value from encrypted_table")
-              .verifyResults(new String[] { "value1", "value2" });
-      Assert.fail("Src EZKey shouldn't be present on target");
-    } catch (Throwable e) {
-      while (e.getCause() != null) {
-        e = e.getCause();
-      }
-      Assert.assertTrue(e.getMessage().contains("KeyVersion name 'test_key@0' does not exist"));
-    }
+        new HashMap<String, String>() {{
+          put(HiveConf.ConfVars.HIVE_IN_TEST.varname, "false");
+          put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false");
+          put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname,
+              UserGroupInformation.getCurrentUser().getUserName());
+          put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir);
+        }}, "test_key123");
 
     //read should pass without raw-byte distcp
-    dumpWithClause = Arrays.asList( "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
-            + replica.externalTableWarehouseRoot + "'");
-    tuple = primary.run("use " + primaryDbName)
+    List<String> dumpWithClause = Arrays.asList( "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+        + replica.externalTableWarehouseRoot + "'");
+    WarehouseInstance.Tuple tuple =
+        primary.run("use " + primaryDbName)
             .run("create external table encrypted_table2 (id int, value string)")
             .run("insert into table encrypted_table2 values (1,'value1')")
             .run("insert into table encrypted_table2 values (2,'value2')")
             .dump(primaryDbName, dumpWithClause);
 
     replica
-            .run("repl load " + primaryDbName + " into " + replicatedDbName
-                    + " with('hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
-                    + "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
-            .run("use " + replicatedDbName)
-            .run("repl status " + replicatedDbName)
-            .verifyResult(tuple.lastReplicationId)
-            .run("select value from encrypted_table2")
-            .verifyResults(new String[] { "value1", "value2" });
+        .run("repl load " + primaryDbName + " into " + replicatedDbName
+            + " with('hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
+            + "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
+        .run("use " + replicatedDbName)
+        .run("repl status " + replicatedDbName)
+        .run("select value from encrypted_table2")
+        .verifyResults(new String[] { "value1", "value2" });
   }
 
   @Test
@@ -190,7 +153,7 @@ public class TestReplicationOnHDFSEncryptedZones {
     replicaConf.setBoolean("dfs.namenode.delegation.token.always-use", true);
 
     MiniDFSCluster miniReplicaDFSCluster =
-            new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
+        new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
     replicaConf.setBoolean(METASTORE_AGGREGATE_STATS_CACHE_ENABLED.varname, false);
 
     WarehouseInstance replica = new WarehouseInstance(LOG, miniReplicaDFSCluster,
@@ -203,13 +166,13 @@ public class TestReplicationOnHDFSEncryptedZones {
         }}, "test_key");
 
     List<String> dumpWithClause = Arrays.asList(
-            "'hive.repl.add.raw.reserved.namespace'='true'",
-            "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
-                    + replica.externalTableWarehouseRoot + "'",
-            "'distcp.options.skipcrccheck'=''",
-            "'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
-            "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
-                    + UserGroupInformation.getCurrentUser().getUserName() +"'");
+        "'hive.repl.add.raw.reserved.namespace'='true'",
+        "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+            + replica.externalTableWarehouseRoot + "'",
+        "'distcp.options.skipcrccheck'=''",
+        "'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
+        "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
+            + UserGroupInformation.getCurrentUser().getUserName() +"'");
 
     WarehouseInstance.Tuple tuple =
         primary.run("use " + primaryDbName)
@@ -229,4 +192,4 @@ public class TestReplicationOnHDFSEncryptedZones {
         .run("select value from encrypted_table")
         .verifyResults(new String[] { "value1", "value2" });
   }
-}
+}
\ No newline at end of file
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index afb8648ec34..910dda67658 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -199,7 +199,8 @@ public class TestReplicationScenarios {
       hconf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), metastoreUri);
       return;
     }
-
+    // Disable auth so the call should succeed
+    MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
     hconf.set(MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS.getHiveName(),
         DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore
     hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
@@ -207,6 +208,7 @@ public class TestReplicationScenarios {
     hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/");
     proxySettingName = "hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts";
     hconf.set(proxySettingName, "*");
+    MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
     hconf.setVar(HiveConf.ConfVars.REPLDIR,TEST_PATH + "/hrepl/");
     hconf.set(MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES.getHiveName(), "3");
     hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
@@ -247,9 +249,10 @@ public class TestReplicationScenarios {
     hconfMirrorServer.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=metastore_db2;create=true");
     MetaStoreTestUtils.startMetaStoreWithRetry(hconfMirrorServer, true);
     hconfMirror = new HiveConf(hconf);
+    MetastoreConf.setBoolVar(hconfMirror, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
+    hconfMirrorServer.set(proxySettingName, "*");
     String thriftUri = MetastoreConf.getVar(hconfMirrorServer, MetastoreConf.ConfVars.THRIFT_URIS);
     MetastoreConf.setVar(hconfMirror, MetastoreConf.ConfVars.THRIFT_URIS, thriftUri);
-
     driverMirror = DriverFactory.newDriver(hconfMirror);
     metaStoreClientMirror = new HiveMetaStoreClient(hconfMirror);
 
@@ -4188,27 +4191,32 @@ public class TestReplicationScenarios {
     NotificationEventResponse rsp = metaStoreClient.getNextNotification(firstEventId, 0, null);
     assertEquals(1, rsp.getEventsSize());
     // Test various scenarios
-    // Remove the proxy privilege and the auth should fail (in reality the proxy setting should not be changed on the fly)
-    hconf.unset(proxySettingName);
-    // Need to explicitly update ProxyUsers
-    ProxyUsers.refreshSuperUserGroupsConfiguration(hconf);
-    // Verify if the auth should fail
-    Exception ex = null;
+    // Remove the proxy privilege by resetting the proxy configuration to its default value.
+    // The auth should fail (in reality the proxy setting should not be changed on the fly)
+    // Pretty hacky: Affects both instances of HMS
+    ProxyUsers.refreshSuperUserGroupsConfiguration();
     try {
+      hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, false);
+      MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, true);
       rsp = metaStoreClient.getNextNotification(firstEventId, 0, null);
+      Assert.fail("Get Next Nofitication should have failed due to no proxy auth");
     } catch (TException e) {
-      ex = e;
+      // Expected to throw an Exception - keep going
     }
-    assertNotNull(ex);
     // Disable auth so the call should succeed
     MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
+    MetastoreConf.setBoolVar(hconfMirror, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
     try {
       rsp = metaStoreClient.getNextNotification(firstEventId, 0, null);
       assertEquals(1, rsp.getEventsSize());
     } finally {
       // Restore the settings
-      MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, true);
+      MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
+      hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true);
       hconf.set(proxySettingName, "*");
+
+      // Restore Proxy configurations to test values
+      // Pretty hacky: Applies one setting to both instances of HMS
       ProxyUsers.refreshSuperUserGroupsConfiguration(hconf);
     }
   }
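
The proxy-user juggling above is easier to follow with a small, self-contained sketch (the short user name is illustrative, not taken from the commit) of how runtime proxy settings are re-applied:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    public final class ProxyRefreshSketch {
      // Allow the given user to proxy from any host, then push the change into
      // the live ProxyUsers state, as the test does around its notification-auth
      // checks. Calling the no-arg overload instead resets to the defaults.
      public static void allowProxyFromAnyHost(String shortUserName) {
        Configuration conf = new Configuration();
        conf.set("hadoop.proxyuser." + shortUserName + ".hosts", "*");
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
      }
    }
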
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 6efcf681bc6..9256d288f22 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -2815,11 +2815,12 @@ public class TestJdbcDriver2 {
     String sql = "select count(*) from " + tableName;
 
     // Verify the fetched log (from the beginning of log file)
-    HiveStatement stmt = (HiveStatement)con.createStatement();
-    assertNotNull("Statement is null", stmt);
-    stmt.executeQuery(sql);
-    List<String> logs = stmt.getQueryLog(false, 10000);
-    stmt.close();
+    List<String> logs;
+    try (HiveStatement stmt = (HiveStatement) con.createStatement()) {
+      assertNotNull("Statement is null", stmt);
+      stmt.executeQuery(sql);
+      logs = stmt.getQueryLog(false, 200000);
+    }
     verifyFetchedLog(logs, expectedLogs);
 
     // Verify the fetched log (incrementally)
@@ -3010,8 +3011,7 @@ public class TestJdbcDriver2 {
     }
     String accumulatedLogs = stringBuilder.toString();
     for (String expectedLog : expectedLogs) {
-      LOG.info("Checking match for " + expectedLog);
-      assertTrue(accumulatedLogs.contains(expectedLog));
+      assertTrue("Failed to find match for " + expectedLog, accumulatedLogs.contains(expectedLog));
     }
   }
 
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/OperationLoggingAPITestBase.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/OperationLoggingAPITestBase.java
index c1b93780673..6e101486cd2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/OperationLoggingAPITestBase.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/OperationLoggingAPITestBase.java
@@ -86,7 +86,7 @@ public abstract class OperationLoggingAPITestBase {
     client.executeStatement(sessionHandle, queryString, null);
     // verify whether the sql operation log is generated and fetch correctly.
     OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
-    RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+    RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 100000,
       FetchType.LOG);
     // Verbose Logs should contain everything, including execution and performance
     verifyFetchedLog(rowSetLog, expectedLogsVerbose);
@@ -101,7 +101,7 @@ public abstract class OperationLoggingAPITestBase {
       client.executeStatement(sessionHandle, queryString, null);
       // verify whether the sql operation log is generated and fetch correctly.
       OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
-      RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+      RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 100000,
         FetchType.LOG);
       // rowSetLog should contain execution as well as performance logs
       verifyFetchedLog(rowSetLog, expectedLogsExecution);
diff --git a/itests/pom.xml b/itests/pom.xml
index c74ae7d9b0f..52508f12894 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -352,6 +352,12 @@
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-client</artifactId>
         <version>${hadoop.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>org.jline</groupId>
+            <artifactId>jline</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
diff --git a/itests/qtest-druid/pom.xml b/itests/qtest-druid/pom.xml
index c0e74a0d923..95fcc516aec 100644
--- a/itests/qtest-druid/pom.xml
+++ b/itests/qtest-druid/pom.xml
@@ -33,7 +33,7 @@
     <druid.avatica.version>1.15.0</druid.avatica.version>
     <druid.curator.version>4.0.0</druid.curator.version>
     <druid.jersey.version>1.19.3</druid.jersey.version>
-    <druid.jetty.version>9.4.10.v20180503</druid.jetty.version>
+    <druid.jetty.version>9.4.40.v20210413</druid.jetty.version>
     <druid.derby.version>10.11.1.1</druid.derby.version>
     <druid.guava.version>16.0.1</druid.guava.version>
     <druid.guice.version>4.1.0</druid.guice.version>
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index fc975fe9b28..bb478359893 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -412,6 +412,24 @@
       <groupId>com.oracle.database.jdbc</groupId>
       <artifactId>ojdbc8</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+      <version>4.1.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-multibindings</artifactId>
+      <version>4.1.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-servlet</artifactId>
+      <version>4.1.0</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
   <profiles>
     <profile>
diff --git a/llap-common/pom.xml b/llap-common/pom.xml
index 686e85fb47d..6624065bbf1 100644
--- a/llap-common/pom.xml
+++ b/llap-common/pom.xml
@@ -64,7 +64,6 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <optional>true</optional>
       <exclusions>
         <exclusion>
           <groupId>org.slf4j</groupId>
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index 42251033679..0b225d6a81a 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -221,7 +221,6 @@
     <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-util</artifactId>
-      <version>${jetty.version}</version>
     </dependency>
     <dependency>
       <groupId>com.lmax</groupId>
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index 06c6009496e..336bf056915 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -248,7 +248,7 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
         vertex.getVertexName(), request.getFragmentNumber(), request.getAttemptNumber());
 
     // This is the start of container-annotated logging.
-    final String dagId = attemptId.getTaskID().getVertexID().getDAGId().toString();
+    final String dagId = attemptId.getDAGID().toString();
     final String queryId = vertex.getHiveQueryId();
     final String fragmentId = LlapTezUtils.stripAttemptPrefix(fragmentIdString);
     MDC.put("dagId", dagId);
@@ -270,7 +270,7 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
       env.put(ApplicationConstants.Environment.USER.name(), vertex.getUser());
 
       TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(fragmentIdString);
-      int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
+      int dagIdentifier = taskAttemptId.getDAGID().getId();
 
       QueryIdentifier queryIdentifier = new QueryIdentifier(
           qIdProto.getApplicationIdString(), dagIdentifier);
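
The same mechanical change repeats across the LLAP files below: Tez 0.10.2 exposes the DAG id directly on the attempt id, so the old getTaskID().getVertexID().getDAGId() chain collapses to a single call. A minimal sketch (illustrative helper, not from the commit):

    import org.apache.tez.dag.records.TezDAGID;
    import org.apache.tez.dag.records.TezTaskAttemptID;

    public final class DagIdSketch {
      // Resolve the DAG id for a task attempt with the Tez 0.10.2 accessor.
      public static TezDAGID dagOf(TezTaskAttemptID attemptId) {
        return attemptId.getDAGID();
      }
    }
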
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
index b4f51f0560d..f3b337032bd 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
@@ -291,7 +291,7 @@ public class LlapTaskReporter implements TaskReporterInterface {
       int fromPreRoutedEventId = task.getNextPreRoutedEventId();
       int maxEvents = Math.min(maxEventsToGet, task.getMaxEventsToHandle());
       TezHeartbeatRequest request = new TezHeartbeatRequest(requestId, events, fromPreRoutedEventId,
-          containerIdStr, task.getTaskAttemptID(), fromEventId, maxEvents);
+          containerIdStr, task.getTaskAttemptID(), fromEventId, maxEvents, 0);
       LOG.debug("Sending heartbeat to AM, request={}", request);
 
       maybeLogCounters();
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index 99f226c799d..f299c58c5b8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -330,7 +330,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
     StringBuilder sb = new StringBuilder();
     TezTaskID taskId = taskAttemptId.getTaskID();
     TezVertexID vertexId = taskId.getVertexID();
-    TezDAGID dagId = vertexId.getDAGId();
+    TezDAGID dagId = vertexId.getDAGID();
     ApplicationId appId = dagId.getApplicationId();
     long clusterTs = appId.getClusterTimestamp();
     long clusterTsShort = clusterTs % 1_000_000L;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
index 789b637bd6b..19f8ca8dd60 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
@@ -141,6 +141,8 @@ public class LlapInputFormat implements InputFormat<NullWritable, VectorizedRowB
       // This starts the reader in the background.
       rr.start();
       return result;
+    } catch (IOException ioe) {
+      throw ioe;
     } catch (Exception ex) {
       Throwable rootCause = JavaUtils.findRootCause(ex);
       if (checkLimitReached(job)
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
index a9ae27f25dc..c5bb75b8ec6 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -430,7 +430,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
     UpdateFragmentRequestProto request = UpdateFragmentRequestProto.newBuilder()
         .setIsGuaranteed(newState).setFragmentIdentifierString(attemptId.toString())
         .setQueryIdentifier(constructQueryIdentifierProto(
-            attemptId.getTaskID().getVertexID().getDAGId().getId())).build();
+            attemptId.getDAGID().getId())).build();
 
     communicator.sendUpdateFragment(request, nodeId.getHostname(), nodeId.getPort(),
         new LlapProtocolClientProxy.ExecuteRequestCallback<UpdateFragmentResponseProto>() {
@@ -457,7 +457,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
                                          int priority)  {
     super.registerRunningTaskAttempt(containerId, taskSpec, additionalResources, credentials,
         credentialsChanged, priority);
-    int dagId = taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId();
+    int dagId = taskSpec.getDAGID().getId();
     if (currentQueryIdentifierProto == null || (dagId != currentQueryIdentifierProto.getDagIndex())) {
       String hiveQueryId = extractQueryIdFromContext();
       try {
@@ -611,7 +611,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
       TerminateFragmentRequestProto request =
           TerminateFragmentRequestProto.newBuilder().setQueryIdentifier(
               constructQueryIdentifierProto(
-                  taskAttemptId.getTaskID().getVertexID().getDAGId().getId()))
+                  taskAttemptId.getDAGID().getId()))
               .setFragmentIdentifierString(taskAttemptId.toString()).build();
       communicator.sendTerminateFragment(request, nodeId.getHostname(), nodeId.getPort(),
           new LlapProtocolClientProxy.ExecuteRequestCallback<TerminateFragmentResponseProto>() {
@@ -755,7 +755,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
 
   private String constructLlapLogUrl(final TezTaskAttemptID attemptID, final String containerIdString,
     final boolean isDone, final String nmAddress) {
-    String dagId = attemptID.getTaskID().getVertexID().getDAGId().toString();
+    String dagId = attemptID.getDAGID().toString();
     String filename = JOINER.join(currentHiveQueryId, "-", dagId, ".log", (isDone ? ".done" : ""),
       "?nm.id=", nmAddress);
     String url = PATH_JOINER.join(timelineServerUri, "ws", "v1", "applicationhistory", "containers",
@@ -904,7 +904,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
     builder.setAmPort(getAddress().getPort());
 
     Preconditions.checkState(currentQueryIdentifierProto.getDagIndex() ==
-        taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId());
+        taskSpec.getDAGID().getId());
 
 
     builder.setCredentialsBinary(
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
index 473ab1eb18a..85c34ddfc06 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
@@ -1170,7 +1170,7 @@ public class LlapTaskSchedulerService extends TaskScheduler {
             task, priority, capability);
     if (!dagRunning) {
       if (metrics != null && id != null) {
-        metrics.setDagId(id.getTaskID().getVertexID().getDAGId().toString());
+        metrics.setDagId(id.getDAGID().toString());
       }
       dagRunning = true;
     }
@@ -1191,7 +1191,7 @@ public class LlapTaskSchedulerService extends TaskScheduler {
         task, priority, capability, containerId);
     if (!dagRunning) {
       if (metrics != null && id != null) {
-        metrics.setDagId(id.getTaskID().getVertexID().getDAGId().toString());
+        metrics.setDagId(id.getDAGID().toString());
       }
       dagRunning = true;
     }
@@ -1204,7 +1204,7 @@ public class LlapTaskSchedulerService extends TaskScheduler {
   protected TezTaskAttemptID getTaskAttemptId(Object task) {
     // TODO: why does Tez API use "Object" for this?
     if (task instanceof TaskAttempt) {
-      return ((TaskAttempt)task).getID();
+      return ((TaskAttempt)task).getTaskAttemptID();
     }
     throw new AssertionError("LLAP plugin can only schedule task attempts");
   }
@@ -2108,7 +2108,7 @@ public class LlapTaskSchedulerService extends TaskScheduler {
           if (preemptHosts != null && !preemptHosts.contains(taskInfo.getAssignedNode().getHost())) {
             continue; // Not the right host.
           }
-          Map<Integer, Set<Integer>> depInfo = getDependencyInfo(taskInfo.getAttemptId().getTaskID().getVertexID().getDAGId());
+          Map<Integer, Set<Integer>> depInfo = getDependencyInfo(taskInfo.getAttemptId().getDAGID());
           Set<Integer> vertexDepInfo = null;
           if (depInfo != null) {
             vertexDepInfo = depInfo.get(vertexNum(forTask));
diff --git a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
index 3bbbdf3fff2..5a23d6d6cc1 100644
--- a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
+++ b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
@@ -363,6 +363,7 @@ public class TestLlapTaskCommunicator {
       TezTaskAttemptID taskAttemptId = TezTaskAttemptID.getInstance(
           TezTaskID.getInstance(vertexId, taskIdx), 0);
       doReturn(taskAttemptId).when(taskSpec).getTaskAttemptID();
+      doReturn(taskAttemptId.getDAGID()).when(taskSpec).getDAGID();
       doReturn(DAG_NAME).when(taskSpec).getDAGName();
       doReturn(vertexName).when(taskSpec).getVertexName();
       ProcessorDescriptor processorDescriptor = ProcessorDescriptor.create("fakeClassName").setUserPayload(userPayload);
diff --git a/pom.xml b/pom.xml
index b77c249781f..9358f76a8a9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -131,7 +131,7 @@
     <guava.version>19.0</guava.version>
     <groovy.version>2.4.21</groovy.version>
     <h2database.version>2.1.210</h2database.version>
-    <hadoop.version>3.1.0</hadoop.version>
+    <hadoop.version>3.3.1</hadoop.version>
     <hadoop.bin.path>${basedir}/${hive.path.to.root}/testutils/hadoop</hadoop.bin.path>
     <hamcrest.version>1.3</hamcrest.version>
     <hbase.version>2.0.0-alpha4</hbase.version>
@@ -151,7 +151,7 @@
     <javax-servlet-jsp.version>2.3.1</javax-servlet-jsp.version>
     <javolution.version>5.5.1</javolution.version>
     <jettison.version>1.1</jettison.version>
-    <jetty.version>9.3.27.v20190418</jetty.version>
+    <jetty.version>9.4.40.v20210413</jetty.version>
     <jersey.version>1.19</jersey.version>
     <jline.version>2.14.6</jline.version>
     <jms.version>2.0.2</jms.version>
@@ -192,7 +192,7 @@
     <slf4j.version>1.7.30</slf4j.version>
     <ST4.version>4.0.4</ST4.version>
     <storage-api.version>4.0.0-alpha-2-SNAPSHOT</storage-api.version>
-    <tez.version>0.10.1</tez.version>
+    <tez.version>0.10.2</tez.version>
     <super-csv.version>2.2.0</super-csv.version>
     <tempus-fugit.version>1.1</tempus-fugit.version>
     <snappy.version>1.1.8.4</snappy.version>
@@ -216,6 +216,7 @@
     <validation-api.version>1.1.0.Final</validation-api.version>
     <aws-secretsmanager-caching.version>1.0.1</aws-secretsmanager-caching.version>
     <aws-java-sdk.version>1.11.901</aws-java-sdk.version>
+    <jansi.version>2.3.4</jansi.version>
   </properties>
   <repositories>
     <!-- This needs to be removed before checking in-->
@@ -744,6 +745,11 @@
         <artifactId>jetty-webapp</artifactId>
         <version>${jetty.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.eclipse.jetty</groupId>
+        <artifactId>jetty-http</artifactId>
+        <version>${jetty.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.eclipse.jetty</groupId>
         <artifactId>jetty-util</artifactId>
@@ -1018,6 +1024,10 @@
             <groupId>org.slf4j</groupId>
             <artifactId>slf4j-log4j12</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>org.jline</groupId>
+            <artifactId>jline</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>commons-logging</groupId>
             <artifactId>commons-logging</artifactId>
@@ -1052,6 +1062,12 @@
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-client</artifactId>
         <version>${hadoop.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>org.jline</groupId>
+            <artifactId>jline</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index 173a283f807..c3fc4094504 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -430,16 +430,18 @@ public class FetchOperator implements Serializable {
 
   private void generateWrappedSplits(InputFormat inputFormat, List<FetchInputFormatSplit> inputSplits, JobConf job)
       throws IOException {
-    InputSplit[] splits;
+    InputSplit[] splits = new InputSplit[0];
     try {
       splits = inputFormat.getSplits(job, 1);
+    } catch (InvalidInputException iie) {
+      LOG.warn("Input path " + currPath + " is empty", iie);
     } catch (Exception ex) {
       Throwable t = ExceptionUtils.getRootCause(ex);
       if (t instanceof FileNotFoundException || t instanceof InvalidInputException) {
-        LOG.warn("Input path " + currPath + " is empty", t.getMessage());
-        return;
+        LOG.warn("Input path " + currPath + " is empty", t);
+      } else {
+        throw ex;
       }
-      throw ex;
     }
     for (int i = 0; i < splits.length; i++) {
       inputSplits.add(new FetchInputFormatSplit(splits[i], inputFormat));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 2235904c818..66db1676b30 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -867,5 +867,12 @@ public class TezTask extends Task<TezWork> {
         return dagClient.waitForCompletionWithStatusUpdates(statusGetOpts);
       }
     }
+
+    @Override
+    public String getWebUIAddress() throws IOException, TezException {
+      synchronized (dagClient) {
+        return dagClient.getWebUIAddress();
+      }
+    }
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordReaderWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordReaderWrapper.java
index 523c770cf74..e950da21820 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordReaderWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordReaderWrapper.java
@@ -36,6 +36,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
@@ -69,7 +70,15 @@ class RecordReaderWrapper extends LineRecordReader {
       JobConf jobConf, Reporter reporter) throws IOException {
     int headerCount = Utilities.getHeaderCount(tableDesc);
     int footerCount = Utilities.getFooterCount(tableDesc, jobConf);
-    RecordReader innerReader = inputFormat.getRecordReader(split.getInputSplit(), jobConf, reporter);
+
+    RecordReader innerReader = null;
+    try {
+     innerReader = inputFormat.getRecordReader(split.getInputSplit(), jobConf, reporter);
+    } catch (InterruptedIOException iioe) {
+      // If reading from the underlying record reader is interrupted, return a no-op record reader
+      LOG.info("Interrupted while getting the input reader for {}", split.getInputSplit());
+      return new ZeroRowsInputFormat().getRecordReader(split.getInputSplit(), jobConf, reporter);
+    }
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Using {} to read data with skip.header.line.count {} and skip.footer.line.count {}",
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
index 56be7196274..1058ce3b6d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
@@ -178,8 +178,7 @@ public class StorageBasedAuthorizationProvider extends HiveAuthorizationProvider
 
   private static boolean userHasProxyPrivilege(String user, Configuration conf) {
     try {
-      if (MetaStoreServerUtils.checkUserHasHostProxyPrivileges(user, conf,
-              HMSHandler.getIPAddress())) {
+      if (MetaStoreServerUtils.checkUserHasHostProxyPrivileges(user, conf, HMSHandler.getIPAddress())) {
         LOG.info("user {} has host proxy privilege.", user);
         return true;
       }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
index c3a6ef683bc..969254ab1ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObje
 import org.apache.hadoop.hive.ql.security.authorization.plugin.metastore.filtercontext.DatabaseFilterContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.metastore.filtercontext.TableFilterContext;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -489,6 +490,7 @@ public class HiveMetaStoreAuthorizer extends MetaStorePreEventListener implement
   boolean isSuperUser(String userName) {
     Configuration conf      = getConf();
     String        ipAddress = HMSHandler.getIPAddress();
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     return (MetaStoreServerUtils.checkUserHasHostProxyPrivileges(userName, conf, ipAddress));
   }
 
diff --git a/ql/src/test/queries/clientpositive/acid_table_directories_test.q b/ql/src/test/queries/clientpositive/acid_table_directories_test.q
index 2ad21263b23..048e8561e88 100644
--- a/ql/src/test/queries/clientpositive/acid_table_directories_test.q
+++ b/ql/src/test/queries/clientpositive/acid_table_directories_test.q
@@ -1,3 +1,5 @@
+--! qt:disabled:Tests the output of ls -R, which changed post Hadoop 3.3.x (the output is no longer sorted);
+--disabled as part of HIVE-24484 (Upgrade Hadoop to 3.3.1)
 set hive.mapred.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
diff --git a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
index 834bfbcc2bb..03b00ca1eb0 100644
--- a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
+++ b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
@@ -11,7 +11,8 @@ LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted
 SELECT * FROM encrypted_table_n0;
 
 -- Test loading data from the hdfs filesystem;
-dfs -copyFromLocal ../../data/files/kv1.txt hdfs:///tmp/kv1.txt;
+dfs -mkdir hdfs:///tmp;
+dfs -copyFromLocal -f ../../data/files/kv1.txt hdfs:///tmp/kv1.txt;
 LOAD DATA INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE encrypted_table_n0;
 SELECT * FROM encrypted_table_n0;
 
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index b8ff0e35671..a73c50253bd 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FutureDataInputStreamBuilder;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
@@ -766,6 +767,11 @@ public class Hadoop23Shims extends HadoopShimsSecure {
       super(fs, uri);
     }
 
+    @Override
+    public FutureDataInputStreamBuilder openFile(Path path) throws IOException, UnsupportedOperationException {
+      return super.openFile(ProxyFileSystem23.super.swizzleParamPath(path));
+    }
+
     @Override
     public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
       throws FileNotFoundException, IOException {
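
The override above matters because Hadoop 3.3 ships the openFile() builder API, and a proxying FileSystem has to apply its path swizzling there as well. A minimal caller-side sketch (assuming Hadoop 3.3.x on the classpath; not part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class OpenFileSketch {
      // Open a file through the FutureDataInputStreamBuilder API: openFile()
      // returns a builder, build() returns a CompletableFuture, and get()
      // blocks until the stream is ready.
      public static FSDataInputStream open(Path path, Configuration conf) throws Exception {
        FileSystem fs = path.getFileSystem(conf);
        return fs.openFile(path).build().get();
      }
    }
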
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DefaultIncompatibleTableChangeHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DefaultIncompatibleTableChangeHandler.java
index f45772f3da4..c9317c93394 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DefaultIncompatibleTableChangeHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DefaultIncompatibleTableChangeHandler.java
@@ -109,7 +109,7 @@ public class DefaultIncompatibleTableChangeHandler implements
       throw new InvalidOperationException(
           "The following columns have types incompatible with the existing " +
               "columns in their respective positions :\n" +
-              org.apache.commons.lang.StringUtils.join(incompatibleCols, ',')
+              org.apache.commons.lang3.StringUtils.join(incompatibleCols, ',')
       );
     }
   }
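
The switch from commons-lang to commons-lang3 is behavior-preserving for this call. A tiny sketch of the lang3 join used above (the column values are illustrative):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.commons.lang3.StringUtils;

    public class JoinSketch {
      public static void main(String[] args) {
        List<String> incompatibleCols = Arrays.asList("col1 : int", "col2 : string");
        // Prints "col1 : int,col2 : string", the same output as the old commons-lang call.
        System.out.println(StringUtils.join(incompatibleCols, ','));
      }
    }
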
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index 6b6f5e463c8..79be3ec81db 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -124,6 +124,7 @@ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMEN
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
 import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.TABLE_IS_CTLT;
+import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_IN_TEST;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
@@ -293,7 +294,7 @@ public class HMSHandler extends FacebookBase implements IHMSHandler {
   public HMSHandler(String name, Configuration conf) {
     super(name);
     this.conf = conf;
-    isInTest = MetastoreConf.getBoolVar(this.conf, ConfVars.HIVE_IN_TEST);
+    isInTest = MetastoreConf.getBoolVar(this.conf, HIVE_IN_TEST);
     if (threadPool == null) {
       synchronized (HMSHandler.class) {
         if (threadPool == null) {
@@ -9373,7 +9374,8 @@ public class HMSHandler extends FacebookBase implements IHMSHandler {
   private void authorizeProxyPrivilege() throws TException {
     // Skip the auth in embedded mode or if the auth is disabled
     if (!HiveMetaStore.isMetaStoreRemote() ||
-        !MetastoreConf.getBoolVar(conf, ConfVars.EVENT_DB_NOTIFICATION_API_AUTH)) {
+        !MetastoreConf.getBoolVar(conf, ConfVars.EVENT_DB_NOTIFICATION_API_AUTH) || conf.getBoolean(HIVE_IN_TEST.getVarname(),
+        false)) {
       return;
     }
     String user = null;
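
With the extra HIVE_IN_TEST clause, proxy-privilege enforcement for the notification API is skipped in test deployments as well as in embedded mode. A hedged sketch of a configuration that would take that early-return path (assuming the standard MetastoreConf setters):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class TestAuthBypassSketch {
      static Configuration newTestConf() {
        Configuration conf = MetastoreConf.newMetastoreConf();
        // Leave the notification API auth switch on, but mark the deployment as a test;
        // authorizeProxyPrivilege() then returns early instead of checking proxy users.
        MetastoreConf.setBoolVar(conf, ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, true);
        MetastoreConf.setBoolVar(conf, ConfVars.HIVE_IN_TEST, true);
        return conf;
      }
    }
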
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 083d1e39c4d..a9df86b2b87 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -408,7 +408,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       thread.setName("Metastore-HttpHandler-Pool: Thread-" + thread.getId());
       return thread;
     });
-    ExecutorThreadPool threadPool = new ExecutorThreadPool(executorService);
+    ExecutorThreadPool threadPool = new ExecutorThreadPool((ThreadPoolExecutor) executorService);
     // HTTP Server
     org.eclipse.jetty.server.Server server = new Server(threadPool);
     server.setStopAtShutdown(true);
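
The cast is needed because Jetty 9.4's ExecutorThreadPool wraps a java.util.concurrent.ThreadPoolExecutor rather than the plain Executor accepted by earlier Jetty versions, and Executors.newFixedThreadPool(...) returns a ThreadPoolExecutor instance in practice. A short sketch of the pattern (pool size and thread naming are illustrative):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadPoolExecutor;

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.util.thread.ExecutorThreadPool;

    public class JettyPoolSketch {
      static Server newServer(int maxThreads) {
        ExecutorService executorService = Executors.newFixedThreadPool(maxThreads,
            r -> new Thread(r, "sketch-http-handler"));
        // Executors.newFixedThreadPool currently returns a ThreadPoolExecutor, so the cast holds.
        ExecutorThreadPool threadPool = new ExecutorThreadPool((ThreadPoolExecutor) executorService);
        return new Server(threadPool);
      }
    }
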
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 42f5e963190..302d05d36a6 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -75,7 +75,7 @@
     </dropwizard-metrics-hadoop-metrics2-reporter.version>
     <dropwizard.version>3.1.0</dropwizard.version>
     <guava.version>19.0</guava.version>
-    <hadoop.version>3.1.0</hadoop.version>
+    <hadoop.version>3.3.1</hadoop.version>
     <hikaricp.version>2.6.1</hikaricp.version>
     <jackson.version>2.12.7</jackson.version>
     <javolution.version>5.5.1</javolution.version>
@@ -106,7 +106,7 @@
     <httpcomponents.core.version>4.4.13</httpcomponents.core.version>
     <pac4j-core.version>4.5.5</pac4j-core.version>
     <nimbus-jose-jwt.version>9.20</nimbus-jose-jwt.version>
-    <jetty.version>9.3.27.v20190418</jetty.version>
+    <jetty.version>9.4.40.v20210413</jetty.version>
     <!-- Thrift properties -->
     <thrift.home>you-must-set-this-to-run-thrift</thrift.home>
     <thrift.gen.dir>${basedir}/src/gen/thrift</thrift.gen.dir>
@@ -240,6 +240,10 @@
         <artifactId>hadoop-mapreduce-client-core</artifactId>
         <version>${hadoop.version}</version>
         <exclusions>
+          <exclusion>
+            <groupId>org.jline</groupId>
+            <artifactId>jline</artifactId>
+          </exclusion>
           <exclusion>
             <groupId>io.netty</groupId>
             <artifactId>netty</artifactId>
diff --git a/storage-api/pom.xml b/storage-api/pom.xml
index d34c71c1138..e96b8a2a6a5 100644
--- a/storage-api/pom.xml
+++ b/storage-api/pom.xml
@@ -29,7 +29,7 @@
     <maven.compiler.target>1.8</maven.compiler.target>
     <commons-logging.version>1.1.3</commons-logging.version>
     <guava.version>19.0</guava.version>
-    <hadoop.version>3.1.0</hadoop.version>
+    <hadoop.version>3.3.1</hadoop.version>
     <junit.version>4.13</junit.version>
     <junit.jupiter.version>5.6.3</junit.jupiter.version>
     <junit.vintage.version>5.6.3</junit.vintage.version>
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
index f75aeb37d71..4f32583e240 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.hive.common;
 
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hive.common.util.SuppressFBWarnings;
 
-import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang3.ArrayUtils;
 
 import java.util.ArrayList;
 import java.util.Arrays;
diff --git a/streaming/pom.xml b/streaming/pom.xml
index ff7b12b6033..86ee5bc8efd 100644
--- a/streaming/pom.xml
+++ b/streaming/pom.xml
@@ -68,6 +68,11 @@
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
index 3655bf236af..277081750cf 100644
--- a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
+++ b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils;
 import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.io.AcidDirectory;
@@ -110,6 +111,7 @@ import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -1317,10 +1319,14 @@ public class TestStreaming {
     connection.close();
   }
 
+  /**
+   * Starting with HDFS 3.3.1 the underlying output stream supports hflush, so no
+   * exception should be thrown here.
+   */
   @Test
   public void testTransactionBatchSizeValidation() throws Exception {
     final String schemes = conf.get(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES.varname);
-    // the output stream of this FS doesn't support hflush, so the below test will fail
+    // the output stream of this FS did not support hflush earlier; now no exception should be thrown
     conf.setVar(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES, "raw");
 
     StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder()
@@ -1337,10 +1343,6 @@ public class TestStreaming {
           .withHiveConf(conf)
           .connect();
 
-      Assert.fail();
-    } catch (ConnectionError e) {
-      Assert.assertTrue("Expected connection error due to batch sizes",
-          e.getMessage().contains("only supports transaction batch"));
     } finally {
       conf.setVar(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES, schemes);
     }
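
Because the hunk above removes the expected ConnectionError, the remaining test body simply exercises a successful connection. One way to confirm the new behavior independently (a sketch only, with an illustrative path) is to probe the output stream's capabilities on Hadoop 3.3.x:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StreamCapabilities;

    public class HflushProbeSketch {
      static boolean supportsHflush(Configuration conf, Path file) throws Exception {
        FileSystem fs = file.getFileSystem(conf);
        try (FSDataOutputStream out = fs.create(file, true /* overwrite */)) {
          // StreamCapabilities.HFLUSH is the "hflush" capability string.
          return out.hasCapability(StreamCapabilities.HFLUSH);
        }
      }
    }
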