Posted to commits@impala.apache.org by db...@apache.org on 2022/08/04 17:32:10 UTC

[impala] branch master updated (c0b0875bd -> abcb62b67)

This is an automated email from the ASF dual-hosted git repository.

dbecker pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


    from c0b0875bd IMPALA-11378: Allow INSERT OVERWRITE for bucket transforms in some cases
     new f5a7e1c1f IMPALA-11454: part-1: use standard binaries path to start kudu test cluster
     new 830625b10 IMPALA-9442: Add Ozone to minicluster
     new abcb62b67 IMPALA-11469: Make prefix of ignored staging dirs configurable

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 be/src/runtime/io/disk-io-mgr.cc                   |  2 +-
 be/src/util/backend-gflag-util.cc                  |  5 +++
 bin/bootstrap_toolchain.py                         | 32 +++++++------
 bin/clean.sh                                       |  2 +-
 bin/create-test-configuration.sh                   |  4 +-
 bin/impala-config.sh                               | 52 +++++++++++++++++++---
 common/thrift/BackendGflags.thrift                 |  2 +
 fe/pom.xml                                         |  1 +
 .../org/apache/impala/common/FileSystemUtil.java   | 29 +++++++++---
 .../org/apache/impala/service/BackendConfig.java   |  4 ++
 .../apache/impala/common/FileSystemUtilTest.java   | 19 +++++++-
 fe/src/test/resources/.gitignore                   |  1 +
 java/executor-deps/pom.xml                         |  6 +--
 java/pom.xml                                       |  2 +
 testdata/bin/kill-hbase.sh                         |  2 +-
 testdata/bin/run-all.sh                            | 20 ++++-----
 testdata/cluster/admin                             | 14 +++++-
 .../common/etc/hadoop/conf/core-site.xml.py        |  3 ++
 .../common/etc/hadoop/conf/ozone-site.xml.py       | 43 ++++++++++++++++++
 .../node_templates/common/etc/init.d/common.tmpl   |  2 +
 .../node_templates/common/etc/init.d/kudu-common   | 10 ++---
 .../node_templates/common/etc/init.d/kudu-master   |  2 +-
 .../node_templates/common/etc/init.d/kudu-tserver  |  2 +-
 .../node_templates/common/etc/init.d/ozone-common  |  8 +---
 .../etc/init.d/{hdfs-datanode => ozone-datanode}   |  4 +-
 .../etc/init.d/{hdfs-datanode => ozone-manager}    |  5 ++-
 .../common/etc/init.d/{hdfs-datanode => ozone-scm} |  5 ++-
 tests/authorization/test_ranger.py                 |  4 +-
 tests/common/impala_test_suite.py                  |  5 ++-
 tests/common/skip.py                               | 18 ++++++++
 tests/custom_cluster/test_coordinators.py          |  3 +-
 tests/custom_cluster/test_events_custom_configs.py |  3 +-
 tests/custom_cluster/test_hdfs_fd_caching.py       |  9 ++--
 .../test_hive_parquet_codec_interop.py             |  3 +-
 .../custom_cluster/test_hive_text_codec_interop.py |  3 +-
 tests/custom_cluster/test_insert_behaviour.py      |  3 +-
 tests/custom_cluster/test_lineage.py               |  4 +-
 tests/custom_cluster/test_local_catalog.py         |  3 +-
 tests/custom_cluster/test_local_tz_conversion.py   |  4 +-
 .../test_metadata_no_events_processing.py          |  3 +-
 tests/custom_cluster/test_metadata_replicas.py     |  2 +
 .../custom_cluster/test_parquet_max_page_header.py |  3 +-
 tests/custom_cluster/test_permanent_udfs.py        |  6 ++-
 tests/custom_cluster/test_runtime_profile.py       |  4 +-
 tests/data_errors/test_data_errors.py              |  5 ++-
 tests/failure/test_failpoints.py                   |  3 +-
 tests/metadata/test_compute_stats.py               |  6 ++-
 tests/metadata/test_ddl.py                         | 45 +++++++++----------
 tests/metadata/test_event_processing.py            |  3 +-
 tests/metadata/test_hdfs_encryption.py             |  3 +-
 tests/metadata/test_hdfs_permissions.py            |  3 +-
 tests/metadata/test_hms_integration.py             |  4 +-
 tests/metadata/test_metadata_query_statements.py   |  5 ++-
 tests/metadata/test_partition_metadata.py          |  3 +-
 tests/metadata/test_refresh_partition.py           |  3 +-
 tests/metadata/test_stats_extrapolation.py         |  3 +-
 tests/metadata/test_views_compatibility.py         |  3 +-
 tests/query_test/test_acid.py                      | 16 ++++++-
 tests/query_test/test_date_queries.py              |  3 +-
 tests/query_test/test_hbase_queries.py             |  2 +
 tests/query_test/test_hdfs_caching.py              |  6 ++-
 tests/query_test/test_insert_behaviour.py          |  9 +++-
 tests/query_test/test_insert_parquet.py            |  4 +-
 tests/query_test/test_join_queries.py              |  2 +
 tests/query_test/test_nested_types.py              |  4 ++
 tests/query_test/test_observability.py             |  4 +-
 tests/query_test/test_partitioning.py              |  3 +-
 tests/query_test/test_resource_limits.py           |  3 +-
 tests/query_test/test_scanners.py                  | 11 +++++
 tests/stress/test_acid_stress.py                   |  3 +-
 tests/stress/test_ddl_stress.py                    |  3 +-
 tests/util/filesystem_utils.py                     |  3 +-
 72 files changed, 394 insertions(+), 132 deletions(-)
 create mode 100644 testdata/cluster/node_templates/common/etc/hadoop/conf/ozone-site.xml.py
 copy bin/cmake_aux/add_override.sh => testdata/cluster/node_templates/common/etc/init.d/ozone-common (81%)
 mode change 100755 => 100644
 copy testdata/cluster/node_templates/common/etc/init.d/{hdfs-datanode => ozone-datanode} (94%)
 copy testdata/cluster/node_templates/common/etc/init.d/{hdfs-datanode => ozone-manager} (89%)
 copy testdata/cluster/node_templates/common/etc/init.d/{hdfs-datanode => ozone-scm} (90%)


[impala] 03/03: IMPALA-11469: Make prefix of ignored staging dirs configurable

Posted by db...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dbecker pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit abcb62b676539b85c7c428ed385177f591de3492
Author: stiga-huang <hu...@gmail.com>
AuthorDate: Wed Aug 3 16:46:45 2022 +0800

    IMPALA-11469: Make prefix of ignored staging dirs configurable
    
    External systems like Hive or Spark write temporary or "non-data"
    files in the table location. Catalogd skips them when loading file
    metadata. However, the recognized prefixes are currently hard-coded.
    We recently found that Spark streaming generates a _spark_metadata
    dir which is not handled correctly.
    
    To avoid further code changes when interacting with more systems,
    this patch adds a new startup flag, ignored_dir_prefix_list, for
    catalogd. It's a comma-separated list of prefixes for ignored dirs.
    The current default value is ".,_tmp.,_spark_metadata". Users can
    add more prefixes in the future.
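    
    As a hedged illustration (startup mechanics vary by deployment, and
    the "_flink_tmp" prefix below is hypothetical), the list can be
    extended like any other catalogd gflag:
    
      # Hypothetical: additionally skip dirs starting with "_flink_tmp"
      catalogd --ignored_dir_prefix_list=".,_tmp.,_spark_metadata,_flink_tmp"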
    
    Tests:
     - Add a case for _spark_metadata in FileSystemUtilTest
    
    Change-Id: I108bfa823281a35d28932f7ccce0b12a0c5af57d
    Reviewed-on: http://gerrit.cloudera.org:8080/18811
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 be/src/util/backend-gflag-util.cc                  |  5 ++++
 common/thrift/BackendGflags.thrift                 |  2 ++
 .../org/apache/impala/common/FileSystemUtil.java   | 27 +++++++++++++++++-----
 .../org/apache/impala/service/BackendConfig.java   |  4 ++++
 .../apache/impala/common/FileSystemUtilTest.java   | 19 +++++++++++++--
 5 files changed, 49 insertions(+), 8 deletions(-)

diff --git a/be/src/util/backend-gflag-util.cc b/be/src/util/backend-gflag-util.cc
index 3e69c3dd6..9a8d17f1d 100644
--- a/be/src/util/backend-gflag-util.cc
+++ b/be/src/util/backend-gflag-util.cc
@@ -191,6 +191,10 @@ DEFINE_bool(use_hms_column_order_for_hbase_tables, false,
     "Use the column order in HMS for HBase tables instead of ordering the columns by "
     "family/qualifier. Keeping the default as false for backward compatibility.");
 
+DEFINE_string(ignored_dir_prefix_list, ".,_tmp.,_spark_metadata",
+    "Comma separated list to specify the prefix for tmp/staging dirs that catalogd should"
+    " skip in loading file metadata.");
+
 namespace impala {
 
 Status GetConfigFromCommand(const string& flag_cmd, string& result) {
@@ -330,6 +334,7 @@ Status PopulateThriftBackendGflags(TBackendGflags& cfg) {
   cfg.__set_pull_table_types_and_comments(FLAGS_pull_table_types_and_comments);
   cfg.__set_use_hms_column_order_for_hbase_tables(
       FLAGS_use_hms_column_order_for_hbase_tables);
+  cfg.__set_ignored_dir_prefix_list(FLAGS_ignored_dir_prefix_list);
   return Status::OK();
 }
 
diff --git a/common/thrift/BackendGflags.thrift b/common/thrift/BackendGflags.thrift
index 9271beecf..a3317be89 100644
--- a/common/thrift/BackendGflags.thrift
+++ b/common/thrift/BackendGflags.thrift
@@ -227,4 +227,6 @@ struct TBackendGflags {
   101: required bool pull_table_types_and_comments
 
   102: required bool use_hms_column_order_for_hbase_tables
+
+  103: required string ignored_dir_prefix_list
 }
diff --git a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
index e3c56a6f4..d3e58c0eb 100644
--- a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
+++ b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.impala.catalog.HdfsCompression;
+import org.apache.impala.service.BackendConfig;
 import org.apache.impala.util.DebugUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -44,6 +45,8 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Set;
@@ -846,12 +849,19 @@ public class FileSystemUtil {
   }
 
   /**
-   * Prefix string used by hive to write certain temporary or "non-data" files in the
-   * table location
+   * Prefix string used by tools like hive/spark/flink to write certain temporary or
+   * "non-data" files in the table location
    */
-  public static final String HIVE_TEMP_FILE_PREFIX = "_tmp.";
-
-  public static final String DOT = ".";
+  private static final List<String> TMP_DIR_PREFIX_LIST = new ArrayList<>();
+  static {
+    String s = BackendConfig.INSTANCE.getIgnoredDirPrefixList();
+    for (String prefix : s.split(",")) {
+      if (!prefix.isEmpty()) {
+        TMP_DIR_PREFIX_LIST.add(prefix);
+      }
+    }
+    LOG.info("Prefix list of ignored dirs: " + TMP_DIR_PREFIX_LIST);
+  }
 
   /**
    * Util method used to filter out hidden and temporary staging directories
@@ -861,7 +871,12 @@ public class FileSystemUtil {
   @VisibleForTesting
   static boolean isIgnoredDir(Path path) {
     String filename = path.getName();
-    return filename.startsWith(DOT) || filename.startsWith(HIVE_TEMP_FILE_PREFIX);
+    for (String prefix : TMP_DIR_PREFIX_LIST) {
+      if (filename.startsWith(prefix)) {
+        return true;
+      }
+    }
+    return false;
   }
 
   /**
diff --git a/fe/src/main/java/org/apache/impala/service/BackendConfig.java b/fe/src/main/java/org/apache/impala/service/BackendConfig.java
index 086a4c213..0261ec08e 100644
--- a/fe/src/main/java/org/apache/impala/service/BackendConfig.java
+++ b/fe/src/main/java/org/apache/impala/service/BackendConfig.java
@@ -363,4 +363,8 @@ public class BackendConfig {
   public boolean useHmsColumnOrderForHBaseTables() {
     return backendCfg_.use_hms_column_order_for_hbase_tables;
   }
+
+  public String getIgnoredDirPrefixList() {
+    return backendCfg_.ignored_dir_prefix_list;
+  }
 }
diff --git a/fe/src/test/java/org/apache/impala/common/FileSystemUtilTest.java b/fe/src/test/java/org/apache/impala/common/FileSystemUtilTest.java
index adbefe9b0..dba2bf4c4 100644
--- a/fe/src/test/java/org/apache/impala/common/FileSystemUtilTest.java
+++ b/fe/src/test/java/org/apache/impala/common/FileSystemUtilTest.java
@@ -17,7 +17,6 @@
 
 package org.apache.impala.common;
 
-import static org.apache.impala.common.FileSystemUtil.HIVE_TEMP_FILE_PREFIX;
 import static org.apache.impala.common.FileSystemUtil.isIgnoredDir;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -26,6 +25,9 @@ import static org.junit.Assert.assertEquals;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.impala.service.BackendConfig;
+import org.apache.impala.thrift.TBackendGflags;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -38,10 +40,19 @@ import java.util.List;
  * Tests for the various util methods in FileSystemUtil class
  */
 public class FileSystemUtilTest {
-
+  private static final String HIVE_TEMP_FILE_PREFIX = "_tmp.";
+  private static final String SPARK_TEMP_FILE_PREFIX = "_spark_metadata";
   private static final Path TEST_TABLE_PATH = new Path("/test-warehouse/foo"
       + ".db/filesystem-util-test");
 
+  @Before
+  public void setUp()  {
+    // Make sure BackendConfig is initialized.
+    if (BackendConfig.INSTANCE == null) {
+      BackendConfig.create(new TBackendGflags());
+    }
+  }
+
   @Test
   public void testIsInIgnoredDirectory() {
     // test positive cases
@@ -60,6 +71,10 @@ public class FileSystemUtilTest {
         testIsInIgnoredDirectory(new Path(TEST_TABLE_PATH,
             HIVE_TEMP_FILE_PREFIX + "delta_000000_2/test.manifest")));
 
+    assertTrue("Files in spark temporary directories should be ignored",
+        testIsInIgnoredDirectory(new Path(TEST_TABLE_PATH,
+            SPARK_TEMP_FILE_PREFIX + "/0")));
+
     //multiple nested levels
     assertTrue(testIsInIgnoredDirectory(new Path(TEST_TABLE_PATH,
         ".hive-staging/nested-1/nested-2/nested-3/tempfile")));


[impala] 02/03: IMPALA-9442: Add Ozone to minicluster

Posted by db...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dbecker pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 830625b10483f748d96893ca5b9ca7a236b94aec
Author: Michael Smith <mi...@cloudera.com>
AuthorDate: Wed Jun 29 16:57:09 2022 -0700

    IMPALA-9442: Add Ozone to minicluster
    
    Adds Ozone as an alternative to HDFS in the minicluster. Select it
    by setting `export TARGET_FILESYSTEM=ozone`. With that setting,
    run-mini-dfs.sh will start Ozone instead of HDFS. Requires a
    snapshot because Ozone does not support HBase (HDDS-3589); snapshot
    loading doesn't work yet, primarily due to HDDS-5502.
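    
    A minimal sketch of the intended developer flow (script names are
    those referenced here; exact steps may vary by environment):
    
      export TARGET_FILESYSTEM=ozone
      source bin/impala-config.sh    # selects the Ozone dist and an o3fs DEFAULT_FS
      testdata/bin/run-mini-dfs.sh   # starts Ozone instead of HDFS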
    
    Uses the o3fs interface because Ozone puts specific restrictions on
    bucket names (no underscores, for instance), and it was much easier
    to use an interface where everything is written to a single bucket
    than to update all of Impala's uses of HDFS-style paths to make
    `test-warehouse` a bucket inside a volume.
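    
    Concretely (host/port values follow the impala-config.sh change
    below; the table path is illustrative), o3fs roots the filesystem
    at a single bucket, while ofs would expose volume and bucket as
    path components:
    
      # o3fs: bucket "base" in volume "impala" is the filesystem root
      o3fs://base.impala.${INTERNAL_LISTEN_HOST}:9862/test-warehouse/foo
      # the ofs equivalent (not used here) would be:
      ofs://${INTERNAL_LISTEN_HOST}:9862/impala/base/test-warehouse/foo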
    
    Specifies reduced Ozone client retries during shutdown, when Ozone
    may not be available.
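    
    The reduced retries are visible in the kill-hbase.sh change below,
    which caps client failover attempts so cleanup fails fast when
    Ozone is already down:
    
      hdfs dfs -Dozone.client.failover.max.attempts=3 -rm /hbase/MasterProcWALs/* || true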
    
    Passes tests with FE_TEST=false BE_TEST=false.
    
    Change-Id: Ibf8b0f7b2d685d8b011df1926e12bf5434b5a2be
    Reviewed-on: http://gerrit.cloudera.org:8080/18738
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
    Reviewed-by: Joe McDonnell <jo...@cloudera.com>
---
 be/src/runtime/io/disk-io-mgr.cc                   |  2 +-
 bin/bootstrap_toolchain.py                         | 32 +++++++------
 bin/clean.sh                                       |  2 +-
 bin/create-test-configuration.sh                   |  4 +-
 bin/impala-config.sh                               | 52 +++++++++++++++++++---
 fe/pom.xml                                         |  1 +
 .../org/apache/impala/common/FileSystemUtil.java   |  2 +
 fe/src/test/resources/.gitignore                   |  1 +
 java/executor-deps/pom.xml                         |  6 +--
 java/pom.xml                                       |  2 +
 testdata/bin/kill-hbase.sh                         |  2 +-
 testdata/bin/run-all.sh                            | 20 ++++-----
 testdata/cluster/admin                             | 14 +++++-
 .../common/etc/hadoop/conf/core-site.xml.py        |  3 ++
 .../common/etc/hadoop/conf/ozone-site.xml.py       | 43 ++++++++++++++++++
 .../node_templates/common/etc/init.d/common.tmpl   |  2 +
 .../node_templates/common/etc/init.d/ozone-common  | 19 ++++++++
 .../common/etc/init.d/ozone-datanode               | 29 ++++++++++++
 .../node_templates/common/etc/init.d/ozone-manager | 30 +++++++++++++
 .../node_templates/common/etc/init.d/ozone-scm     | 30 +++++++++++++
 tests/authorization/test_ranger.py                 |  4 +-
 tests/common/impala_test_suite.py                  |  5 ++-
 tests/common/skip.py                               | 18 ++++++++
 tests/custom_cluster/test_coordinators.py          |  3 +-
 tests/custom_cluster/test_events_custom_configs.py |  3 +-
 tests/custom_cluster/test_hdfs_fd_caching.py       |  9 ++--
 .../test_hive_parquet_codec_interop.py             |  3 +-
 .../custom_cluster/test_hive_text_codec_interop.py |  3 +-
 tests/custom_cluster/test_insert_behaviour.py      |  3 +-
 tests/custom_cluster/test_lineage.py               |  4 +-
 tests/custom_cluster/test_local_catalog.py         |  3 +-
 tests/custom_cluster/test_local_tz_conversion.py   |  4 +-
 .../test_metadata_no_events_processing.py          |  3 +-
 tests/custom_cluster/test_metadata_replicas.py     |  2 +
 .../custom_cluster/test_parquet_max_page_header.py |  3 +-
 tests/custom_cluster/test_permanent_udfs.py        |  6 ++-
 tests/custom_cluster/test_runtime_profile.py       |  4 +-
 tests/data_errors/test_data_errors.py              |  5 ++-
 tests/failure/test_failpoints.py                   |  3 +-
 tests/metadata/test_compute_stats.py               |  6 ++-
 tests/metadata/test_ddl.py                         | 45 +++++++++----------
 tests/metadata/test_event_processing.py            |  3 +-
 tests/metadata/test_hdfs_encryption.py             |  3 +-
 tests/metadata/test_hdfs_permissions.py            |  3 +-
 tests/metadata/test_hms_integration.py             |  4 +-
 tests/metadata/test_metadata_query_statements.py   |  5 ++-
 tests/metadata/test_partition_metadata.py          |  3 +-
 tests/metadata/test_refresh_partition.py           |  3 +-
 tests/metadata/test_stats_extrapolation.py         |  3 +-
 tests/metadata/test_views_compatibility.py         |  3 +-
 tests/query_test/test_acid.py                      | 16 ++++++-
 tests/query_test/test_date_queries.py              |  3 +-
 tests/query_test/test_hbase_queries.py             |  2 +
 tests/query_test/test_hdfs_caching.py              |  6 ++-
 tests/query_test/test_insert_behaviour.py          |  9 +++-
 tests/query_test/test_insert_parquet.py            |  4 +-
 tests/query_test/test_join_queries.py              |  2 +
 tests/query_test/test_nested_types.py              |  4 ++
 tests/query_test/test_observability.py             |  4 +-
 tests/query_test/test_partitioning.py              |  3 +-
 tests/query_test/test_resource_limits.py           |  3 +-
 tests/query_test/test_scanners.py                  | 11 +++++
 tests/stress/test_acid_stress.py                   |  3 +-
 tests/stress/test_ddl_stress.py                    |  3 +-
 tests/util/filesystem_utils.py                     |  3 +-
 65 files changed, 436 insertions(+), 105 deletions(-)

diff --git a/be/src/runtime/io/disk-io-mgr.cc b/be/src/runtime/io/disk-io-mgr.cc
index 5ae8e0722..86bc7f719 100644
--- a/be/src/runtime/io/disk-io-mgr.cc
+++ b/be/src/runtime/io/disk-io-mgr.cc
@@ -841,7 +841,7 @@ int DiskIoMgr::AssignQueue(
   DCHECK(!IsOSSPath(file, check_default_fs)); // OSS/JindoFS is always remote.
   DCHECK(!IsGcsPath(file, check_default_fs)); // GCS is always remote.
   DCHECK(!IsCosPath(file, check_default_fs)); // COS is always remote.
-  DCHECK(!IsOzonePath(file, check_default_fs)); // Ozone is always remote.
+  // TODO: why is Ozone sometimes local?
   DCHECK(!IsSFSPath(file, check_default_fs)); // SFS is always remote.
   if (disk_id == -1) {
     // disk id is unknown, assign it an arbitrary one.
diff --git a/bin/bootstrap_toolchain.py b/bin/bootstrap_toolchain.py
index f0cecab12..e52cddafe 100755
--- a/bin/bootstrap_toolchain.py
+++ b/bin/bootstrap_toolchain.py
@@ -39,7 +39,7 @@
 #   other. The way to specify a single consistent set of components is via a build
 #   number. This determines the location in s3 to get the artifacts.
 # DOWNLOAD_CDH_COMPONENTS - When set to true, this script will also download and extract
-#   the CDP Hadoop components (i.e. Hadoop, Hive, HBase, Ranger, etc) into
+#   the CDP Hadoop components (i.e. Hadoop, Hive, HBase, Ranger, Ozone, etc) into
 #   CDP_COMPONENTS_HOME as appropriate.
 # IMPALA_<PACKAGE>_VERSION - The version expected for <PACKAGE>. This is typically
 #   configured in bin/impala-config.sh and must exist for every package. This is used
@@ -509,6 +509,14 @@ def get_hadoop_downloads():
   hadoop = CdpComponent("hadoop")
   hbase = CdpComponent("hbase", archive_basename_tmpl="hbase-${version}-bin",
                        unpack_directory_tmpl="hbase-${version}")
+
+  use_apache_ozone = os.environ["USE_APACHE_OZONE"] == "true"
+  if use_apache_ozone:
+    ozone = ApacheComponent("ozone", component_path_tmpl="ozone/${version}")
+  else:
+    ozone = CdpComponent("ozone", archive_basename_tmpl="hadoop-ozone-${version}",
+                        unpack_directory_tmpl="ozone-${version}")
+
   use_apache_hive = os.environ["USE_APACHE_HIVE"] == "true"
   if use_apache_hive:
     hive = ApacheComponent("hive", archive_basename_tmpl="apache-hive-${version}-bin")
@@ -516,22 +524,20 @@ def get_hadoop_downloads():
   else:
     hive = CdpComponent("hive", archive_basename_tmpl="apache-hive-${version}-bin")
     hive_src = CdpComponent("hive-source",
-                          explicit_version=os.environ.get("IMPALA_HIVE_VERSION"),
-                          archive_basename_tmpl="hive-${version}-source",
-                          unpack_directory_tmpl="hive-${version}")
-  tez = CdpComponent("tez", archive_basename_tmpl="tez-${version}-minimal",
-                     makedir=True)
+                            explicit_version=os.environ.get("IMPALA_HIVE_VERSION"),
+                            archive_basename_tmpl="hive-${version}-source",
+                            unpack_directory_tmpl="hive-${version}")
+
+  tez = CdpComponent("tez", archive_basename_tmpl="tez-${version}-minimal", makedir=True)
+  ranger = CdpComponent("ranger", archive_basename_tmpl="ranger-${version}-admin")
   use_override_hive = \
       "HIVE_VERSION_OVERRIDE" in os.environ and os.environ["HIVE_VERSION_OVERRIDE"] != ""
   # If we are using a locally built Hive we do not have a need to pull hive as a
   # dependency
-  if use_override_hive:
-    cluster_components.extend([hadoop, hbase, tez])
-  else:
-    cluster_components.extend([hadoop, hbase, hive, hive_src, tez])
-  # Ranger is always CDP
-  cluster_components.append(CdpComponent("ranger",
-                                         archive_basename_tmpl="ranger-${version}-admin"))
+  cluster_components.extend([hadoop, hbase, ozone])
+  if not use_override_hive:
+    cluster_components.extend([hive, hive_src])
+  cluster_components.extend([tez, ranger])
   return cluster_components
 
 
diff --git a/bin/clean.sh b/bin/clean.sh
index 259648a23..0e685542a 100755
--- a/bin/clean.sh
+++ b/bin/clean.sh
@@ -39,7 +39,7 @@ popd
 # don't use git clean because we need to retain Eclipse conf files
 pushd "${IMPALA_FE_DIR}"
 rm -rf target
-rm -f src/test/resources/{core,hbase,hive}-site.xml
+rm -f src/test/resources/{core,hbase,hive,ozone}-site.xml
 rm -rf generated-sources/*
 [ -z "${IMPALA_LOGS_DIR}" ] || rm -rf "${IMPALA_LOGS_DIR}"/*
 mkdir -p ${IMPALA_ALL_LOGS_DIRS}
diff --git a/bin/create-test-configuration.sh b/bin/create-test-configuration.sh
index 90fa257e8..4ee8f5fa2 100755
--- a/bin/create-test-configuration.sh
+++ b/bin/create-test-configuration.sh
@@ -126,7 +126,7 @@ echo "Ranger DB   : ${RANGER_POLICY_DB}"
 
 pushd ${CONFIG_DIR}
 # Cleanup any existing files
-rm -f {core,hdfs,hbase,hive,yarn,mapred}-site.xml
+rm -f {core,hdfs,hbase,hive,ozone,yarn,mapred}-site.xml
 rm -f authz-provider.ini
 
 # Generate hive configs first so that schemaTool can be used to init the metastore schema
@@ -200,7 +200,7 @@ fi
 
 echo "Copying common conf files from local cluster:"
 CLUSTER_HADOOP_CONF_DIR=$(${CLUSTER_DIR}/admin get_hadoop_client_conf_dir)
-for file in core-site.xml hdfs-site.xml yarn-site.xml ; do
+for file in core-site.xml hdfs-site.xml ozone-site.xml yarn-site.xml ; do
   echo ... $file
   # These need to be copied instead of symlinked so that they can be accessed when the
   # directory is bind-mounted into /opt/impala/conf in docker containers.
diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index c03b362e7..753b8b13d 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -218,6 +218,7 @@ export CDP_GCS_VERSION=2.1.2.7.2.16.0-77
 export APACHE_MIRROR
 export APACHE_HIVE_VERSION=3.1.2
 export APACHE_HIVE_STORAGE_API_VERSION=2.7.0
+export APACHE_OZONE_VERSION=1.2.1
 
 export ARCH_NAME=$(uname -p)
 
@@ -231,6 +232,7 @@ export IMPALA_COS_VERSION=3.1.0-5.9.3
 unset IMPALA_HADOOP_URL
 unset IMPALA_HBASE_URL
 unset IMPALA_HIVE_URL
+unset IMPALA_OZONE_URL
 unset IMPALA_KUDU_URL
 unset IMPALA_KUDU_VERSION
 
@@ -257,12 +259,14 @@ export CDP_HADOOP_URL=${CDP_HADOOP_URL-}
 export CDP_HBASE_URL=${CDP_HBASE_URL-}
 export CDP_HIVE_URL=${CDP_HIVE_URL-}
 export CDP_HIVE_SOURCE_URL=${CDP_HIVE_SOURCE_URL-}
+export CDP_OZONE_URL=${CDP_OZONE_URL-}
 export CDP_ICEBERG_URL=${CDP_ICEBERG_URL-}
 export CDP_RANGER_URL=${CDP_RANGER_URL-}
 export CDP_TEZ_URL=${CDP_TEZ_URL-}
 
 export APACHE_HIVE_URL=${APACHE_HIVE_URL-}
 export APACHE_HIVE_SOURCE_URL=${APACHE_HIVE_SOURCE_URL-}
+export APACHE_OZONE_URL=${APACHE_OZONE_URL-}
 
 export CDP_COMPONENTS_HOME="$IMPALA_TOOLCHAIN/cdp_components-$CDP_BUILD_NUMBER"
 export CDH_MAJOR_VERSION=7
@@ -275,7 +279,6 @@ export IMPALA_HBASE_URL=${CDP_HBASE_URL-}
 export IMPALA_ICEBERG_VERSION=${CDP_ICEBERG_VERSION}
 export IMPALA_ICEBERG_URL=${CDP_ICEBERG_URL-}
 export IMPALA_KNOX_VERSION=${CDP_KNOX_VERSION}
-export IMPALA_OZONE_VERSION=${CDP_OZONE_VERSION}
 export IMPALA_PARQUET_VERSION=${CDP_PARQUET_VERSION}
 export IMPALA_RANGER_VERSION=${CDP_RANGER_VERSION}
 export IMPALA_RANGER_URL=${CDP_RANGER_URL-}
@@ -317,6 +320,28 @@ if [[ "${IMPALA_HIVE_MAJOR_VERSION}" == "1" ||
   return 1
 fi
 
+# Defaults to Apache because Ozone in newer CDP releases is currently unmaintained
+export USE_APACHE_OZONE=${USE_APACHE_OZONE-true}
+if $USE_APACHE_OZONE; then
+  export IMPALA_OZONE_DIST_TYPE="apache-ozone"
+  export IMPALA_OZONE_VERSION=${APACHE_OZONE_VERSION}
+  export IMPALA_OZONE_URL=${APACHE_OZONE_URL-}
+else
+  export IMPALA_OZONE_DIST_TYPE="ozone"
+  export IMPALA_OZONE_VERSION=${CDP_OZONE_VERSION}
+  export IMPALA_OZONE_URL=${CDP_OZONE_URL-}
+fi
+
+# Ozone changed jar groupId and artifactId in Ozone 1.2
+export IMPALA_OZONE_MINOR_VERSION=$(echo "$IMPALA_OZONE_VERSION" | cut -d . -f 2)
+if [[ ${IMPALA_OZONE_MINOR_VERSION} < 2 ]]; then
+  export IMPALA_OZONE_JAR_GROUP_ID="org.apache.hadoop"
+  export IMPALA_OZONE_JAR_ARTIFACT_ID="hadoop-ozone-filesystem-hadoop3"
+else
+  export IMPALA_OZONE_JAR_GROUP_ID="org.apache.ozone"
+  export IMPALA_OZONE_JAR_ARTIFACT_ID="ozone-filesystem-hadoop3"
+fi
+
 # It is important to have a coherent view of the JAVA_HOME and JAVA executable.
 # The JAVA_HOME should be determined first, then the JAVA executable should be
 # derived from JAVA_HOME. bin/bootstrap_development.sh adds code to
@@ -448,8 +473,13 @@ export HIVE_METASTORE_THRIFT_DIR=${HIVE_METASTORE_THRIFT_DIR_OVERRIDE:-\
 "$HIVE_SRC_DIR/standalone-metastore/src/main/thrift"}
 export TEZ_HOME="$CDP_COMPONENTS_HOME/tez-${IMPALA_TEZ_VERSION}-minimal"
 export HBASE_HOME="$CDP_COMPONENTS_HOME/hbase-${IMPALA_HBASE_VERSION}/"
+if $USE_APACHE_OZONE; then
+  export OZONE_HOME="$APACHE_COMPONENTS_HOME/ozone-${IMPALA_OZONE_VERSION}/"
+else
+  export OZONE_HOME="$CDP_COMPONENTS_HOME/ozone-${IMPALA_OZONE_VERSION}/"
+fi
 # Set the Hive binaries in the path
-export PATH="$HIVE_HOME/bin:$PATH"
+export PATH="$HIVE_HOME/bin:$HBASE_HOME/bin:$OZONE_HOME/bin:$PATH"
 
 RANGER_POLICY_DB=${RANGER_POLICY_DB-$(cut -c-63 <<< ranger$ESCAPED_IMPALA_HOME)}
 # The DB script in Ranger expects the database name to be in lower case.
@@ -628,9 +658,13 @@ elif [ "${TARGET_FILESYSTEM}" = "hdfs" ]; then
     export HDFS_ERASURECODE_POLICY="RS-3-2-1024k"
     export HDFS_ERASURECODE_PATH="/test-warehouse"
   fi
+elif [ "${TARGET_FILESYSTEM}" = "ozone" ]; then
+  export OZONE_VOLUME="impala"
+  export OZONE_BUCKET="base"
+  export DEFAULT_FS="o3fs://${OZONE_BUCKET}.${OZONE_VOLUME}.${INTERNAL_LISTEN_HOST}:9862"
 else
   echo "Unsupported filesystem '$TARGET_FILESYSTEM'"
-  echo "Valid values are: hdfs, isilon, s3, abfs, adls, gs, local"
+  echo "Valid values are: hdfs, isilon, s3, abfs, adls, gs, local, ozone"
   return 1
 fi
 
@@ -683,6 +717,11 @@ export HADOOP_LIB_DIR=${HADOOP_LIB_DIR_OVERRIDE:-"${HADOOP_HOME}/lib"}
 # Beware of adding entries from $HADOOP_HOME here, because they can change
 # the order of the classpath, leading to configuration not showing up first.
 export HADOOP_CLASSPATH="${HADOOP_CLASSPATH-}"
+# Add Ozone Hadoop filesystem implementation when using Ozone
+if [ "${TARGET_FILESYSTEM}" = "ozone" ]; then
+  OZONE_JAR="ozone-filesystem-hadoop3-${IMPALA_OZONE_VERSION}.jar"
+  HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:${OZONE_HOME}/share/ozone/lib/${OZONE_JAR}"
+fi
 # Add the path containing the hadoop-aws jar, which is required to access AWS from the
 # minicluster.
 # Please note that the * is inside quotes, thus it won't get expanded by bash but
@@ -721,8 +760,6 @@ export AUX_CLASSPATH=""
 ### Tell hive not to use jline
 export HADOOP_USER_CLASSPATH_FIRST=true
 
-export PATH="$HBASE_HOME/bin:$PATH"
-
 # Add the jars so hive can create hbase tables.
 export AUX_CLASSPATH="$AUX_CLASSPATH:$HBASE_HOME/lib/hbase-common-${IMPALA_HBASE_VERSION}.jar"
 export AUX_CLASSPATH="$AUX_CLASSPATH:$HBASE_HOME/lib/hbase-client-${IMPALA_HBASE_VERSION}.jar"
@@ -731,6 +768,8 @@ export AUX_CLASSPATH="$AUX_CLASSPATH:$HBASE_HOME/lib/hbase-protocol-${IMPALA_HBA
 export AUX_CLASSPATH="$AUX_CLASSPATH:$HBASE_HOME/lib/hbase-hadoop-compat-${IMPALA_HBASE_VERSION}.jar"
 
 export HBASE_CONF_DIR="$IMPALA_FE_DIR/src/test/resources"
+# Suppress Ozone deprecation warning
+export OZONE_CONF_DIR="$IMPALA_FE_DIR/src/test/resources"
 
 # To use a local build of Kudu, set KUDU_BUILD_DIR to the path Kudu was built in and
 # set KUDU_CLIENT_DIR to the path KUDU was installed in.
@@ -820,6 +859,8 @@ echo "HIVE_CONF_DIR           = $HIVE_CONF_DIR"
 echo "HIVE_SRC_DIR            = $HIVE_SRC_DIR"
 echo "HBASE_HOME              = $HBASE_HOME"
 echo "HBASE_CONF_DIR          = $HBASE_CONF_DIR"
+echo "OZONE_HOME              = $OZONE_HOME"
+echo "OZONE_CONF_DIR          = $OZONE_CONF_DIR"
 echo "RANGER_HOME             = $RANGER_HOME"
 echo "RANGER_CONF_DIR         = $RANGER_CONF_DIR "
 echo "THRIFT_CPP_HOME         = $THRIFT_CPP_HOME"
@@ -846,6 +887,7 @@ echo "IMPALA_AVRO_JAVA_VERSION= $IMPALA_AVRO_JAVA_VERSION"
 echo "IMPALA_PARQUET_VERSION  = $IMPALA_PARQUET_VERSION"
 echo "IMPALA_HIVE_VERSION     = $IMPALA_HIVE_VERSION"
 echo "IMPALA_HBASE_VERSION    = $IMPALA_HBASE_VERSION"
+echo "IMPALA_OZONE_VERSION    = $IMPALA_OZONE_VERSION"
 echo "IMPALA_HUDI_VERSION     = $IMPALA_HUDI_VERSION"
 echo "IMPALA_KUDU_VERSION     = $IMPALA_KUDU_VERSION"
 echo "IMPALA_RANGER_VERSION   = $IMPALA_RANGER_VERSION"
diff --git a/fe/pom.xml b/fe/pom.xml
index b88a777ba..25496d633 100644
--- a/fe/pom.xml
+++ b/fe/pom.xml
@@ -871,6 +871,7 @@ under the License.
                     <include>org.apache.avro:*:${avro.version}</include>
                     <include>org.apache.parquet:*:${parquet.version}</include>
                     <include>org.apache.orc:*:${orc.version}</include>
+                    <include>org.apache.ozone:*:${ozone.version}</include>
                   </includes>
                 </bannedDependencies>
               </rules>
diff --git a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
index e5439d6b7..e3c56a6f4 100644
--- a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
+++ b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
@@ -110,6 +110,8 @@ public class FileSystemUtil {
           .add(SCHEME_ADL)
           .add(SCHEME_HDFS)
           .add(SCHEME_S3A)
+          .add(SCHEME_O3FS)
+          .add(SCHEME_OFS)
           .add(SCHEME_GCS)
           .add(SCHEME_COS)
           .build();
diff --git a/fe/src/test/resources/.gitignore b/fe/src/test/resources/.gitignore
index 778efab1a..c9e206f60 100644
--- a/fe/src/test/resources/.gitignore
+++ b/fe/src/test/resources/.gitignore
@@ -1,5 +1,6 @@
 hbase-env.sh
 hdfs-site.xml
+ozone-site.xml
 hive-log4j.properties
 log4j.properties
 sentry-site*.xml
diff --git a/java/executor-deps/pom.xml b/java/executor-deps/pom.xml
index d9b7616c2..06f4e6971 100644
--- a/java/executor-deps/pom.xml
+++ b/java/executor-deps/pom.xml
@@ -211,11 +211,11 @@ under the License.
     </dependency>
 
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-filesystem-hadoop3</artifactId>
+      <groupId>${ozone.groupId}</groupId>
+      <artifactId>${ozone.artifactId}</artifactId>
       <version>${ozone.version}</version>
       <!-- Remove all transitive dependencies from the Apache Ozone dependency.
-      hadoop-ozone-filesystem-hadoop3 is a shaded-jar, which already includes
+      ozone-filesystem-hadoop3 is a shaded-jar, which already includes
       all required transitive dependencies. For some reason, Ozone still pulls
       in some transitive dependencies even though they are not needed. -->
       <exclusions>
diff --git a/java/pom.xml b/java/pom.xml
index 3cb9bfd22..8bc7c4531 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -41,6 +41,8 @@ under the License.
     <hbase.version>${env.IMPALA_HBASE_VERSION}</hbase.version>
     <avro.version>${env.IMPALA_AVRO_JAVA_VERSION}</avro.version>
     <orc.version>${env.IMPALA_ORC_JAVA_VERSION}</orc.version>
+    <ozone.groupId>${env.IMPALA_OZONE_JAR_GROUP_ID}</ozone.groupId>
+    <ozone.artifactId>${env.IMPALA_OZONE_JAR_ARTIFACT_ID}</ozone.artifactId>
     <ozone.version>${env.IMPALA_OZONE_VERSION}</ozone.version>
     <parquet.version>${env.IMPALA_PARQUET_VERSION}</parquet.version>
     <kite.version>${env.IMPALA_KITE_VERSION}</kite.version>
diff --git a/testdata/bin/kill-hbase.sh b/testdata/bin/kill-hbase.sh
index 47298c8e4..2589d3032 100755
--- a/testdata/bin/kill-hbase.sh
+++ b/testdata/bin/kill-hbase.sh
@@ -38,4 +38,4 @@ rm -rf /tmp/hbase-*
 # HACK: Some jobs have seen the HBase master fail to initialize with mesages like:
 # "Master startup cannot progress, in holding-pattern until region onlined."
 # Anecdotally, removing the MasterProcWALs directory avoids the issue.
-hdfs dfs -rm /hbase/MasterProcWALs/* || true
+hdfs dfs -Dozone.client.failover.max.attempts=3 -rm /hbase/MasterProcWALs/* || true
diff --git a/testdata/bin/run-all.sh b/testdata/bin/run-all.sh
index e95476e9a..801552144 100755
--- a/testdata/bin/run-all.sh
+++ b/testdata/bin/run-all.sh
@@ -44,9 +44,10 @@ $IMPALA_HOME/testdata/bin/kill-all.sh &>${IMPALA_CLUSTER_LOGS_DIR}/kill-all.log
 # if necessary. This is not intended to be a perfect test, but it is enough to
 # detect that bin/clean.sh removed the configurations.
 pushd "${IMPALA_HOME}/fe/src/test/resources/"
-if [ ! -f core-site.xml ] || [ ! -f hbase-site.xml ] || [ ! -f hive-site.xml ]; then
-    echo "Configuration files missing, running bin/create-test-configuration.sh"
-    ${IMPALA_HOME}/bin/create-test-configuration.sh
+if [ ! -f core-site.xml ] || [ ! -f hbase-site.xml ] \
+    || [ ! -f hive-site.xml ] || [ ! -f ozone-site.xml ]; then
+  echo "Configuration files missing, running bin/create-test-configuration.sh"
+  ${IMPALA_HOME}/bin/create-test-configuration.sh
 fi
 popd
 
@@ -73,21 +74,16 @@ if [[ ${DEFAULT_FS} == "hdfs://${INTERNAL_LISTEN_HOST}:20500" ]]; then
   fi
   $IMPALA_HOME/testdata/bin/run-hive-server.sh $HIVE_FLAGS 2>&1 | \
       tee ${IMPALA_CLUSTER_LOGS_DIR}/run-hive-server.log
-
-elif [[ ${DEFAULT_FS} == "${LOCAL_FS}" ]]; then
-  # When the local file system is used as default, we only start the Hive metastore.
-  # Impala can run locally without additional services.
-  $IMPALA_HOME/testdata/bin/run-hive-server.sh -only_metastore 2>&1 | \
-      tee ${IMPALA_CLUSTER_LOGS_DIR}/run-hive-server.log
 else
-  # With Isilon, ABFS, ADLS, GCS or COS we only start the Hive metastore.
-  #   - HDFS is not started becuase remote storage is used as the defaultFs in core-site
+  # With other data stores we only start the Hive metastore.
+  #   - HDFS is not started because remote storage is used as the defaultFs in core-site
   #   - HBase is irrelevent for Impala testing with remote storage.
-  #   - We don't yet have a good way to start YARN using a different defaultFS. Moreoever,
+  #   - We don't yet have a good way to start YARN using a different defaultFS. Moreoever
   #     we currently don't run hive queries against Isilon for testing.
   #   - LLAMA is avoided because we cannot start YARN.
   #   - KMS is used for encryption testing, which is not available on remote storage.
   #   - Hive needs YARN, and we don't run Hive queries.
+  # Impala can also run on a local file system without additional services.
   # TODO: Figure out how to start YARN, LLAMA and Hive with a different defaultFs.
   echo " --> Starting Hive Metastore Service"
   $IMPALA_HOME/testdata/bin/run-hive-server.sh -only_metastore 2>&1 | \
diff --git a/testdata/cluster/admin b/testdata/cluster/admin
index 135123efe..4bce41712 100755
--- a/testdata/cluster/admin
+++ b/testdata/cluster/admin
@@ -18,7 +18,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# This will create/control/destroy a local hdfs/yarn/kms/kudu cluster.
+# This will create/control/destroy a local hdfs/yarn/kms or ozone + kudu cluster.
 #
 # All roles run on 127.0.0.1, just like the standard mini cluster included with hadoop.
 # The difference is with this cluster, each role runs in its own process and has its own
@@ -56,6 +56,8 @@ export KILL_CLUSTER_MARKER=IBelongToTheMiniCluster
 if [[ "$TARGET_FILESYSTEM" == "hdfs" ]]; then
   # The check above indicates that the regular mini-cluster is in use.
   SUPPORTED_SERVICES=(hdfs kms yarn)
+elif [[ "$TARGET_FILESYSTEM" == "ozone" ]]; then
+  SUPPORTED_SERVICES=(ozone)
 else
   # Either a remote distributed file system or a local non-distributed file system is
   # in use. Currently the only service that is expected to work is Kudu, though in theory
@@ -86,6 +88,7 @@ export YARN_WEBUI_PORT=8088   # same as default
 export KMS_WEBUI_PORT=9600    # changed to make room for non-ephemeral HBase ports
                               # (HADOOP-12811)
 export KUDU_WEBUI_PORT=8051   # same as default
+export OZONE_WEBUI_PORT=9874  # same as default for OM WebUI
 
 # Empty dirs that should be included in the templates. Since git ignores empty dirs it is
 # easier to maintain them here.
@@ -216,7 +219,7 @@ function create_cluster {
     if [[ $NODE_IDX -gt 1 ]]; then
       # Remove master role scripts from slave nodes
       rm -f "$NODE_DIR/etc/init.d/"{hdfs-namenode,yarn-resourcemanager} \
-            "$NODE_DIR/etc/init.d/"{kms,kudu-master}
+            "$NODE_DIR/etc/init.d/"{kms,kudu-master,ozone-scm,ozone-manager}
       # Only run one YARN nodemanager (more memory-efficient to scale up a
       # single NM than run several)
       rm -f "$NODE_DIR/etc/init.d/yarn-nodemanager"
@@ -322,6 +325,12 @@ function start_cluster {
     sleep 10
     check_cluster_status
   fi
+
+  if [[ "${TARGET_FILESYSTEM}" = "ozone" ]]; then
+    ozone sh volume create /${OZONE_VOLUME} || true
+    ozone sh bucket create /${OZONE_VOLUME}/${OZONE_BUCKET} || true
+  fi
+
   return $?
 }
 
@@ -460,6 +469,7 @@ function delete_data {
   # Delete namenode, datanode and KMS data while preserving directory structure.
   rm -rf "$IMPALA_CLUSTER_NODES_DIR/$NODE_PREFIX"*/data/dfs/{nn,dn}/*
   rm -f "$IMPALA_CLUSTER_NODES_DIR/$NODE_PREFIX"*/data/kms.keystore
+  rm -rf "$IMPALA_CLUSTER_NODES_DIR/$NODE_PREFIX"*/data/ozone
   delete_kudu_data
 }
 
diff --git a/testdata/cluster/node_templates/common/etc/hadoop/conf/core-site.xml.py b/testdata/cluster/node_templates/common/etc/hadoop/conf/core-site.xml.py
index b8ba65e80..d3777178f 100644
--- a/testdata/cluster/node_templates/common/etc/hadoop/conf/core-site.xml.py
+++ b/testdata/cluster/node_templates/common/etc/hadoop/conf/core-site.xml.py
@@ -121,6 +121,9 @@ if target_filesystem == 's3':
       'fs.s3a.s3guard.ddb.region': '${S3GUARD_DYNAMODB_REGION}',
     })
 
+if target_filesystem == 'ozone':
+  CONFIG.update({'fs.AbstractFileSystem.o3fs.impl': 'org.apache.hadoop.fs.ozone.OzFs'})
+
 if kerberize:
   CONFIG.update({
     'hadoop.security.authentication': 'kerberos',
diff --git a/testdata/cluster/node_templates/common/etc/hadoop/conf/ozone-site.xml.py b/testdata/cluster/node_templates/common/etc/hadoop/conf/ozone-site.xml.py
new file mode 100644
index 000000000..2da2d9726
--- /dev/null
+++ b/testdata/cluster/node_templates/common/etc/hadoop/conf/ozone-site.xml.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+CONFIG = {
+  # Host/port configs
+  'ozone.om.http-port': '${OZONE_WEBUI_PORT}',
+  'dfs.container.ratis.server.port': '${DATANODE_HTTP_PORT}',
+  'dfs.container.ratis.admin.port': '${DATANODE_HTTPS_PORT}',
+  'dfs.container.ratis.ipc': '${DATANODE_IPC_PORT}',
+  'dfs.container.ipc': '${DATANODE_PORT}',
+  'ozone.scm.block.client.address': '${INTERNAL_LISTEN_HOST}',
+  'ozone.scm.client.address': '${INTERNAL_LISTEN_HOST}',
+  'ozone.scm.names': '${INTERNAL_LISTEN_HOST}',
+  'ozone.om.address': '${INTERNAL_LISTEN_HOST}',
+  # Select a random available port
+  'hdds.datanode.http-address': '${EXTERNAL_LISTEN_HOST}:0',
+  'hdds.datanode.replication.port': '0',
+
+  # Directories
+  'ozone.metadata.dirs': '${NODE_DIR}/data/ozone',
+  'hdds.datanode.dir': '${NODE_DIR}/data/ozone/dn',
+  'ozone.om.ratis.storage.dir': '${NODE_DIR}/data/ozone/om-ratis',
+  'dfs.container.ratis.datanode.storage.dir': '${NODE_DIR}/data/ozone/ratis',
+}
diff --git a/testdata/cluster/node_templates/common/etc/init.d/common.tmpl b/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
index 525ffdb34..15fe3fe4b 100644
--- a/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
+++ b/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
@@ -21,6 +21,8 @@ LOG_DIR="$NODE_DIR/var/log"
 
 export HADOOP_CONF_DIR="$NODE_DIR/etc/hadoop/conf"
 export HADOOP_PID_DIR=$NODE_DIR/var/run
+# Suppress Ozone deprecation warning
+export OZONE_CONF_DIR="$NODE_DIR/etc/hadoop/conf"
 
 # Mark each process so they can be killed if needed. This is a safety mechanism for
 # stopping the processes if the pid file has been removed for whatever reason.
diff --git a/testdata/cluster/node_templates/common/etc/init.d/ozone-common b/testdata/cluster/node_templates/common/etc/init.d/ozone-common
new file mode 100644
index 000000000..26e3849e5
--- /dev/null
+++ b/testdata/cluster/node_templates/common/etc/init.d/ozone-common
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+export OZONE_LOG_DIR="$LOG_DIR/hadoop-ozone"
+export OZONE_OPTS="-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
diff --git a/testdata/cluster/node_templates/common/etc/init.d/ozone-datanode b/testdata/cluster/node_templates/common/etc/init.d/ozone-datanode
new file mode 100755
index 000000000..d5e1d0e2f
--- /dev/null
+++ b/testdata/cluster/node_templates/common/etc/init.d/ozone-datanode
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+DIR=$(dirname $0)
+
+. "$DIR/common"
+. "$DIR/ozone-common"
+
+function start {
+  do_start ozone datanode
+}
+
+$1
diff --git a/testdata/cluster/node_templates/common/etc/init.d/ozone-manager b/testdata/cluster/node_templates/common/etc/init.d/ozone-manager
new file mode 100755
index 000000000..3a3ff77dc
--- /dev/null
+++ b/testdata/cluster/node_templates/common/etc/init.d/ozone-manager
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+DIR=$(dirname $0)
+
+. "$DIR/common"
+. "$DIR/ozone-common"
+
+function start {
+  ozone om --init &> "$LOG_DIR/ozone-manager-init.out"
+  do_start ozone om
+}
+
+$1
diff --git a/testdata/cluster/node_templates/common/etc/init.d/ozone-scm b/testdata/cluster/node_templates/common/etc/init.d/ozone-scm
new file mode 100755
index 000000000..9a901fbe8
--- /dev/null
+++ b/testdata/cluster/node_templates/common/etc/init.d/ozone-scm
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+DIR=$(dirname $0)
+
+. "$DIR/common"
+. "$DIR/ozone-common"
+
+function start {
+  ozone scm --init &> "$LOG_DIR/ozone-scm-init.out"
+  do_start ozone scm
+}
+
+$1
diff --git a/tests/authorization/test_ranger.py b/tests/authorization/test_ranger.py
index 9bca4aa50..f68645576 100644
--- a/tests/authorization/test_ranger.py
+++ b/tests/authorization/test_ranger.py
@@ -28,7 +28,8 @@ from subprocess import check_call
 from getpass import getuser
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfLocal, SkipIfHive2, SkipIfGCS, SkipIfCOS)
+                               SkipIfLocal, SkipIfHive2, SkipIfGCS, SkipIfCOS,
+                               SkipIfOzone)
 from tests.common.test_dimensions import (create_client_protocol_dimension,
     create_exec_option_dimension, create_orc_dimension)
 from tests.util.hdfs_util import NAMENODE
@@ -1406,6 +1407,7 @@ class TestRanger(CustomClusterTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfHive2.ranger_auth
diff --git a/tests/common/impala_test_suite.py b/tests/common/impala_test_suite.py
index fd8c69cc8..2fad54e66 100644
--- a/tests/common/impala_test_suite.py
+++ b/tests/common/impala_test_suite.py
@@ -63,6 +63,7 @@ from tests.performance.query_exec_functions import execute_using_jdbc
 from tests.performance.query_executor import JdbcQueryExecConfig
 from tests.util.filesystem_utils import (
     IS_S3,
+    IS_OZONE,
     IS_ABFS,
     IS_ADLS,
     IS_GCS,
@@ -269,6 +270,8 @@ class ImpalaTestSuite(BaseTestSuite):
     elif IS_COS:
       # COS is implemented via HDFS command line client
       cls.filesystem_client = HadoopFsCommandLineClient("COS")
+    elif IS_OZONE:
+      cls.filesystem_client = HadoopFsCommandLineClient("Ozone")
 
     # Override the shell history path so that commands run by any tests
     # don't write any history into the developer's file.
@@ -1057,7 +1060,7 @@ class ImpalaTestSuite(BaseTestSuite):
     # If 'skip_hbase' is specified or the filesystem is isilon, s3, GCS(gs), COS(cosn) or
     # local, we don't need the hbase dimension.
     if pytest.config.option.skip_hbase or TARGET_FILESYSTEM.lower() \
-        in ['s3', 'isilon', 'local', 'abfs', 'adls', 'gs', 'cosn']:
+        in ['s3', 'isilon', 'local', 'abfs', 'adls', 'gs', 'cosn', 'ozone']:
       for tf_dimension in tf_dimensions:
         if tf_dimension.value.file_format == "hbase":
           tf_dimensions.remove(tf_dimension)
diff --git a/tests/common/skip.py b/tests/common/skip.py
index d7e3f0128..0657155a0 100644
--- a/tests/common/skip.py
+++ b/tests/common/skip.py
@@ -38,6 +38,7 @@ from tests.util.filesystem_utils import (
     IS_ISILON,
     IS_LOCAL,
     IS_S3,
+    IS_OZONE,
     SECONDARY_FILESYSTEM)
 
 IMPALA_TEST_CLUSTER_PROPERTIES = ImpalaTestClusterProperties.get_instance()
@@ -67,6 +68,23 @@ class SkipIfS3:
       reason="Flakiness due to unpredictable listing times on S3.")
 
 
+class SkipIfOzone:
+  # These are skipped due to product limitations.
+  caching = pytest.mark.skipif(IS_OZONE, reason="SET CACHED not implemented for Ozone")
+  hdfs_block_size = pytest.mark.skipif(IS_OZONE, reason="Ozone uses it's own block size")
+  hdfs_acls = pytest.mark.skipif(IS_OZONE, reason="HDFS acls are not supported on Ozone")
+  hdfs_encryption = pytest.mark.skipif(IS_OZONE,
+      reason="HDFS encryption is not supported with Ozone")
+  no_storage_ids = pytest.mark.skipif(IS_OZONE,
+        reason="Ozone does not return storage ids, IMPALA-10213")
+
+  # These need test infra work to re-enable.
+  hive = pytest.mark.skipif(IS_OZONE, reason="Hive not started with Ozone")
+  hbase = pytest.mark.skipif(IS_OZONE, reason="HBase not started with Ozone")
+  qualified_path = pytest.mark.skipif(IS_OZONE,
+      reason="Tests rely on HDFS qualified paths, IMPALA-1872")
+
+
 class SkipIfABFS:
 
   # These are skipped due to product limitations.
diff --git a/tests/custom_cluster/test_coordinators.py b/tests/custom_cluster/test_coordinators.py
index c6257ed31..e21c02900 100644
--- a/tests/custom_cluster/test_coordinators.py
+++ b/tests/custom_cluster/test_coordinators.py
@@ -24,7 +24,7 @@ import time
 from subprocess import check_call
 from tests.util.filesystem_utils import get_fs_path
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS,
+from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfOzone,
                                SkipIfIsilon, SkipIfGCS, SkipIfCOS, SkipIfLocal)
 
 LOG = logging.getLogger('test_coordinators')
@@ -321,6 +321,7 @@ class TestCoordinators(CustomClusterTestSuite):
     assert num_hosts in str(ret)
 
   @SkipIfS3.hbase
+  @SkipIfOzone.hbase
   @SkipIfGCS.hbase
   @SkipIfCOS.hbase
   @SkipIfABFS.hbase
diff --git a/tests/custom_cluster/test_events_custom_configs.py b/tests/custom_cluster/test_events_custom_configs.py
index fa0668f8e..827ba9d28 100644
--- a/tests/custom_cluster/test_events_custom_configs.py
+++ b/tests/custom_cluster/test_events_custom_configs.py
@@ -20,13 +20,14 @@ import pytest
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-  SkipIfGCS, SkipIfLocal)
+  SkipIfGCS, SkipIfLocal, SkipIfOzone)
 from tests.util.hive_utils import HiveDbWrapper
 from tests.util.event_processor_utils import EventProcessorUtils
 from tests.util.filesystem_utils import WAREHOUSE
 
 
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 @SkipIfGCS.hive
diff --git a/tests/custom_cluster/test_hdfs_fd_caching.py b/tests/custom_cluster/test_hdfs_fd_caching.py
index 1623a179c..b682ce5ce 100644
--- a/tests/custom_cluster/test_hdfs_fd_caching.py
+++ b/tests/custom_cluster/test_hdfs_fd_caching.py
@@ -23,7 +23,8 @@ from tests.util.filesystem_utils import (
     IS_ISILON,
     IS_ADLS,
     IS_GCS,
-    IS_COS)
+    IS_COS,
+    IS_OZONE)
 from time import sleep
 
 @SkipIfLocal.hdfs_fd_caching
@@ -132,7 +133,7 @@ class TestHdfsFdCaching(CustomClusterTestSuite):
 
     # Caching applies to HDFS, S3, and ABFS files. If this is HDFS, S3, or ABFS, then
     # verify that caching works. Otherwise, verify that file handles are not cached.
-    if IS_ADLS or IS_ISILON or IS_GCS or IS_COS:
+    if IS_ADLS or IS_ISILON or IS_GCS or IS_COS or IS_OZONE:
       caching_expected = False
     else:
       caching_expected = True
@@ -148,7 +149,7 @@ class TestHdfsFdCaching(CustomClusterTestSuite):
     handle_timeout = 5
 
     # Only test eviction on platforms where caching is enabled.
-    if IS_ADLS or IS_ISILON or IS_GCS or IS_COS:
+    if IS_ADLS or IS_ISILON or IS_GCS or IS_COS or IS_OZONE:
       return
     caching_expected = True
     self.run_fd_caching_test(vector, caching_expected, cache_capacity, handle_timeout)
@@ -176,7 +177,7 @@ class TestHdfsFdCaching(CustomClusterTestSuite):
     eviction_timeout_secs = 5
 
     # Only test eviction on platforms where caching is enabled.
-    if IS_ADLS or IS_ISILON or IS_GCS or IS_COS:
+    if IS_ADLS or IS_ISILON or IS_GCS or IS_COS or IS_OZONE:
       return
 
     # Maximum number of file handles cached.
diff --git a/tests/custom_cluster/test_hive_parquet_codec_interop.py b/tests/custom_cluster/test_hive_parquet_codec_interop.py
index 51ff2cf24..d03867b9d 100644
--- a/tests/custom_cluster/test_hive_parquet_codec_interop.py
+++ b/tests/custom_cluster/test_hive_parquet_codec_interop.py
@@ -22,7 +22,7 @@ import pytest
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.util.event_processor_utils import EventProcessorUtils
 from tests.common.environ import HIVE_MAJOR_VERSION
-from tests.common.skip import SkipIfS3, SkipIfGCS, SkipIfCOS
+from tests.common.skip import SkipIfS3, SkipIfGCS, SkipIfCOS, SkipIfOzone
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 from tests.util.filesystem_utils import get_fs_path
@@ -53,6 +53,7 @@ class TestParquetInterop(CustomClusterTestSuite):
         lambda v: v.get_value('table_format').file_format == 'parquet')
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @pytest.mark.execute_serially
diff --git a/tests/custom_cluster/test_hive_text_codec_interop.py b/tests/custom_cluster/test_hive_text_codec_interop.py
index 4b281381c..ee41e3f29 100644
--- a/tests/custom_cluster/test_hive_text_codec_interop.py
+++ b/tests/custom_cluster/test_hive_text_codec_interop.py
@@ -21,7 +21,7 @@ import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.environ import HIVE_MAJOR_VERSION
-from tests.common.skip import SkipIfS3, SkipIfGCS, SkipIfCOS
+from tests.common.skip import SkipIfS3, SkipIfGCS, SkipIfCOS, SkipIfOzone
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 
@@ -52,6 +52,7 @@ class TestTextInterop(CustomClusterTestSuite):
         lambda v: v.get_value('table_format').file_format == 'textfile')
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @pytest.mark.execute_serially
diff --git a/tests/custom_cluster/test_insert_behaviour.py b/tests/custom_cluster/test_insert_behaviour.py
index 57c222e2c..2cd0be2ee 100644
--- a/tests/custom_cluster/test_insert_behaviour.py
+++ b/tests/custom_cluster/test_insert_behaviour.py
@@ -19,7 +19,7 @@ import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal,
-                               SkipIfGCS, SkipIfCOS)
+                               SkipIfGCS, SkipIfCOS, SkipIfOzone)
 from tests.util.filesystem_utils import IS_ISILON, WAREHOUSE
 from tests.util.hdfs_util import (
     HdfsConfig,
@@ -29,6 +29,7 @@ from tests.util.hdfs_util import (
 TEST_TBL = "insert_inherit_permission"
 
 @SkipIfS3.hdfs_acls
+@SkipIfOzone.hdfs_acls
 @SkipIfGCS.hdfs_acls
 @SkipIfCOS.hdfs_acls
 @SkipIfABFS.hdfs_acls
diff --git a/tests/custom_cluster/test_lineage.py b/tests/custom_cluster/test_lineage.py
index 62b9d447f..6d08914b7 100644
--- a/tests/custom_cluster/test_lineage.py
+++ b/tests/custom_cluster/test_lineage.py
@@ -26,7 +26,8 @@ import tempfile
 import time
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfABFS, SkipIfADLS, SkipIfS3, SkipIfGCS, SkipIfCOS
+from tests.common.skip import (SkipIfABFS, SkipIfADLS, SkipIfS3, SkipIfGCS, SkipIfCOS,
+  SkipIfOzone)
 
 LOG = logging.getLogger(__name__)
 
@@ -152,6 +153,7 @@ class TestLineage(CustomClusterTestSuite):
   @SkipIfABFS.hbase
   @SkipIfADLS.hbase
   @SkipIfS3.hbase
+  @SkipIfOzone.hbase
   @SkipIfGCS.hbase
   @SkipIfCOS.hbase
   @pytest.mark.execute_serially
diff --git a/tests/custom_cluster/test_local_catalog.py b/tests/custom_cluster/test_local_catalog.py
index 6e74a4de0..1b576f569 100644
--- a/tests/custom_cluster/test_local_catalog.py
+++ b/tests/custom_cluster/test_local_catalog.py
@@ -28,7 +28,7 @@ from multiprocessing.pool import ThreadPool
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (SkipIfHive2, SkipIfS3, SkipIfABFS, SkipIfGCS, SkipIfCOS,
-                               SkipIfADLS, SkipIfIsilon, SkipIfLocal)
+                               SkipIfADLS, SkipIfIsilon, SkipIfLocal, SkipIfOzone)
 from tests.util.filesystem_utils import WAREHOUSE
 
 RETRY_PROFILE_MSG = 'Retried query planning due to inconsistent metadata'
@@ -536,6 +536,7 @@ class TestFullAcid(CustomClusterTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfGCS.hive
diff --git a/tests/custom_cluster/test_local_tz_conversion.py b/tests/custom_cluster/test_local_tz_conversion.py
index 241750b6e..1d7635a6b 100644
--- a/tests/custom_cluster/test_local_tz_conversion.py
+++ b/tests/custom_cluster/test_local_tz_conversion.py
@@ -19,7 +19,8 @@ import pytest
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_vector import ImpalaTestDimension
-from tests.common.skip import SkipIfABFS, SkipIfADLS, SkipIfS3, SkipIfGCS, SkipIfCOS
+from tests.common.skip import (SkipIfABFS, SkipIfADLS, SkipIfS3, SkipIfOzone, SkipIfGCS,
+  SkipIfCOS)
 from tests.common.test_dimensions import create_exec_option_dimension
 
 class TestLocalTzConversion(CustomClusterTestSuite):
@@ -50,6 +51,7 @@ class TestLocalTzConversion(CustomClusterTestSuite):
   @SkipIfABFS.hbase
   @SkipIfADLS.hbase
   @SkipIfS3.hbase
+  @SkipIfOzone.hbase
   @SkipIfGCS.hbase
   @SkipIfCOS.hbase
   @pytest.mark.execute_serially
diff --git a/tests/custom_cluster/test_metadata_no_events_processing.py b/tests/custom_cluster/test_metadata_no_events_processing.py
index fb40eb909..71af95cc5 100644
--- a/tests/custom_cluster/test_metadata_no_events_processing.py
+++ b/tests/custom_cluster/test_metadata_no_events_processing.py
@@ -17,10 +17,11 @@
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfGCS,
-                               SkipIfIsilon, SkipIfLocal)
+                               SkipIfIsilon, SkipIfLocal, SkipIfOzone)
 
 
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfGCS.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
diff --git a/tests/custom_cluster/test_metadata_replicas.py b/tests/custom_cluster/test_metadata_replicas.py
index 3f52bcc5e..cee6732e8 100644
--- a/tests/custom_cluster/test_metadata_replicas.py
+++ b/tests/custom_cluster/test_metadata_replicas.py
@@ -21,6 +21,7 @@ from time import sleep
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (
     SkipIfS3,
+    SkipIfOzone,
     SkipIfABFS,
     SkipIfADLS,
     SkipIfGCS,
@@ -30,6 +31,7 @@ from tests.common.skip import (
 from tests.util.hive_utils import HiveDbWrapper
 
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfGCS.hive
 @SkipIfCOS.hive
 @SkipIfABFS.hive
diff --git a/tests/custom_cluster/test_parquet_max_page_header.py b/tests/custom_cluster/test_parquet_max_page_header.py
index 9bd5ca315..63fce015c 100644
--- a/tests/custom_cluster/test_parquet_max_page_header.py
+++ b/tests/custom_cluster/test_parquet_max_page_header.py
@@ -25,7 +25,7 @@ import subprocess
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfGCS,
-                               SkipIfCOS)
+                               SkipIfCOS, SkipIfOzone)
 
 class TestParquetMaxPageHeader(CustomClusterTestSuite):
   '''This tests large page headers in parquet files. Parquet page header size can
@@ -102,6 +102,7 @@ class TestParquetMaxPageHeader(CustomClusterTestSuite):
     put.wait()
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/custom_cluster/test_permanent_udfs.py b/tests/custom_cluster/test_permanent_udfs.py
index 203182310..581e7ba40 100644
--- a/tests/custom_cluster/test_permanent_udfs.py
+++ b/tests/custom_cluster/test_permanent_udfs.py
@@ -25,7 +25,7 @@ import subprocess
 from tempfile import mkdtemp
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfGCS,
-                               SkipIfCOS, SkipIfLocal)
+                               SkipIfCOS, SkipIfLocal, SkipIfOzone)
 from tests.common.test_dimensions import create_uncompressed_text_dimension
 from tests.util.filesystem_utils import get_fs_path
 
@@ -163,6 +163,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -187,6 +188,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -254,6 +256,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -319,6 +322,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/custom_cluster/test_runtime_profile.py b/tests/custom_cluster/test_runtime_profile.py
index 7be216682..a03385962 100644
--- a/tests/custom_cluster/test_runtime_profile.py
+++ b/tests/custom_cluster/test_runtime_profile.py
@@ -17,7 +17,7 @@
 
 import pytest
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfEC
+from tests.common.skip import SkipIfEC, SkipIfOzone
 
 
 class TestRuntimeProfile(CustomClusterTestSuite):
@@ -29,6 +29,8 @@ class TestRuntimeProfile(CustomClusterTestSuite):
 
   PERIODIC_COUNTER_UPDATE_FLAG = '--periodic_counter_update_period_ms=50'
 
+  # Test depends on block size < 256MiB so the larger table is stored in at least 4 blocks.
+  @SkipIfOzone.hdfs_block_size
   @SkipIfEC.different_schedule
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args('--gen_experimental_profile=true ' +
diff --git a/tests/data_errors/test_data_errors.py b/tests/data_errors/test_data_errors.py
index a507314c1..0b3260f0e 100644
--- a/tests/data_errors/test_data_errors.py
+++ b/tests/data_errors/test_data_errors.py
@@ -26,7 +26,7 @@ import subprocess
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfGCS,
-                               SkipIfCOS, SkipIfLocal)
+                               SkipIfCOS, SkipIfLocal, SkipIfOzone)
 from tests.common.test_dimensions import create_exec_option_dimension
 
 class TestDataErrors(ImpalaTestSuite):
@@ -107,6 +107,7 @@ class TestHdfsUnknownErrors(ImpalaTestSuite):
       assert "Safe mode is OFF" in output
 
 @SkipIfS3.qualified_path
+@SkipIfOzone.qualified_path
 @SkipIfGCS.qualified_path
 @SkipIfCOS.qualified_path
 @SkipIfABFS.qualified_path
@@ -128,6 +129,7 @@ class TestHdfsScanNodeErrors(TestDataErrors):
     self.run_test_case('DataErrorsTest/hdfs-scan-node-errors', vector)
 
 @SkipIfS3.qualified_path
+@SkipIfOzone.qualified_path
 @SkipIfGCS.qualified_path
 @SkipIfCOS.qualified_path
 @SkipIfABFS.qualified_path
@@ -146,6 +148,7 @@ class TestHdfsSeqScanNodeErrors(TestHdfsScanNodeErrors):
 
 
 @SkipIfS3.qualified_path
+@SkipIfOzone.qualified_path
 @SkipIfGCS.qualified_path
 @SkipIfCOS.qualified_path
 @SkipIfABFS.qualified_path
diff --git a/tests/failure/test_failpoints.py b/tests/failure/test_failpoints.py
index d885962f9..1d6e23891 100644
--- a/tests/failure/test_failpoints.py
+++ b/tests/failure/test_failpoints.py
@@ -28,7 +28,7 @@ from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_vector import ImpalaTestDimension
 from tests.verifiers.metric_verifier import MetricVerifier
@@ -63,6 +63,7 @@ QUERIES = [
 
 @SkipIf.skip_hbase # -skip_hbase argument specified
 @SkipIfS3.hbase # S3: missing coverage: failures
+@SkipIfOzone.hbase
 @SkipIfGCS.hbase
 @SkipIfCOS.hbase
 @SkipIfABFS.hbase
diff --git a/tests/metadata/test_compute_stats.py b/tests/metadata/test_compute_stats.py
index 889f0b330..69fa50739 100644
--- a/tests/metadata/test_compute_stats.py
+++ b/tests/metadata/test_compute_stats.py
@@ -22,7 +22,8 @@ from tests.common.environ import ImpalaTestClusterProperties
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfCatalogV2)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfCatalogV2,
+                               SkipIfOzone)
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_single_exec_option_dimension,
@@ -114,6 +115,7 @@ class TestComputeStats(ImpalaTestSuite):
         self.execute_query_expect_success(self.client, "drop stats {0}".format(table))
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -195,6 +197,7 @@ class TestComputeStats(ImpalaTestSuite):
          assert("cardinality=0" not in explain_result.data[i + 2])
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -244,6 +247,7 @@ class TestComputeStats(ImpalaTestSuite):
             table_name, 2, 2)
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/metadata/test_ddl.py b/tests/metadata/test_ddl.py
index 7584ae5ed..2183a788e 100644
--- a/tests/metadata/test_ddl.py
+++ b/tests/metadata/test_ddl.py
@@ -31,7 +31,7 @@ from tests.common.impala_test_suite import LOG
 from tests.common.parametrize import UniqueDatabase
 from tests.common.skip import (SkipIf, SkipIfABFS, SkipIfADLS, SkipIfKudu, SkipIfLocal,
                                SkipIfCatalogV2, SkipIfHive2, SkipIfS3, SkipIfGCS,
-                               SkipIfCOS)
+                               SkipIfCOS, SkipIfOzone)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.common.test_dimensions import (create_exec_option_dimension,
     create_client_protocol_dimension)
@@ -42,11 +42,15 @@ from tests.util.filesystem_utils import (
     IS_HDFS,
     IS_S3,
     IS_ADLS,
+    IS_OZONE,
     FILESYSTEM_NAME)
 from tests.common.impala_cluster import ImpalaCluster
 from tests.util.filesystem_utils import FILESYSTEM_PREFIX
 
 
+TRASH_PATH = ('.Trash/{0}/Current' if IS_OZONE else 'user/{0}/.Trash/Current').\
+  format(getpass.getuser())
+
 # Validates DDL statements (create, drop)
 class TestDdlStatements(TestDdlBase):
   @SkipIfLocal.hdfs_client
@@ -66,12 +70,10 @@ class TestDdlStatements(TestDdlBase):
         format(unique_database))
     assert not self.filesystem_client.exists("test-warehouse/{0}.db/t1/".\
         format(unique_database))
-    assert self.filesystem_client.exists(\
-        "user/{0}/.Trash/Current/test-warehouse/{1}.db/t1/t1.txt".\
-        format(getpass.getuser(), unique_database))
-    assert self.filesystem_client.exists(\
-        "user/{0}/.Trash/Current/test-warehouse/{1}.db/t1".\
-        format(getpass.getuser(), unique_database))
+    assert self.filesystem_client.exists(
+      '{0}/test-warehouse/{1}.db/t1/t1.txt'.format(TRASH_PATH, unique_database))
+    assert self.filesystem_client.exists(
+      '{0}/test-warehouse/{1}.db/t1'.format(TRASH_PATH, unique_database))
     # Drop the table (with purge) and make sure it doesn't exist in trash
     self.client.execute("drop table {0}.t2 purge".format(unique_database))
     if not IS_S3 and not IS_ADLS:
@@ -85,12 +87,10 @@ class TestDdlStatements(TestDdlBase):
           format(unique_database))
       assert not self.filesystem_client.exists("test-warehouse/{0}.db/t2/t2.txt".\
           format(unique_database))
-    assert not self.filesystem_client.exists(\
-        "user/{0}/.Trash/Current/test-warehouse/{1}.db/t2/t2.txt".\
-        format(getpass.getuser(), unique_database))
-    assert not self.filesystem_client.exists(\
-        "user/{0}/.Trash/Current/test-warehouse/{1}.db/t2".\
-        format(getpass.getuser(), unique_database))
+    assert not self.filesystem_client.exists(
+      '{0}/test-warehouse/{1}.db/t2/t2.txt'.format(TRASH_PATH, unique_database))
+    assert not self.filesystem_client.exists(
+      '{0}/test-warehouse/{1}.db/t2'.format(TRASH_PATH, unique_database))
     # Create an external table t3 and run the same test as above. Make
     # sure the data is not deleted
     self.filesystem_client.make_dir(
@@ -306,6 +306,7 @@ class TestDdlStatements(TestDdlBase):
 
   @SkipIfHive2.orc
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @UniqueDatabase.parametrize(sync_ddl=True)
@@ -496,12 +497,10 @@ class TestDdlStatements(TestDdlBase):
         format(unique_database))
     assert not self.filesystem_client.exists("test-warehouse/{0}.db/t1/j=1".\
         format(unique_database))
-    assert self.filesystem_client.exists(\
-        "user/{0}/.Trash/Current/test-warehouse/{1}.db/t1/j=1/j1.txt".\
-        format(getpass.getuser(), unique_database))
-    assert self.filesystem_client.exists(\
-        "user/{0}/.Trash/Current/test-warehouse/{1}.db/t1/j=1".\
-        format(getpass.getuser(), unique_database))
+    assert self.filesystem_client.exists(
+      '{0}/test-warehouse/{1}.db/t1/j=1/j1.txt'.format(TRASH_PATH, unique_database))
+    assert self.filesystem_client.exists(
+      '{0}/test-warehouse/{1}.db/t1/j=1'.format(TRASH_PATH, unique_database))
     # Drop the partition (with purge) and make sure it doesn't exist in trash
     self.client.execute("alter table {0}.t1 drop partition(j=2) purge".\
         format(unique_database));
@@ -516,12 +515,10 @@ class TestDdlStatements(TestDdlBase):
           format(unique_database))
       assert not self.filesystem_client.exists("test-warehouse/{0}.db/t1/j=2".\
           format(unique_database))
-    assert not self.filesystem_client.exists(\
-        "user/{0}/.Trash/Current/test-warehouse/{1}.db/t1/j=2".\
-        format(getpass.getuser(), unique_database))
     assert not self.filesystem_client.exists(
-        "user/{0}/.Trash/Current/test-warehouse/{1}.db/t1/j=2/j2.txt".\
-        format(getpass.getuser(), unique_database))
+      '{0}/test-warehouse/{1}.db/t1/j=2/j2.txt'.format(TRASH_PATH, unique_database))
+    assert not self.filesystem_client.exists(
+      '{0}/test-warehouse/{1}.db/t1/j=2'.format(TRASH_PATH, unique_database))
 
   @UniqueDatabase.parametrize(sync_ddl=True)
   def test_views_ddl(self, vector, unique_database):
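
The TRASH_PATH constant introduced above folds the Ozone difference into
one place: Ozone keeps its trash directory at the bucket root rather than
under a per-user home directory, so only the path prefix changes. A worked
example of how the constant resolves, assuming a hypothetical user name
"jenkins":

    import getpass

    IS_OZONE = False  # assumption for the example; True on an Ozone minicluster
    TRASH_PATH = ('.Trash/{0}/Current' if IS_OZONE
                  else 'user/{0}/.Trash/Current').format(getpass.getuser())
    # For user "jenkins" this yields:
    #   HDFS:  user/jenkins/.Trash/Current
    #   Ozone: .Trash/jenkins/Current
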
diff --git a/tests/metadata/test_event_processing.py b/tests/metadata/test_event_processing.py
index 9080934c1..eeac52914 100644
--- a/tests/metadata/test_event_processing.py
+++ b/tests/metadata/test_event_processing.py
@@ -17,11 +17,12 @@
 from tests.common.skip import SkipIfHive2, SkipIfCatalogV2
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfLocal)
+                               SkipIfGCS, SkipIfLocal, SkipIfOzone)
 from tests.util.event_processor_utils import EventProcessorUtils
 
 
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 @SkipIfGCS.hive
diff --git a/tests/metadata/test_hdfs_encryption.py b/tests/metadata/test_hdfs_encryption.py
index bb6cba530..06724d120 100644
--- a/tests/metadata/test_hdfs_encryption.py
+++ b/tests/metadata/test_hdfs_encryption.py
@@ -20,7 +20,7 @@ import pytest
 
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -35,6 +35,7 @@ TMP_DIR = '/%s' % (PYWEBHDFS_TMP_DIR)
 
 
 @SkipIfS3.hdfs_encryption
+@SkipIfOzone.hdfs_encryption
 @SkipIfGCS.hdfs_encryption
 @SkipIfCOS.hdfs_encryption
 @SkipIfABFS.hdfs_encryption
diff --git a/tests/metadata/test_hdfs_permissions.py b/tests/metadata/test_hdfs_permissions.py
index e505165b0..d0e74e368 100644
--- a/tests/metadata/test_hdfs_permissions.py
+++ b/tests/metadata/test_hdfs_permissions.py
@@ -17,7 +17,7 @@
 
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal,
-                               SkipIfGCS, SkipIfCOS, SkipIfCatalogV2)
+                               SkipIfGCS, SkipIfCOS, SkipIfCatalogV2, SkipIfOzone)
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -29,6 +29,7 @@ TBL_LOC = '%s/%s' % (WAREHOUSE, TEST_TBL)
 
 
 @SkipIfS3.hdfs_acls
+@SkipIfOzone.hdfs_acls
 @SkipIfGCS.hdfs_acls
 @SkipIfCOS.hdfs_acls
 @SkipIfABFS.hdfs_acls
diff --git a/tests/metadata/test_hms_integration.py b/tests/metadata/test_hms_integration.py
index 100cdfe68..e303c626b 100644
--- a/tests/metadata/test_hms_integration.py
+++ b/tests/metadata/test_hms_integration.py
@@ -33,7 +33,7 @@ from tests.common.environ import HIVE_MAJOR_VERSION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfHive2, SkipIfHive3,
                                SkipIfIsilon, SkipIfGCS, SkipIfCOS, SkipIfLocal,
-                               SkipIfCatalogV2)
+                               SkipIfOzone)
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -42,6 +42,7 @@ from tests.util.hive_utils import HiveDbWrapper, HiveTableWrapper
 
 
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfGCS.hive
 @SkipIfCOS.hive
 @SkipIfABFS.hive
@@ -153,6 +154,7 @@ class TestHmsIntegrationSanity(ImpalaTestSuite):
       assert False
 
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfGCS.hive
 @SkipIfCOS.hive
 @SkipIfABFS.hive
diff --git a/tests/metadata/test_metadata_query_statements.py b/tests/metadata/test_metadata_query_statements.py
index 9cd301966..ed2f54302 100644
--- a/tests/metadata/test_metadata_query_statements.py
+++ b/tests/metadata/test_metadata_query_statements.py
@@ -23,7 +23,8 @@ import re
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfIsilon, SkipIfS3, SkipIfABFS, SkipIfADLS,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfCatalogV2)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfCatalogV2,
+                               SkipIfOzone)
 from tests.common.test_dimensions import ALL_NODES_ONLY
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_dimensions import create_uncompressed_text_dimension
@@ -77,6 +78,7 @@ class TestMetadataQueryStatements(ImpalaTestSuite):
   # data doesn't reside in hdfs.
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -170,6 +172,7 @@ class TestMetadataQueryStatements(ImpalaTestSuite):
       self.client.execute(self.CREATE_DATA_SRC_STMT % (name,))
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/metadata/test_partition_metadata.py b/tests/metadata/test_partition_metadata.py
index 363014761..b68e2cd91 100644
--- a/tests/metadata/test_partition_metadata.py
+++ b/tests/metadata/test_partition_metadata.py
@@ -18,7 +18,7 @@
 import pytest
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
 from tests.common.test_dimensions import (create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
 from tests.util.filesystem_utils import get_fs_path, WAREHOUSE, FILESYSTEM_PREFIX
@@ -90,6 +90,7 @@ class TestPartitionMetadata(ImpalaTestSuite):
     assert data.split('\t') == ['6', '9']
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/metadata/test_refresh_partition.py b/tests/metadata/test_refresh_partition.py
index 304664718..cf2c63474 100644
--- a/tests/metadata/test_refresh_partition.py
+++ b/tests/metadata/test_refresh_partition.py
@@ -18,11 +18,12 @@ from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.common.test_dimensions import create_uncompressed_text_dimension
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
 from tests.util.filesystem_utils import get_fs_path
 
 
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfGCS.hive
 @SkipIfCOS.hive
 @SkipIfABFS.hive
diff --git a/tests/metadata/test_stats_extrapolation.py b/tests/metadata/test_stats_extrapolation.py
index 8de917d62..8add7ebe8 100644
--- a/tests/metadata/test_stats_extrapolation.py
+++ b/tests/metadata/test_stats_extrapolation.py
@@ -17,7 +17,7 @@
 
 from os import path
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfEC
+from tests.common.skip import SkipIfEC, SkipIfOzone
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_single_exec_option_dimension,
@@ -39,6 +39,7 @@ class TestStatsExtrapolation(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
         create_uncompressed_text_dimension(cls.get_workload()))
 
+  @SkipIfOzone.no_storage_ids
   @SkipIfEC.contain_full_explain
   def test_stats_extrapolation(self, vector, unique_database):
     vector.get_value('exec_option')['num_nodes'] = 1
diff --git a/tests/metadata/test_views_compatibility.py b/tests/metadata/test_views_compatibility.py
index ffb2107a8..69a57acad 100644
--- a/tests/metadata/test_views_compatibility.py
+++ b/tests/metadata/test_views_compatibility.py
@@ -25,7 +25,7 @@ from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.environ import HIVE_MAJOR_VERSION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
 from tests.common.test_dimensions import create_uncompressed_text_dimension
 from tests.util.test_file_parser import QueryTestSectionReader
 
@@ -49,6 +49,7 @@ from tests.util.test_file_parser import QueryTestSectionReader
 # Missing Coverage: Views created by Hive and Impala being visible and queryable by each
 # other on non-HDFS storage.
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfGCS.hive
 @SkipIfCOS.hive
 @SkipIfABFS.hive
diff --git a/tests/query_test/test_acid.py b/tests/query_test/test_acid.py
index fbe89438a..1335e1454 100644
--- a/tests/query_test/test_acid.py
+++ b/tests/query_test/test_acid.py
@@ -26,7 +26,7 @@ from subprocess import check_call
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIf, SkipIfHive2, SkipIfCatalogV2, SkipIfS3, SkipIfABFS,
                                SkipIfADLS, SkipIfIsilon, SkipIfGCS, SkipIfCOS,
-                               SkipIfLocal)
+                               SkipIfLocal, SkipIfOzone)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.acid_txn import AcidTxn
 
@@ -47,6 +47,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -66,6 +67,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -77,6 +79,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -88,6 +91,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -101,6 +105,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -112,6 +117,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -133,6 +139,7 @@ class TestAcid(ImpalaTestSuite):
   @SkipIfHive2.acid
   @SkipIfCatalogV2.hms_event_polling_enabled()
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -148,6 +155,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -172,6 +180,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -190,6 +199,7 @@ class TestAcid(ImpalaTestSuite):
     assert "2" in result
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -210,6 +220,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -226,6 +237,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -305,6 +317,7 @@ class TestAcid(ImpalaTestSuite):
     return self.hive_client.commit_txn(commit_req)
 
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -352,6 +365,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/query_test/test_date_queries.py b/tests/query_test/test_date_queries.py
index 34622f263..ae64cdd88 100644
--- a/tests/query_test/test_date_queries.py
+++ b/tests/query_test/test_date_queries.py
@@ -21,7 +21,7 @@ import pytest
 from tests.common.file_utils import create_table_and_copy_files
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal, SkipIfGCS,
-                               SkipIfCOS)
+                               SkipIfCOS, SkipIfOzone)
 from tests.common.test_dimensions import (create_exec_option_dimension_from_dict,
     create_client_protocol_dimension, hs2_parquet_constraint)
 from tests.shell.util import create_impala_shell_executable_dimension
@@ -72,6 +72,7 @@ class TestDateQueries(ImpalaTestSuite):
     self.run_test_case('QueryTest/date-partitioning', vector, use_db=unique_database)
 
   @SkipIfS3.qualified_path
+  @SkipIfOzone.qualified_path
   @SkipIfGCS.qualified_path
   @SkipIfCOS.qualified_path
   @SkipIfABFS.qualified_path
diff --git a/tests/query_test/test_hbase_queries.py b/tests/query_test/test_hbase_queries.py
index a209adac4..f92d1b057 100644
--- a/tests/query_test/test_hbase_queries.py
+++ b/tests/query_test/test_hbase_queries.py
@@ -22,6 +22,7 @@ import pytest
 from tests.common.skip import (
     SkipIfIsilon,
     SkipIfS3,
+    SkipIfOzone,
     SkipIfGCS,
     SkipIfCOS,
     SkipIfABFS,
@@ -68,6 +69,7 @@ class TestHBaseQueries(ImpalaTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/query_test/test_hdfs_caching.py b/tests/query_test/test_hdfs_caching.py
index 305c376ec..666829a63 100644
--- a/tests/query_test/test_hdfs_caching.py
+++ b/tests/query_test/test_hdfs_caching.py
@@ -27,13 +27,14 @@ from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
                                SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfEC,
-                               SkipIfDockerizedCluster, SkipIfCatalogV2)
+                               SkipIfDockerizedCluster, SkipIfOzone)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.filesystem_utils import get_fs_path
 from tests.util.shell_util import exec_process
 
 # End to end test that hdfs caching is working.
 @SkipIfS3.caching # S3: missing coverage: verify SET CACHED gives error
+@SkipIfOzone.caching
 @SkipIfGCS.caching
 @SkipIfCOS.caching
 @SkipIfABFS.caching
@@ -116,6 +117,7 @@ class TestHdfsCaching(ImpalaTestSuite):
 # run as a part of exhaustive tests which require the workload to be 'functional-query'.
 # TODO: Move this to TestHdfsCaching once we make exhaustive tests run for other workloads
 @SkipIfS3.caching
+@SkipIfOzone.caching
 @SkipIfGCS.caching
 @SkipIfCOS.caching
 @SkipIfABFS.caching
@@ -128,6 +130,7 @@ class TestHdfsCachingFallbackPath(ImpalaTestSuite):
     return 'functional-query'
 
   @SkipIfS3.hdfs_encryption
+  @SkipIfOzone.hdfs_encryption
   @SkipIfGCS.hdfs_encryption
   @SkipIfCOS.hdfs_encryption
   @SkipIfABFS.hdfs_encryption
@@ -182,6 +185,7 @@ class TestHdfsCachingFallbackPath(ImpalaTestSuite):
 
 
 @SkipIfS3.caching
+@SkipIfOzone.caching
 @SkipIfGCS.caching
 @SkipIfCOS.caching
 @SkipIfABFS.caching
diff --git a/tests/query_test/test_insert_behaviour.py b/tests/query_test/test_insert_behaviour.py
index 8a3c6c1c2..bef5a6ef5 100644
--- a/tests/query_test/test_insert_behaviour.py
+++ b/tests/query_test/test_insert_behaviour.py
@@ -25,7 +25,7 @@ from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
                                SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfDockerizedCluster,
-                               SkipIfCatalogV2)
+                               SkipIfCatalogV2, SkipIfOzone)
 from tests.util.filesystem_utils import WAREHOUSE, get_fs_path, IS_S3
 
 @SkipIfLocal.hdfs_client
@@ -134,6 +134,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     assert len(self.filesystem_client.ls(part_dir)) == 1
 
   @SkipIfS3.hdfs_acls
+  @SkipIfOzone.hdfs_acls
   @SkipIfGCS.hdfs_acls
   @SkipIfCOS.hdfs_acls
   @SkipIfABFS.hdfs_acls
@@ -198,6 +199,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     check_has_acls("p1=1/p2=2/p3=30", "default:group:new_leaf_group:-w-")
 
   @SkipIfS3.hdfs_acls
+  @SkipIfOzone.hdfs_acls
   @SkipIfGCS.hdfs_acls
   @SkipIfCOS.hdfs_acls
   @SkipIfABFS.hdfs_acls
@@ -253,6 +255,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.execute_query_expect_success(self.client, insert_query)
 
   @SkipIfS3.hdfs_acls
+  @SkipIfOzone.hdfs_acls
   @SkipIfGCS.hdfs_acls
   @SkipIfCOS.hdfs_acls
   @SkipIfABFS.hdfs_acls
@@ -337,6 +340,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     load_data(self.execute_query_expect_success, "added_part")
 
   @SkipIfS3.hdfs_acls
+  @SkipIfOzone.hdfs_acls
   @SkipIfGCS.hdfs_acls
   @SkipIfCOS.hdfs_acls
   @SkipIfABFS.hdfs_acls
@@ -372,6 +376,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     assert re.search(r'Impala does not have WRITE access.*' + table_path, str(err))
 
   @SkipIfS3.hdfs_acls
+  @SkipIfOzone.hdfs_acls
   @SkipIfGCS.hdfs_acls
   @SkipIfCOS.hdfs_acls
   @SkipIfABFS.hdfs_acls
@@ -455,6 +460,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.execute_query_expect_success(self.client, insert_query)
 
   @SkipIfS3.hdfs_acls
+  @SkipIfOzone.hdfs_acls
   @SkipIfGCS.hdfs_acls
   @SkipIfCOS.hdfs_acls
   @SkipIfABFS.hdfs_acls
@@ -584,6 +590,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.execute_query_expect_failure(self.client, insert_query)
 
   @SkipIfS3.hdfs_acls
+  @SkipIfOzone.hdfs_acls
   @SkipIfGCS.hdfs_acls
   @SkipIfCOS.hdfs_acls
   @SkipIfABFS.hdfs_acls
diff --git a/tests/query_test/test_insert_parquet.py b/tests/query_test/test_insert_parquet.py
index 8cbdba687..8bd71757d 100644
--- a/tests/query_test/test_insert_parquet.py
+++ b/tests/query_test/test_insert_parquet.py
@@ -29,7 +29,7 @@ from tests.common.environ import impalad_basedir
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
 from tests.common.skip import (SkipIfEC, SkipIfIsilon, SkipIfLocal, SkipIfS3, SkipIfABFS,
-                               SkipIfADLS, SkipIfGCS, SkipIfCOS)
+                               SkipIfADLS, SkipIfGCS, SkipIfCOS, SkipIfOzone)
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 from tests.common.test_vector import ImpalaTestDimension
@@ -536,6 +536,7 @@ class TestHdfsParquetTableWriter(ImpalaTestSuite):
   # precision up to 16 decimal digits and the test needs 17.
   # IMPALA-9365 describes why HS2 is not started on non-HDFS test env.
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -558,6 +559,7 @@ class TestHdfsParquetTableWriter(ImpalaTestSuite):
 @SkipIfIsilon.hive
 @SkipIfLocal.hive
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfGCS.hive
 @SkipIfCOS.hive
 @SkipIfABFS.hive
diff --git a/tests/query_test/test_join_queries.py b/tests/query_test/test_join_queries.py
index 6ecd5c0a9..29993f5d9 100644
--- a/tests/query_test/test_join_queries.py
+++ b/tests/query_test/test_join_queries.py
@@ -26,6 +26,7 @@ from tests.common.skip import (
     SkipIfIsilon,
     SkipIfLocal,
     SkipIfS3,
+    SkipIfOzone,
     SkipIfGCS,
     SkipIfCOS,
     SkipIfABFS,
@@ -81,6 +82,7 @@ class TestJoinQueries(ImpalaTestSuite):
     self.run_test_case('QueryTest/single-node-joins-with-limits-exhaustive', new_vector)
 
   @SkipIfS3.hbase
+  @SkipIfOzone.hbase
   @SkipIfGCS.hbase
   @SkipIfCOS.hbase
   @SkipIfABFS.hbase
diff --git a/tests/query_test/test_nested_types.py b/tests/query_test/test_nested_types.py
index 196b7c69a..d28314d26 100644
--- a/tests/query_test/test_nested_types.py
+++ b/tests/query_test/test_nested_types.py
@@ -26,6 +26,7 @@ from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (
     SkipIfIsilon,
     SkipIfS3,
+    SkipIfOzone,
     SkipIfGCS,
     SkipIfCOS,
     SkipIfABFS,
@@ -318,6 +319,7 @@ class TestNestedTypesNoMtDop(ImpalaTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
@@ -378,6 +380,7 @@ class TestNestedTypesNoMtDop(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfHive2.acid
@@ -883,6 +886,7 @@ class TestMaxNestingDepth(ImpalaTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/query_test/test_observability.py b/tests/query_test/test_observability.py
index 776e1c686..aae450604 100644
--- a/tests/query_test/test_observability.py
+++ b/tests/query_test/test_observability.py
@@ -20,7 +20,7 @@ from datetime import datetime
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal,
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone,
                                SkipIfNotHdfsMinicluster)
 from tests.util.filesystem_utils import IS_EC
 from time import sleep
@@ -93,6 +93,7 @@ class TestObservability(ImpalaTestSuite):
     self.hs2_client.close_query(handle)
 
   @SkipIfS3.hbase
+  @SkipIfOzone.hbase
   @SkipIfGCS.hbase
   @SkipIfCOS.hbase
   @SkipIfLocal.hbase
@@ -676,6 +677,7 @@ class TestObservability(ImpalaTestSuite):
         cluster_properties)
 
   @SkipIfS3.hbase
+  @SkipIfOzone.hbase
   @SkipIfGCS.hbase
   @SkipIfCOS.hbase
   @SkipIfLocal.hbase
diff --git a/tests/query_test/test_partitioning.py b/tests/query_test/test_partitioning.py
index e1385591d..4bcc792de 100644
--- a/tests/query_test/test_partitioning.py
+++ b/tests/query_test/test_partitioning.py
@@ -21,7 +21,7 @@ import pytest
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 
 # Tests to validate HDFS partitioning.
@@ -48,6 +48,7 @@ class TestPartitioning(ImpalaTestSuite):
   # Missing Coverage: Impala deals with boolean partitions created by Hive on a non-hdfs
   # filesystem.
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfABFS.hive
diff --git a/tests/query_test/test_resource_limits.py b/tests/query_test/test_resource_limits.py
index 1e78228ae..9b0d0d347 100644
--- a/tests/query_test/test_resource_limits.py
+++ b/tests/query_test/test_resource_limits.py
@@ -17,7 +17,7 @@
 
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfEC, SkipIfLocal, SkipIfS3, SkipIfABFS,
-                               SkipIfGCS, SkipIfCOS, SkipIfADLS)
+                               SkipIfGCS, SkipIfCOS, SkipIfADLS, SkipIfOzone)
 from tests.common.test_dimensions import create_parquet_dimension
 
 
@@ -46,6 +46,7 @@ class TestResourceLimits(ImpalaTestSuite):
     self.run_test_case('QueryTest/query-resource-limits', vector)
 
   @SkipIfS3.hbase
+  @SkipIfOzone.hbase
   @SkipIfGCS.hbase
   @SkipIfCOS.hbase
   @SkipIfADLS.hbase
diff --git a/tests/query_test/test_scanners.py b/tests/query_test/test_scanners.py
index 2bb4b97ed..ab64c6b64 100644
--- a/tests/query_test/test_scanners.py
+++ b/tests/query_test/test_scanners.py
@@ -35,6 +35,7 @@ from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.skip import (
     SkipIf,
     SkipIfS3,
+    SkipIfOzone,
     SkipIfGCS,
     SkipIfCOS,
     SkipIfABFS,
@@ -486,6 +487,7 @@ class TestParquet(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   def test_multi_compression_types(self, vector, unique_database):
@@ -615,6 +617,7 @@ class TestParquet(ImpalaTestSuite):
     assert "AAAAAAAACPKFFAAA" in result.data
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfOzone.hdfs_block_size
   @SkipIfGCS.hdfs_block_size
   @SkipIfCOS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
@@ -674,6 +677,7 @@ class TestParquet(ImpalaTestSuite):
     assert total == num_scanners_with_no_reads
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfOzone.hdfs_block_size
   @SkipIfGCS.hdfs_block_size
   @SkipIfCOS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
@@ -725,6 +729,7 @@ class TestParquet(ImpalaTestSuite):
       self.client.clear_configuration()
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfOzone.hdfs_block_size
   @SkipIfGCS.hdfs_block_size
   @SkipIfCOS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
@@ -743,6 +748,7 @@ class TestParquet(ImpalaTestSuite):
     self._multiple_blocks_helper(table_name, 40000, ranges_per_node=2)
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfOzone.hdfs_block_size
   @SkipIfGCS.hdfs_block_size
   @SkipIfCOS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
@@ -1406,6 +1412,7 @@ class TestTextScanRangeLengths(ImpalaTestSuite):
 
 # Missing Coverage: No coverage for truncated file errors or scans.
 @SkipIfS3.hive
+@SkipIfOzone.hive
 @SkipIfGCS.hive
 @SkipIfCOS.hive
 @SkipIfABFS.hive
@@ -1492,6 +1499,7 @@ class TestOrc(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('orc_schema_resolution', 0, 1))
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfOzone.hdfs_block_size
   @SkipIfGCS.hdfs_block_size
   @SkipIfCOS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
@@ -1574,6 +1582,7 @@ class TestOrc(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfHive3.non_acid
@@ -1625,6 +1634,7 @@ class TestOrc(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @SkipIfHive2.acid
@@ -1751,6 +1761,7 @@ class TestOrc(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   def test_missing_field_orc(self, unique_database):
diff --git a/tests/stress/test_acid_stress.py b/tests/stress/test_acid_stress.py
index 3c3ce3a7e..4a90ab25b 100644
--- a/tests/stress/test_acid_stress.py
+++ b/tests/stress/test_acid_stress.py
@@ -24,7 +24,7 @@ from multiprocessing import Value
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
 from tests.common.skip import (SkipIf, SkipIfHive2, SkipIfS3, SkipIfGCS, SkipIfCOS,
-                               SkipIfDockerizedCluster)
+                               SkipIfDockerizedCluster, SkipIfOzone)
 from tests.stress.stress_util import Task, run_tasks
 
 NUM_OVERWRITES = 2
@@ -161,6 +161,7 @@ class TestAcidInsertsBasic(TestAcidStress):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfOzone.hive
   @SkipIfGCS.hive
   @SkipIfCOS.hive
   @pytest.mark.execute_serially
diff --git a/tests/stress/test_ddl_stress.py b/tests/stress/test_ddl_stress.py
index 8117cfc8d..3cd433d56 100644
--- a/tests/stress/test_ddl_stress.py
+++ b/tests/stress/test_ddl_stress.py
@@ -19,7 +19,7 @@ import pytest
 
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal)
+                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
 
 # Number of tables to create per thread
 NUM_TBLS_PER_THREAD = 10
@@ -49,6 +49,7 @@ class TestDdlStress(ImpalaTestSuite):
                    v.get_value('table_format').compression_codec == 'none'))
 
   @SkipIfS3.caching
+  @SkipIfOzone.caching
   @SkipIfGCS.caching
   @SkipIfCOS.caching
   @SkipIfABFS.caching
diff --git a/tests/util/filesystem_utils.py b/tests/util/filesystem_utils.py
index 605b33227..15ce54891 100644
--- a/tests/util/filesystem_utils.py
+++ b/tests/util/filesystem_utils.py
@@ -33,6 +33,7 @@ IS_ADLS = FILESYSTEM == "adls"
 IS_ABFS = FILESYSTEM == "abfs"
 IS_GCS = FILESYSTEM == "gs"
 IS_COS = FILESYSTEM == "cosn"
+IS_OZONE = FILESYSTEM == "ozone"
 IS_EC = os.getenv("ERASURE_CODING") == "true"
 # This condition satisfies both the states where one can assume a default fs
 #   - The environment variable is set to an empty string.
@@ -58,7 +59,7 @@ ADLS_CLIENT_SECRET = os.getenv("azure_client_secret")
 
 # A map of FILESYSTEM values to their corresponding Scan Node types
 fs_to_name = {'s3': 'S3', 'hdfs': 'HDFS', 'local': 'LOCAL', 'adls': 'ADLS',
-              'abfs': 'ADLS', 'gs': 'GCS', 'cosn': 'COS'}
+              'abfs': 'ADLS', 'gs': 'GCS', 'cosn': 'COS', 'ozone': 'OZONE'}
 
 
 def get_fs_name(fs):
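
Together with the skip markers, these two additions are the whole Ozone
integration surface on the Python side: IS_OZONE gates tests, and the
fs_to_name entry controls how plan output labels scan nodes. A minimal
check, assuming the harness's FILESYSTEM value is set to "ozone" (e.g. via
the TARGET_FILESYSTEM environment variable stock Impala setups read):

    from tests.util.filesystem_utils import fs_to_name

    # The new mapping entry: Ozone-backed scan nodes are labeled OZONE
    # in plans, mirroring the S3/GCS/COS entries beside it.
    assert fs_to_name['ozone'] == 'OZONE'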


[impala] 01/03: IMPALA-11454: part-1: use standard binaries path to start kudu test cluster

Posted by db...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dbecker pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit f5a7e1c1f7f09fa425ef70260ee405cc0fd72bf3
Author: yx91490 <yx...@126.com>
AuthorDate: Sat Jul 23 15:19:25 2022 +0800

    IMPALA-11454: part-1: use standard binaries path to start kudu test cluster
    
    The standard Apache Kudu binaries (kudu-master and kudu-tserver) only
    exist in /sbin; however, the toolchain packaging script also copies
    them to /bin, leading to a larger package size.
    
    This patch uses /sbin/kudu-(master|tserver) to start the Kudu test
    cluster, so that /bin/kudu-(master|tserver) can be dropped from the
    toolchain package later.
    
    Testing:
    Ran existing CI jobs.
    
    Change-Id: I2276ee3e1650532770b856443f1087ad5036dc7a
    Reviewed-on: http://gerrit.cloudera.org:8080/18777
    Reviewed-by: Quanlong Huang <hu...@gmail.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 testdata/cluster/node_templates/common/etc/init.d/kudu-common  | 10 +++++-----
 testdata/cluster/node_templates/common/etc/init.d/kudu-master  |  2 +-
 testdata/cluster/node_templates/common/etc/init.d/kudu-tserver |  2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/testdata/cluster/node_templates/common/etc/init.d/kudu-common b/testdata/cluster/node_templates/common/etc/init.d/kudu-common
index 035acf730..b9f74d595 100644
--- a/testdata/cluster/node_templates/common/etc/init.d/kudu-common
+++ b/testdata/cluster/node_templates/common/etc/init.d/kudu-common
@@ -24,16 +24,16 @@
 # problem).
 
 if [[ -n "$KUDU_BUILD_DIR" ]]; then
-  KUDU_BIN_DIR="$KUDU_BUILD_DIR/bin"
+  KUDU_SBIN_DIR="$KUDU_BUILD_DIR/sbin"
   KUDU_WWW_DIR="$KUDU_HOME/www"
 else
-  KUDU_BIN_DIR="$IMPALA_KUDU_HOME"
+  KUDU_SBIN_DIR="$IMPALA_KUDU_HOME"
   if $USE_KUDU_DEBUG_BUILD; then
-    KUDU_BIN_DIR+=/debug/bin
+    KUDU_SBIN_DIR+=/debug/sbin
   else
-    KUDU_BIN_DIR+=/release/bin
+    KUDU_SBIN_DIR+=/release/sbin
   fi
-  KUDU_WWW_DIR="$KUDU_BIN_DIR/../lib/kudu/www"
+  KUDU_WWW_DIR="$KUDU_SBIN_DIR/../lib/kudu/www"
 fi
 
 KUDU_COMMON_ARGS=("-webserver_doc_root=$KUDU_WWW_DIR")
diff --git a/testdata/cluster/node_templates/common/etc/init.d/kudu-master b/testdata/cluster/node_templates/common/etc/init.d/kudu-master
index b73366287..677322157 100755
--- a/testdata/cluster/node_templates/common/etc/init.d/kudu-master
+++ b/testdata/cluster/node_templates/common/etc/init.d/kudu-master
@@ -28,7 +28,7 @@ function start {
   if [[ -n "$IMPALA_KUDU_STARTUP_FLAGS" ]]; then
     KUDU_COMMON_ARGS+=(${IMPALA_KUDU_STARTUP_FLAGS})
   fi
-  do_start "$KUDU_BIN_DIR"/kudu-master \
+  do_start "$KUDU_SBIN_DIR"/kudu-master \
       -flagfile "$NODE_DIR"/etc/kudu/master.conf \
       "${KUDU_COMMON_ARGS[@]}"
 }
diff --git a/testdata/cluster/node_templates/common/etc/init.d/kudu-tserver b/testdata/cluster/node_templates/common/etc/init.d/kudu-tserver
index 60e49e655..7dead0226 100755
--- a/testdata/cluster/node_templates/common/etc/init.d/kudu-tserver
+++ b/testdata/cluster/node_templates/common/etc/init.d/kudu-tserver
@@ -25,7 +25,7 @@ DIR=$(dirname $0)
 . "$DIR/kudu-common"   # Sets KUDU_COMMON_ARGS
 
 function start {
-  do_start "$KUDU_BIN_DIR"/kudu-tserver \
+  do_start "$KUDU_SBIN_DIR"/kudu-tserver \
       -flagfile "$NODE_DIR"/etc/kudu/tserver.conf \
       "${KUDU_COMMON_ARGS[@]}"
 }