Posted to commits@impala.apache.org by jo...@apache.org on 2022/08/11 05:48:28 UTC

[impala] branch master updated (3ed71756c -> 88aee2f2b)

This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


    from 3ed71756c IMPALA-11458: Update zlib and zstd
     new 0e3e4b57a IMPALA-11408: Fill missing partition columns when INSERT INTO iceberg_tbl (col_list)
     new 1eb0510ea IMPALA-11456: Collapse filesystem Skip logic
     new 88aee2f2b IMPALA-11450: Support building on Centos 8 alternatives

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 bin/bootstrap_toolchain.py                         |   5 +-
 .../org/apache/impala/analysis/InsertStmt.java     |  51 ++++--
 .../java/org/apache/impala/util/IcebergUtil.java   |  11 ++
 .../iceberg-partition-transform-insert.test        | 171 +++++++++++++++++++++
 .../QueryTest/iceberg-partitioned-insert.test      | 118 ++++++++++++++
 tests/authorization/test_ranger.py                 |  13 +-
 tests/common/environ.py                            |   5 +-
 tests/common/skip.py                               | 164 +++-----------------
 tests/custom_cluster/test_admission_controller.py  |  16 +-
 tests/custom_cluster/test_coordinators.py          |  12 +-
 tests/custom_cluster/test_events_custom_configs.py |  11 +-
 .../test_hive_parquet_codec_interop.py             |   7 +-
 .../custom_cluster/test_hive_text_codec_interop.py |   7 +-
 tests/custom_cluster/test_insert_behaviour.py      |  14 +-
 tests/custom_cluster/test_lineage.py               |  10 +-
 tests/custom_cluster/test_local_catalog.py         |  12 +-
 tests/custom_cluster/test_local_tz_conversion.py   |  11 +-
 .../test_metadata_no_events_processing.py          |  11 +-
 tests/custom_cluster/test_metadata_replicas.py     |  22 +--
 .../custom_cluster/test_parquet_max_page_header.py |  11 +-
 tests/custom_cluster/test_partition.py             |   8 +-
 tests/custom_cluster/test_permanent_udfs.py        |  41 +----
 tests/custom_cluster/test_query_retries.py         |  10 +-
 tests/custom_cluster/test_restart_services.py      |   8 +-
 tests/custom_cluster/test_rpc_timeout.py           |   5 +-
 tests/custom_cluster/test_runtime_profile.py       |   4 +-
 .../custom_cluster/test_topic_update_frequency.py  |   6 +-
 tests/data_errors/test_data_errors.py              |  28 +---
 tests/failure/test_failpoints.py                   |  17 +-
 tests/metadata/test_catalogd_debug_actions.py      |   6 +-
 tests/metadata/test_compute_stats.py               |  31 +---
 tests/metadata/test_ddl.py                         |  14 +-
 tests/metadata/test_event_processing.py            |  12 +-
 tests/metadata/test_hdfs_encryption.py             |  14 +-
 tests/metadata/test_hdfs_permissions.py            |  10 +-
 tests/metadata/test_hms_integration.py             |  24 +--
 tests/metadata/test_metadata_query_statements.py   |  23 +--
 tests/metadata/test_partition_metadata.py          |  14 +-
 tests/metadata/test_recover_partitions.py          |   4 +-
 tests/metadata/test_recursive_listing.py           |  15 +-
 tests/metadata/test_refresh_partition.py           |  12 +-
 tests/metadata/test_stale_metadata.py              |   5 +-
 tests/metadata/test_stats_extrapolation.py         |   6 +-
 tests/metadata/test_views_compatibility.py         |  15 +-
 tests/query_test/test_acid.py                      | 130 ++--------------
 tests/query_test/test_date_queries.py              |  11 +-
 tests/query_test/test_hbase_queries.py             |  20 +--
 tests/query_test/test_hdfs_caching.py              |  42 +----
 tests/query_test/test_insert.py                    |   6 +-
 tests/query_test/test_insert_behaviour.py          |  67 ++------
 tests/query_test/test_insert_parquet.py            |  26 +---
 tests/query_test/test_join_queries.py              |  20 +--
 tests/query_test/test_mt_dop.py                    |   4 +-
 tests/query_test/test_nested_types.py              |  45 +-----
 tests/query_test/test_observability.py             |  22 +--
 tests/query_test/test_partitioning.py              |  13 +-
 tests/query_test/test_resource_limits.py           |  10 +-
 tests/query_test/test_runtime_filters.py           |   9 +-
 tests/query_test/test_scanners.py                  |  97 ++----------
 tests/stress/test_acid_stress.py                   |  14 +-
 tests/stress/test_ddl_stress.py                    |  12 +-
 61 files changed, 562 insertions(+), 1000 deletions(-)


[impala] 01/03: IMPALA-11408: Fill missing partition columns when INSERT INTO iceberg_tbl (col_list)

Posted by jo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 0e3e4b57a1c164f34cb24af854aa67f4cfb6f251
Author: LPL <li...@sensorsdata.cn>
AuthorDate: Fri Jul 1 10:51:27 2022 +0800

    IMPALA-11408: Fill missing partition columns when INSERT INTO iceberg_tbl (col_list)
    
    In the case of INSERT INTO iceberg_tbl (col_a, col_b, ...), if some
    partition columns of the Iceberg table are not in the column
    permutation, we fill the missing partition columns with NullLiteral
    so that the data is written to the default
    partition '__HIVE_DEFAULT_PARTITION__'.
    
    Testing:
     - add e2e tests
    
    Change-Id: I40c733755d65e5c81a12ffe09b6d16ed5d115368
    Reviewed-on: http://gerrit.cloudera.org:8080/18790
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../org/apache/impala/analysis/InsertStmt.java     |  51 ++++--
 .../java/org/apache/impala/util/IcebergUtil.java   |  11 ++
 .../iceberg-partition-transform-insert.test        | 171 +++++++++++++++++++++
 .../QueryTest/iceberg-partitioned-insert.test      | 118 ++++++++++++++
 4 files changed, 342 insertions(+), 9 deletions(-)
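
For illustration only, here is a minimal Python sketch of the idea behind this change: partition columns that are absent from the INSERT column list get a NULL partition key expression, and the collected (position, expr) pairs are kept in ascending column-position order. The Column structure and helper below are hypothetical stand-ins, not part of the patch; the real logic lives in InsertStmt.java.

# Illustrative sketch only (hypothetical structures, not the real analyzer).
from collections import namedtuple

Column = namedtuple("Column", ["name", "position", "is_partition"])

def fill_missing_partition_nulls(table_columns, mentioned):
    """Return (position, value) pairs for partition columns missing from
    the INSERT column list; NULL routes rows to __HIVE_DEFAULT_PARTITION__."""
    pairs = []
    for col in table_columns:
        if col.name in mentioned:
            continue
        if col.is_partition:
            pairs.append((col.position, None))
    # Keep partition key exprs in ascending column-position order,
    # mirroring the sort added by this change.
    pairs.sort(key=lambda p: p[0])
    return pairs

cols = [Column("col_i", 0, True), Column("col_str", 1, True), Column("col_ts", 2, True)]
print(fill_missing_partition_nulls(cols, {"col_i"}))  # [(1, None), (2, None)]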

diff --git a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
index c043c3c2f..97ab4202d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
@@ -17,15 +17,23 @@
 
 package org.apache.impala.analysis;
 
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.iceberg.types.Types;
+
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.BuiltinsDb;
 import org.apache.impala.catalog.Column;
@@ -54,12 +62,6 @@ import org.apache.impala.rewrite.ExprRewriter;
 import org.apache.impala.thrift.TIcebergPartitionTransformType;
 import org.apache.impala.thrift.TSortingOrder;
 import org.apache.impala.util.IcebergUtil;
-import org.apache.thrift.TBaseHelper;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
 
 /**
  * Representation of a single insert or upsert statement, including the select statement
@@ -929,7 +931,9 @@ public class InsertStmt extends StatementBase {
           ++parts;
         }
       }
-      Preconditions.checkState(partitionKeyExprs_.size() == parts);
+      if (CollectionUtils.isEmpty(columnPermutation_)) {
+        Preconditions.checkState(partitionKeyExprs_.size() == parts);
+      }
     }
     else if (isKuduTable) {
       Preconditions.checkState(
@@ -972,7 +976,19 @@ public class InsertStmt extends StatementBase {
           } else {
             // Unmentioned non-clustering columns get NULL literals with the appropriate
             // target type because Parquet cannot handle NULL_TYPE (IMPALA-617).
-            resultExprs_.add(NullLiteral.create(tblColumn.getType()));
+            NullLiteral nullExpr = NullLiteral.create(tblColumn.getType());
+            resultExprs_.add(nullExpr);
+            // In the case of INSERT INTO iceberg_tbl (col_a, col_b, ...), if the
+            // partition columns are not in the columnPermutation_, we should fill it
+            // with NullLiteral to partitionKeyExprs_ (IMPALA-11408).
+            if (isIcebergTarget() && !CollectionUtils.isEmpty(columnPermutation_)
+                && icebergPartSpec != null) {
+              IcebergColumn targetColumn = (IcebergColumn) tblColumn;
+              if (IcebergUtil.isPartitionColumn(targetColumn, icebergPartSpec)) {
+                partitionKeyExprs_.add(nullExpr);
+                partitionColPos_.add(targetColumn.getPosition());
+              }
+            }
           }
         }
       }
@@ -985,6 +1001,23 @@ public class InsertStmt extends StatementBase {
       }
     }
 
+    // In the case of INSERT INTO iceberg_tbl (col_a, col_b, ...), to ensure that data is
+    // written to the correct partition, we need to make sure that the partitionKeyExprs_
+    // is in ascending order according to the column position of the Iceberg tables.
+    if (isIcebergTarget() && !CollectionUtils.isEmpty(columnPermutation_)) {
+      List<Pair<Integer, Expr>> exprPairs = Lists.newArrayList();
+      for (int i = 0; i < partitionColPos_.size(); i++) {
+        exprPairs.add(Pair.create(partitionColPos_.get(i), partitionKeyExprs_.get(i)));
+      }
+      exprPairs.sort(Comparator.comparingInt(p -> p.first));
+      partitionColPos_.clear();
+      partitionKeyExprs_.clear();
+      for (Pair<Integer, Expr> exprPair : exprPairs) {
+        partitionColPos_.add(exprPair.first);
+        partitionKeyExprs_.add(exprPair.second);
+      }
+    }
+
     if (table_ instanceof FeKuduTable) {
       Preconditions.checkState(!primaryKeyExprs_.isEmpty());
     }
diff --git a/fe/src/main/java/org/apache/impala/util/IcebergUtil.java b/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
index ace59ae2a..f0ec43185 100644
--- a/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
@@ -67,6 +67,7 @@ import org.apache.impala.analysis.TimeTravelSpec.Kind;
 import org.apache.impala.catalog.Catalog;
 import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.IcebergColumn;
 import org.apache.impala.catalog.IcebergTable;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.catalog.iceberg.IcebergCatalog;
@@ -984,4 +985,14 @@ public class IcebergUtil {
       }
     });
   }
+
+  public static boolean isPartitionColumn(IcebergColumn column,
+      IcebergPartitionSpec spec) {
+    for (IcebergPartitionField partField : spec.getIcebergPartitionFields()) {
+      if (partField.getTransformType() == TIcebergPartitionTransformType.VOID) continue;
+      if (column.getFieldId() != partField.getSourceId()) continue;
+      return true;
+    }
+    return false;
+  }
 }
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-partition-transform-insert.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-partition-transform-insert.test
index 971d7a776..f2f725e09 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-partition-transform-insert.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-partition-transform-insert.test
@@ -1,5 +1,176 @@
 ====
 ---- QUERY
+create table ice_part_transform (col_i int, col_str string, col_ts timestamp)
+partitioned by spec (
+  bucket(3, col_i),
+  truncate(1, col_str),
+  day(col_ts)
+)
+stored as iceberg;
+====
+---- QUERY
+insert into
+  ice_part_transform
+values
+  (2, 'two', '2001-02-03 07:08:00'),
+  (1, 'one', '2001-01-03 07:08:00'),
+  (3, 'three', '2002-03-03 07:08:00'),
+  (5, 'five', '2003-05-03 07:08:00');
+select col_i,col_str,col_ts from ice_part_transform order by 1,2,3;
+---- RESULTS
+1,'one',2001-01-03 07:08:00
+2,'two',2001-02-03 07:08:00
+3,'three',2002-03-03 07:08:00
+5,'five',2003-05-03 07:08:00
+---- TYPES
+int,string,timestamp
+====
+---- QUERY
+insert into ice_part_transform(col_i) values (0), (4);
+select
+  col_i, col_str, col_ts
+from
+  ice_part_transform
+where
+  col_str is null
+  and col_ts is null
+order by
+  1, 2, 3;
+---- RESULTS
+0,'NULL',NULL
+4,'NULL',NULL
+---- TYPES
+int,string,timestamp
+---- RUNTIME_PROFILE
+aggregation(SUM, RowsRead): 2
+aggregation(SUM, NumRowGroups): 2
+====
+---- QUERY
+insert into ice_part_transform(col_str) values ('zero'), ('four');
+select
+  col_i, col_str, col_ts
+from
+  ice_part_transform
+where
+  col_i is null
+  and col_ts is null
+order by
+  1, 2, 3;
+---- RESULTS
+NULL,'four',NULL
+NULL,'zero',NULL
+---- TYPES
+int,string,timestamp
+---- RUNTIME_PROFILE
+aggregation(SUM, RowsRead): 2
+aggregation(SUM, NumRowGroups): 2
+====
+---- QUERY
+insert into ice_part_transform(col_ts) values ('2001-04-03 07:08:00'), ('2001-05-03 07:08:00');
+select
+  col_i, col_str, col_ts
+from
+  ice_part_transform
+where
+  col_i is null
+  and col_str is null
+order by
+  1, 2, 3;
+---- RESULTS
+NULL,'NULL',2001-04-03 07:08:00
+NULL,'NULL',2001-05-03 07:08:00
+---- TYPES
+int,string,timestamp
+---- RUNTIME_PROFILE
+aggregation(SUM, RowsRead): 2
+aggregation(SUM, NumRowGroups): 2
+====
+---- QUERY
+insert into ice_part_transform(col_i, col_str) values (2, 'two'), (1, 'one');
+select
+  col_i, col_str, col_ts
+from
+  ice_part_transform
+where
+  col_i is not null
+  and col_str is not null
+  and col_ts is null
+order by
+  1, 2, 3;
+---- RESULTS
+1,'one',NULL
+2,'two',NULL
+---- TYPES
+int,string,timestamp
+---- RUNTIME_PROFILE
+aggregation(SUM, RowsRead): 2
+aggregation(SUM, NumRowGroups): 2
+====
+---- QUERY
+insert into ice_part_transform(col_i, col_ts) values (2, '2001-02-03 07:08:00'), (1, '2001-01-03 07:08:00');
+select
+  col_i, col_str, col_ts
+from
+  ice_part_transform
+where
+  col_i is not null
+  and col_str is null
+  and col_ts is not null
+order by
+  1, 2, 3;
+---- RESULTS
+1,'NULL',2001-01-03 07:08:00
+2,'NULL',2001-02-03 07:08:00
+---- TYPES
+int,string,timestamp
+---- RUNTIME_PROFILE
+aggregation(SUM, RowsRead): 2
+aggregation(SUM, NumRowGroups): 2
+====
+---- QUERY
+insert into ice_part_transform(col_str, col_ts) values ('two', '2001-02-03 07:08:00'), ('one', '2001-01-03 07:08:00');
+select
+  col_i, col_str, col_ts
+from
+  ice_part_transform
+where
+  col_i is null
+  and col_str is not null
+  and col_ts is not null
+order by
+  1, 2, 3;
+---- RESULTS
+NULL,'one',2001-01-03 07:08:00
+NULL,'two',2001-02-03 07:08:00
+---- TYPES
+int,string,timestamp
+---- RUNTIME_PROFILE
+aggregation(SUM, RowsRead): 2
+aggregation(SUM, NumRowGroups): 2
+====
+---- QUERY
+show files in ice_part_transform;
+---- RESULTS
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=0/col_str_trunc=__HIVE_DEFAULT_PARTITION__/col_ts_day=2001-02-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=0/col_str_trunc=__HIVE_DEFAULT_PARTITION__/col_ts_day=__HIVE_DEFAULT_PARTITION__/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=0/col_str_trunc=t/col_ts_day=2001-02-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=0/col_str_trunc=t/col_ts_day=2002-03-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=0/col_str_trunc=t/col_ts_day=__HIVE_DEFAULT_PARTITION__/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=1/col_str_trunc=__HIVE_DEFAULT_PARTITION__/col_ts_day=__HIVE_DEFAULT_PARTITION__/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=2/col_str_trunc=__HIVE_DEFAULT_PARTITION__/col_ts_day=2001-01-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=2/col_str_trunc=f/col_ts_day=2003-05-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=2/col_str_trunc=o/col_ts_day=2001-01-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=2/col_str_trunc=o/col_ts_day=__HIVE_DEFAULT_PARTITION__/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=__HIVE_DEFAULT_PARTITION__/col_str_trunc=__HIVE_DEFAULT_PARTITION__/col_ts_day=2001-04-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=__HIVE_DEFAULT_PARTITION__/col_str_trunc=__HIVE_DEFAULT_PARTITION__/col_ts_day=2001-05-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=__HIVE_DEFAULT_PARTITION__/col_str_trunc=f/col_ts_day=__HIVE_DEFAULT_PARTITION__/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=__HIVE_DEFAULT_PARTITION__/col_str_trunc=o/col_ts_day=2001-01-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=__HIVE_DEFAULT_PARTITION__/col_str_trunc=t/col_ts_day=2001-02-03/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_part_transform/data/col_i_bucket=__HIVE_DEFAULT_PARTITION__/col_str_trunc=z/col_ts_day=__HIVE_DEFAULT_PARTITION__/.*.0.parq','.*',''
+---- TYPES
+string, string, string
+====
+---- QUERY
 # Test partitioned INSERTs with single column that is also
 # the partitioned column. Partition transform is BUCKET.
 create table single_col_bucket (s string)
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitioned-insert.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitioned-insert.test
index fa0c7b413..ee7355170 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitioned-insert.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitioned-insert.test
@@ -76,6 +76,56 @@ select * from ice_multi_part;
 INT, DATE, STRING
 ====
 ---- QUERY
+insert into
+  ice_multi_part (d, s)
+values
+  ('2022-07-26', 'third'),
+  ('2022-07-27', 'fourth'),
+  ('2022-07-28', 'fifth');
+select * from ice_multi_part order by d;
+---- RESULTS
+1,2020-12-07,'first'
+2,2020-12-08,'second'
+NULL,2022-07-26,'third'
+NULL,2022-07-27,'fourth'
+NULL,2022-07-28,'fifth'
+---- TYPES
+INT, DATE, STRING
+====
+---- QUERY
+insert into
+  ice_multi_part (i, s)
+values
+  (6, 'sixth'),
+  (7, 'seventh');
+select * from ice_multi_part order by d, i;
+---- RESULTS
+1,2020-12-07,'first'
+2,2020-12-08,'second'
+NULL,2022-07-26,'third'
+NULL,2022-07-27,'fourth'
+NULL,2022-07-28,'fifth'
+6,NULL,'sixth'
+7,NULL,'seventh'
+---- TYPES
+INT, DATE, STRING
+====
+---- QUERY
+show files in ice_multi_part;
+---- LABELS
+Path,Size,Partition
+---- RESULTS
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_multi_part/data/i=1/d=2020-12-07/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_multi_part/data/i=2/d=2020-12-08/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_multi_part/data/i=6/d=__HIVE_DEFAULT_PARTITION__/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_multi_part/data/i=7/d=__HIVE_DEFAULT_PARTITION__/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_multi_part/data/i=__HIVE_DEFAULT_PARTITION__/d=2022-07-26/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_multi_part/data/i=__HIVE_DEFAULT_PARTITION__/d=2022-07-27/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/ice_multi_part/data/i=__HIVE_DEFAULT_PARTITION__/d=2022-07-28/.*.0.parq','.*',''
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
 select * from ice_multi_part
 where d = '2020-12-08';
 ---- RESULTS
@@ -86,6 +136,24 @@ INT, DATE, STRING
 aggregation(SUM, RowsRead): 1
 ====
 ---- QUERY
+select
+  *
+from
+  ice_multi_part
+where
+  i is null
+order by
+  d;
+---- RESULTS
+NULL,2022-07-26,'third'
+NULL,2022-07-27,'fourth'
+NULL,2022-07-28,'fifth'
+---- TYPES
+INT, DATE, STRING
+---- RUNTIME_PROFILE
+aggregation(SUM, RowsRead): 3
+====
+---- QUERY
 # Test that Impala only writes one file per partitions.
 create table ice_bigints (i BIGINT, j BIGINT, k BIGINT)
 partitioned by spec (i, j)
@@ -263,6 +331,56 @@ BIGINT
 aggregation(SUM, NumRowGroups): 0
 ====
 ---- QUERY
+create table alltypes_part_2 like alltypes_part;
+---- RESULTS
+'Table has been created.'
+====
+---- QUERY
+insert into
+  alltypes_part_2(
+    id,
+    int_col,
+    bigint_col,
+    float_col,
+    double_col,
+    date_col,
+    string_col,
+    timestamp_col
+  )
+select
+  id,
+  int_col,
+  bigint_col,
+  float_col,
+  double_col,
+  CAST(date_string_col as date FORMAT 'MM/DD/YY'),
+  string_col,
+  timestamp_col
+from
+  functional.alltypestiny;
+select count(*) from alltypes_part;
+---- RESULTS
+8
+---- TYPES
+BIGINT
+====
+---- QUERY
+show files in alltypes_part_2;
+---- LABELS
+Path,Size,Partition
+---- RESULTS
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/alltypes_part_2/data/id=0/bool_col=__HIVE_DEFAULT_PARTITION__/int_col=0/bigint_col=0/float_col=0/double_col=0/date_col=2009-01-01/string_col=0/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/alltypes_part_2/data/id=1/bool_col=__HIVE_DEFAULT_PARTITION__/int_col=1/bigint_col=10/float_col=1.100000023841858/double_col=10.1/date_col=2009-01-01/string_col=1/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/alltypes_part_2/data/id=2/bool_col=__HIVE_DEFAULT_PARTITION__/int_col=0/bigint_col=0/float_col=0/double_col=0/date_col=2009-02-01/string_col=0/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/alltypes_part_2/data/id=3/bool_col=__HIVE_DEFAULT_PARTITION__/int_col=1/bigint_col=10/float_col=1.100000023841858/double_col=10.1/date_col=2009-02-01/string_col=1/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/alltypes_part_2/data/id=4/bool_col=__HIVE_DEFAULT_PARTITION__/int_col=0/bigint_col=0/float_col=0/double_col=0/date_col=2009-03-01/string_col=0/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/alltypes_part_2/data/id=5/bool_col=__HIVE_DEFAULT_PARTITION__/int_col=1/bigint_col=10/float_col=1.100000023841858/double_col=10.1/date_col=2009-03-01/string_col=1/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/alltypes_part_2/data/id=6/bool_col=__HIVE_DEFAULT_PARTITION__/int_col=0/bigint_col=0/float_col=0/double_col=0/date_col=2009-04-01/string_col=0/.*.0.parq','.*',''
+row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/alltypes_part_2/data/id=7/bool_col=__HIVE_DEFAULT_PARTITION__/int_col=1/bigint_col=10/float_col=1.100000023841858/double_col=10.1/date_col=2009-04-01/string_col=1/.*.0.parq','.*',''
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
 # Iceberg partitions independent of column order
 ---- QUERY
 # Test inserts with multple partition columns.


[impala] 03/03: IMPALA-11450: Support building on Centos 8 alternatives

Posted by jo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 88aee2f2b4d29fa0563cbf520347aa32d3d99b89
Author: Joe McDonnell <jo...@cloudera.com>
AuthorDate: Thu Jul 21 15:12:22 2022 -0700

    IMPALA-11450: Support building on Centos 8 alternatives
    
    This adds support for Rocky Linux 8 and Alma Linux 8,
    which are new Centos 8 alternatives. They use the
    same toolchain as Centos 8.
    
    Testing:
     - Ran docker-based tests on Rocky Linux and Alma Linux.
       The build passed and tests ran.
    
    Change-Id: If10d71caa90d24e14d4cf6a28f5c27e03ef3c4c6
    Reviewed-on: http://gerrit.cloudera.org:8080/18773
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 bin/bootstrap_toolchain.py | 5 ++++-
 tests/common/environ.py    | 5 +++--
 2 files changed, 7 insertions(+), 3 deletions(-)
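
As a rough sketch of what the bootstrap_toolchain.py change does (simplified and hypothetical, not the real script): the lsb_release output is lowercased and concatenated, RHEL-like distros are reduced to their major release, and the new rocky8/almalinux8 entries map to the same CentOS 8 toolchain package.

# Illustrative sketch only; mirrors the OsMapping additions in this change.
OS_MAPPING = {
    "centos8": "ec2-package-centos-8",
    "rocky8": "ec2-package-centos-8",      # new in this change
    "almalinux8": "ec2-package-centos-8",  # new in this change
}

def toolchain_package(lsb_release_output):
    # e.g. "Rocky 8.6" -> "rocky8.6"
    release = "".join(part.lower() for part in lsb_release_output.split())
    # Only the major release matters for RHEL-like distros.
    for distro in ("centos", "rocky", "almalinux", "redhatenterprise", "suse"):
        if distro in release:
            release = release.split(".")[0]
            break
    return OS_MAPPING.get(release)

print(toolchain_package("Rocky 8.6"))  # ec2-package-centos-8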

diff --git a/bin/bootstrap_toolchain.py b/bin/bootstrap_toolchain.py
index e52cddafe..e6c4d0423 100755
--- a/bin/bootstrap_toolchain.py
+++ b/bin/bootstrap_toolchain.py
@@ -77,6 +77,8 @@ OS_MAPPING = [
   OsMapping("centos6", "ec2-package-centos-6", "redhat6"),
   OsMapping("centos7", "ec2-package-centos-7", "redhat7"),
   OsMapping("centos8", "ec2-package-centos-8", "redhat8"),
+  OsMapping("rocky8", "ec2-package-centos-8", "redhat8"),
+  OsMapping("almalinux8", "ec2-package-centos-8", "redhat8"),
   OsMapping("redhatenterpriseserver5", "ec2-package-centos-5", None),
   OsMapping("redhatenterpriseserver6", "ec2-package-centos-6", "redhat6"),
   OsMapping("redhatenterpriseserver7", "ec2-package-centos-7", "redhat7"),
@@ -401,7 +403,8 @@ def get_platform_release_label(release=None):
       lsb_release = check_output(["lsb_release", "-irs"])
       release = "".join(map(lambda x: x.lower(), lsb_release.split()))
       # Only need to check against the major release if RHEL, CentOS or Suse
-      for distro in ['centos', 'redhatenterprise', 'redhatenterpriseserver', 'suse']:
+      for distro in ['centos', 'rocky', 'almalinux', 'redhatenterprise',
+                     'redhatenterpriseserver', 'suse']:
         if distro in release:
           release = release.split('.')[0]
           break
diff --git a/tests/common/environ.py b/tests/common/environ.py
index e62eb87c3..831809247 100644
--- a/tests/common/environ.py
+++ b/tests/common/environ.py
@@ -46,13 +46,14 @@ if os.path.isfile(IMPALA_LOCAL_VERSION_INFO):
   if IMPALA_LOCAL_BUILD_VERSION is None:
     raise Exception("Could not find VERSION in {0}".format(IMPALA_LOCAL_VERSION_INFO))
 
-# Check if it is Red Hat/CentOS Linux
+# Check if it is Red Hat/CentOS/Rocky/AlmaLinux Linux
 distribution = platform.linux_distribution()
 distname = distribution[0].lower()
 version = distribution[1]
 IS_REDHAT_6_DERIVATIVE = False
 IS_REDHAT_DERIVATIVE = False
-if distname.find('centos') or distname.find('red hat'):
+if distname.find('centos') or distname.find('rocky') or \
+   distname.find('almalinux') or distname.find('red hat'):
   IS_REDHAT_DERIVATIVE = True
   if len(re.findall('^6\.*', version)) > 0:
     IS_REDHAT_6_DERIVATIVE = True


[impala] 02/03: IMPALA-11456: Collapse filesystem Skip logic

Posted by jo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 1eb0510eaa8b54e021847775db32e250775101d9
Author: Michael Smith <mi...@cloudera.com>
AuthorDate: Mon Jul 25 10:53:25 2022 -0700

    IMPALA-11456: Collapse filesystem Skip logic
    
    Combines all SkipIf* classes for different filesystems into a single
    SkipIfFS class. Many cases are simplified to 'not IS_HDFS', with the
    rest as filesystem-specific special cases. The 'jira' option is removed
    in favor of specific flags for each issue.
    
    Change-Id: Ib928a6274baaaec45614887b9e762346a25812a1
    Reviewed-on: http://gerrit.cloudera.org:8080/18781
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 tests/authorization/test_ranger.py                 |  13 +-
 tests/common/skip.py                               | 164 +++------------------
 tests/custom_cluster/test_admission_controller.py  |  16 +-
 tests/custom_cluster/test_coordinators.py          |  12 +-
 tests/custom_cluster/test_events_custom_configs.py |  11 +-
 .../test_hive_parquet_codec_interop.py             |   7 +-
 .../custom_cluster/test_hive_text_codec_interop.py |   7 +-
 tests/custom_cluster/test_insert_behaviour.py      |  14 +-
 tests/custom_cluster/test_lineage.py               |  10 +-
 tests/custom_cluster/test_local_catalog.py         |  12 +-
 tests/custom_cluster/test_local_tz_conversion.py   |  11 +-
 .../test_metadata_no_events_processing.py          |  11 +-
 tests/custom_cluster/test_metadata_replicas.py     |  22 +--
 .../custom_cluster/test_parquet_max_page_header.py |  11 +-
 tests/custom_cluster/test_partition.py             |   8 +-
 tests/custom_cluster/test_permanent_udfs.py        |  41 +-----
 tests/custom_cluster/test_query_retries.py         |  10 +-
 tests/custom_cluster/test_restart_services.py      |   8 +-
 tests/custom_cluster/test_rpc_timeout.py           |   5 +-
 tests/custom_cluster/test_runtime_profile.py       |   4 +-
 .../custom_cluster/test_topic_update_frequency.py  |   6 +-
 tests/data_errors/test_data_errors.py              |  28 +---
 tests/failure/test_failpoints.py                   |  17 +--
 tests/metadata/test_catalogd_debug_actions.py      |   6 +-
 tests/metadata/test_compute_stats.py               |  31 +---
 tests/metadata/test_ddl.py                         |  14 +-
 tests/metadata/test_event_processing.py            |  12 +-
 tests/metadata/test_hdfs_encryption.py             |  14 +-
 tests/metadata/test_hdfs_permissions.py            |  10 +-
 tests/metadata/test_hms_integration.py             |  24 +--
 tests/metadata/test_metadata_query_statements.py   |  23 +--
 tests/metadata/test_partition_metadata.py          |  14 +-
 tests/metadata/test_recover_partitions.py          |   4 +-
 tests/metadata/test_recursive_listing.py           |  15 +-
 tests/metadata/test_refresh_partition.py           |  12 +-
 tests/metadata/test_stale_metadata.py              |   5 +-
 tests/metadata/test_stats_extrapolation.py         |   6 +-
 tests/metadata/test_views_compatibility.py         |  15 +-
 tests/query_test/test_acid.py                      | 130 ++--------------
 tests/query_test/test_date_queries.py              |  11 +-
 tests/query_test/test_hbase_queries.py             |  20 +--
 tests/query_test/test_hdfs_caching.py              |  42 +-----
 tests/query_test/test_insert.py                    |   6 +-
 tests/query_test/test_insert_behaviour.py          |  67 ++-------
 tests/query_test/test_insert_parquet.py            |  26 +---
 tests/query_test/test_join_queries.py              |  20 +--
 tests/query_test/test_mt_dop.py                    |   4 +-
 tests/query_test/test_nested_types.py              |  45 +-----
 tests/query_test/test_observability.py             |  22 +--
 tests/query_test/test_partitioning.py              |  13 +-
 tests/query_test/test_resource_limits.py           |  10 +-
 tests/query_test/test_runtime_filters.py           |   9 +-
 tests/query_test/test_scanners.py                  |  97 ++----------
 tests/stress/test_acid_stress.py                   |  14 +-
 tests/stress/test_ddl_stress.py                    |  12 +-
 55 files changed, 213 insertions(+), 988 deletions(-)
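
For readers unfamiliar with the pattern being consolidated: each attribute of the new SkipIfFS class is a pytest skipif marker that tests apply as a decorator. A minimal sketch of that pattern follows, using hypothetical flag values; the real flags come from tests/util/filesystem_utils.py and the real class is in tests/common/skip.py below.

# Minimal sketch of the consolidated skip-marker pattern (hypothetical flags).
import pytest

IS_HDFS = False   # assume a non-HDFS target filesystem for illustration
IS_OZONE = False

class SkipIfFS:
    # One class attribute per known limitation; tests apply them as decorators.
    hive = pytest.mark.skipif(not IS_HDFS, reason="Hive doesn't work")
    no_storage_ids = pytest.mark.skipif(IS_OZONE,
        reason="Ozone does not return storage ids, IMPALA-10213")

@SkipIfFS.hive
def test_hive_interop():
    # Skipped on every filesystem except HDFS.
    assert True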

diff --git a/tests/authorization/test_ranger.py b/tests/authorization/test_ranger.py
index f68645576..049d3f4f1 100644
--- a/tests/authorization/test_ranger.py
+++ b/tests/authorization/test_ranger.py
@@ -27,9 +27,7 @@ from subprocess import check_call
 
 from getpass import getuser
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfLocal, SkipIfHive2, SkipIfGCS, SkipIfCOS,
-                               SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfHive2
 from tests.common.test_dimensions import (create_client_protocol_dimension,
     create_exec_option_dimension, create_orc_dimension)
 from tests.util.hdfs_util import NAMENODE
@@ -1402,14 +1400,7 @@ class TestRanger(CustomClusterTestSuite):
         TestRanger._remove_policy(unique_name + str(i))
 
   @pytest.mark.execute_serially
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   @SkipIfHive2.ranger_auth
   @CustomClusterTestSuite.with_args()
   def test_hive_with_ranger_setup(self, vector):
diff --git a/tests/common/skip.py b/tests/common/skip.py
index 0657155a0..70bb0717b 100644
--- a/tests/common/skip.py
+++ b/tests/common/skip.py
@@ -20,7 +20,6 @@
 # annotate the class or test routine with the marker.
 #
 
-import os
 import pytest
 from functools import partial
 
@@ -43,130 +42,39 @@ from tests.util.filesystem_utils import (
 
 IMPALA_TEST_CLUSTER_PROPERTIES = ImpalaTestClusterProperties.get_instance()
 
-class SkipIfS3:
 
+class SkipIfFS:
   # These are skipped due to product limitations.
-  caching = pytest.mark.skipif(IS_S3, reason="SET CACHED not implemented for S3")
-  hive = pytest.mark.skipif(IS_S3, reason="Hive doesn't work with S3")
-  hdfs_block_size = pytest.mark.skipif(IS_S3, reason="S3 uses it's own block size")
-  hdfs_acls = pytest.mark.skipif(IS_S3, reason="HDFS acls are not supported on S3")
-  jira = partial(pytest.mark.skipif, IS_S3)
-  hdfs_encryption = pytest.mark.skipif(IS_S3,
-      reason="HDFS encryption is not supported with S3")
+  hdfs_caching = pytest.mark.skipif(not IS_HDFS, reason="SET CACHED not implemented")
+  hdfs_encryption = pytest.mark.skipif(not IS_HDFS,
+      reason="HDFS encryption is not supported")
+  hdfs_block_size = pytest.mark.skipif(not IS_HDFS, reason="uses it's own block size")
+  hdfs_acls = pytest.mark.skipif(not IS_HDFS, reason="HDFS acls are not supported")
+
+  # Special case product limitations.
   empty_directory = pytest.mark.skipif(IS_S3,
       reason="Empty directories are not supported on S3")
-
-  # These need test infra work to re-enable.
-  udfs = pytest.mark.skipif(IS_S3, reason="udas/udfs not copied to S3")
-  datasrc = pytest.mark.skipif(IS_S3, reason="data sources not copied to S3")
-  hbase = pytest.mark.skipif(IS_S3, reason="HBase not started with S3")
-  qualified_path = pytest.mark.skipif(IS_S3,
-      reason="Tests rely on HDFS qualified paths, IMPALA-1872")
-  iceberg = pytest.mark.skipif(IS_S3,
-      reason="Currently Iceberg is only supported on HDFS.")
-  variable_listing_times = pytest.mark.skipif(IS_S3,
-      reason="Flakiness due to unpredictable listing times on S3.")
-
-
-class SkipIfOzone:
-  # These are skipped due to product limitations.
-  caching = pytest.mark.skipif(IS_OZONE, reason="SET CACHED not implemented for Ozone")
-  hdfs_block_size = pytest.mark.skipif(IS_OZONE, reason="Ozone uses it's own block size")
-  hdfs_acls = pytest.mark.skipif(IS_OZONE, reason="HDFS acls are not supported on Ozone")
-  hdfs_encryption = pytest.mark.skipif(IS_OZONE,
-      reason="HDFS encryption is not supported with Ozone")
-  no_storage_ids = pytest.mark.skipif(IS_OZONE,
-        reason="Ozone does not return storage ids, IMPALA-10213")
-
-  # These need test infra work to re-enable.
-  hive = pytest.mark.skipif(IS_OZONE, reason="Hive not started with Ozone")
-  hbase = pytest.mark.skipif(IS_OZONE, reason="HBase not started with Ozone")
-  qualified_path = pytest.mark.skipif(IS_OZONE,
-      reason="Tests rely on HDFS qualified paths, IMPALA-1872")
-
-
-class SkipIfABFS:
-
-  # These are skipped due to product limitations.
-  caching = pytest.mark.skipif(IS_ABFS, reason="SET CACHED not implemented for ABFS")
-  hive = pytest.mark.skipif(IS_ABFS, reason="Hive doesn't work with ABFS")
-  hdfs_block_size = pytest.mark.skipif(IS_ABFS, reason="ABFS uses it's own block size")
-  hdfs_acls = pytest.mark.skipif(IS_ABFS, reason="HDFS acls are not supported on ABFS")
-  jira = partial(pytest.mark.skipif, IS_ABFS)
-  hdfs_encryption = pytest.mark.skipif(IS_ABFS,
-      reason="HDFS encryption is not supported with ABFS")
-  trash = pytest.mark.skipif(IS_ABFS,
-      reason="Drop/purge not working as expected on ABFS, IMPALA-7726")
   file_or_folder_name_ends_with_period = pytest.mark.skipif(IS_ABFS,
       reason="ABFS does not support file / directories that end with a period")
+  stress_insert_timeouts = pytest.mark.skipif(IS_COS or IS_GCS,
+      reason="IMPALA-10563, IMPALA-10773")
+  shutdown_idle_fails = pytest.mark.skipif(IS_COS or IS_GCS,
+      reason="IMPALA-10562")
+  late_filters = pytest.mark.skipif(IS_ISILON, reason="IMPALA-6998")
+  read_past_eof = pytest.mark.skipif(IS_S3 or IS_GCS, reason="IMPALA-2512")
+  no_storage_ids = pytest.mark.skipif(IS_OZONE,
+      reason="Ozone does not return storage ids, IMPALA-10213")
+  large_block_size = pytest.mark.skipif(IS_OZONE,
+      reason="block size is larger than 128MB")
 
   # These need test infra work to re-enable.
-  udfs = pytest.mark.skipif(IS_ABFS, reason="udas/udfs not copied to ABFS")
-  datasrc = pytest.mark.skipif(IS_ABFS, reason="data sources not copied to ABFS")
-  hbase = pytest.mark.skipif(IS_ABFS, reason="HBase not started with ABFS")
-  qualified_path = pytest.mark.skipif(IS_ABFS,
-      reason="Tests rely on HDFS qualified paths, IMPALA-1872")
-
-class SkipIfADLS:
-
-  # These are skipped due to product limitations.
-  caching = pytest.mark.skipif(IS_ADLS, reason="SET CACHED not implemented for ADLS")
-  hive = pytest.mark.skipif(IS_ADLS, reason="Hive doesn't work with ADLS")
-  hdfs_block_size = pytest.mark.skipif(IS_ADLS, reason="ADLS uses it's own block size")
-  hdfs_acls = pytest.mark.skipif(IS_ADLS, reason="HDFS acls are not supported on ADLS")
-  jira = partial(pytest.mark.skipif, IS_ADLS)
-  hdfs_encryption = pytest.mark.skipif(IS_ADLS,
-      reason="HDFS encryption is not supported with ADLS")
-
-  # These need test infra work to re-enable.
-  udfs = pytest.mark.skipif(IS_ADLS, reason="udas/udfs not copied to ADLS")
-  datasrc = pytest.mark.skipif(IS_ADLS, reason="data sources not copied to ADLS")
-  hbase = pytest.mark.skipif(IS_ADLS, reason="HBase not started with ADLS")
-  qualified_path = pytest.mark.skipif(IS_ADLS,
-      reason="Tests rely on HDFS qualified paths, IMPALA-1872")
-  eventually_consistent = pytest.mark.skipif(IS_ADLS,
-      reason="The client is slow to realize changes to file metadata")
-
-
-class SkipIfGCS:
-
-  # These are skipped due to product limitations.
-  caching = pytest.mark.skipif(IS_GCS, reason="SET CACHED not implemented for GCS")
-  hive = pytest.mark.skipif(IS_GCS, reason="Hive doesn't work with GCS")
-  hdfs_block_size = pytest.mark.skipif(IS_GCS, reason="GCS uses it's own block size")
-  hdfs_acls = pytest.mark.skipif(IS_GCS, reason="HDFS acls are not supported on GCS")
-  jira = partial(pytest.mark.skipif, IS_GCS)
-  hdfs_encryption = pytest.mark.skipif(IS_GCS,
-      reason="HDFS encryption is not supported with GCS")
-
-  # These need test infra work to re-enable.
-  hbase = pytest.mark.skipif(IS_GCS, reason="HBase not started with GCS")
-  qualified_path = pytest.mark.skipif(IS_GCS,
-      reason="Tests rely on HDFS qualified paths, IMPALA-1872")
-  variable_listing_times = pytest.mark.skipif(IS_GCS,
-      reason="Flakiness due to unpredictable listing times on GCS.")
-
-
-class SkipIfCOS:
-
-  # These are skipped due to product limitations.
-  caching = pytest.mark.skipif(IS_COS, reason="SET CACHED not implemented for COS")
-  hive = pytest.mark.skipif(IS_COS, reason="Hive doesn't work with COS")
-  hdfs_block_size = pytest.mark.skipif(IS_COS, reason="COS uses it's own block size")
-  hdfs_acls = pytest.mark.skipif(IS_COS, reason="HDFS acls are not supported on COS")
-  jira = partial(pytest.mark.skipif, IS_COS)
-  hdfs_encryption = pytest.mark.skipif(IS_COS,
-      reason="HDFS encryption is not supported with COS")
-
-  # These need test infra work to re-enable.
-  udfs = pytest.mark.skipif(IS_COS, reason="udas/udfs not copied to COS")
-  datasrc = pytest.mark.skipif(IS_COS, reason="data sources not copied to COS")
-  hbase = pytest.mark.skipif(IS_COS, reason="HBase not started with COS")
-  qualified_path = pytest.mark.skipif(IS_COS,
+  hive = pytest.mark.skipif(not IS_HDFS, reason="Hive doesn't work")
+  hbase = pytest.mark.skipif(not IS_HDFS, reason="HBase not started")
+  qualified_path = pytest.mark.skipif(not IS_HDFS,
       reason="Tests rely on HDFS qualified paths, IMPALA-1872")
-  variable_listing_times = pytest.mark.skipif(IS_COS,
-      reason="Flakiness due to unpredictable listing times on COS.")
-  eventually_consistent = pytest.mark.skipif(IS_COS,
+  variable_listing_times = pytest.mark.skipif(IS_S3 or IS_GCS or IS_COS,
+      reason="Flakiness due to unpredictable listing times on S3.")
+  eventually_consistent = pytest.mark.skipif(IS_ADLS or IS_COS,
       reason="The client is slow to realize changes to file metadata")
 
 class SkipIfKudu:
@@ -188,30 +96,10 @@ class SkipIf:
   is_buggy_el6_kernel = pytest.mark.skipif(
       IS_BUGGY_EL6_KERNEL, reason="Kernel is affected by KUDU-1508")
 
-
-class SkipIfIsilon:
-  caching = pytest.mark.skipif(IS_ISILON, reason="SET CACHED not implemented for Isilon")
-  hbase = pytest.mark.skipif(IS_ISILON, reason="HBase not tested with Isilon")
-  hive = pytest.mark.skipif(IS_ISILON, reason="Hive not tested with Isilon")
-  hdfs_acls = pytest.mark.skipif(IS_ISILON, reason="HDFS acls are not supported on Isilon")
-  hdfs_block_size = pytest.mark.skipif(IS_ISILON,
-      reason="Isilon uses its own block size")
-  hdfs_encryption = pytest.mark.skipif(IS_ISILON,
-      reason="HDFS encryption is not supported with Isilon")
-  untriaged = pytest.mark.skipif(IS_ISILON,
-      reason="This Isilon issue has yet to be triaged.")
-  jira = partial(pytest.mark.skipif, IS_ISILON)
-
 class SkipIfLocal:
   # These are skipped due to product limitations.
-  caching = pytest.mark.skipif(IS_LOCAL,
-      reason="HDFS caching not supported on local file system")
   hdfs_blocks = pytest.mark.skipif(IS_LOCAL,
       reason="Files on local filesystem are not split into blocks")
-  hdfs_encryption = pytest.mark.skipif(IS_LOCAL,
-      reason="HDFS encryption is not supported on local filesystem")
-  hive = pytest.mark.skipif(IS_LOCAL,
-      reason="Hive not started when using local file system")
   multiple_impalad = pytest.mark.skipif(IS_LOCAL,
       reason="Multiple impalads are not supported when using local file system")
   parquet_file_size = pytest.mark.skipif(IS_LOCAL,
@@ -220,12 +108,8 @@ class SkipIfLocal:
       reason="HDFS file handle caching not supported for local non-HDFS files")
 
   # These need test infra work to re-enable.
-  hbase = pytest.mark.skipif(IS_LOCAL,
-      reason="HBase not started when using local file system")
   hdfs_client = pytest.mark.skipif(IS_LOCAL,
       reason="HDFS not started when using local file system")
-  qualified_path = pytest.mark.skipif(IS_LOCAL,
-      reason="Tests rely on HDFS qualified paths")
   root_path = pytest.mark.skipif(IS_LOCAL,
       reason="Tests rely on the root directory")
 
diff --git a/tests/custom_cluster/test_admission_controller.py b/tests/custom_cluster/test_admission_controller.py
index 37da089fe..8fdc8667f 100644
--- a/tests/custom_cluster/test_admission_controller.py
+++ b/tests/custom_cluster/test_admission_controller.py
@@ -35,15 +35,7 @@ from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.environ import build_flavor_timeout, ImpalaTestClusterProperties
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.resource_pool_config import ResourcePoolConfig
-from tests.common.skip import (
-    SkipIfS3,
-    SkipIfGCS,
-    SkipIfCOS,
-    SkipIfABFS,
-    SkipIfADLS,
-    SkipIfEC,
-    SkipIfNotHdfsMinicluster,
-    SkipIfOS)
+from tests.common.skip import SkipIfFS, SkipIfEC, SkipIfNotHdfsMinicluster, SkipIfOS
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -444,11 +436,7 @@ class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
       assert re.search("Rejected query from pool default-pool: request memory needed "
                        ".* is greater than pool max mem resources 10.00 MB", str(ex))
 
-  @SkipIfS3.hdfs_block_size
-  @SkipIfGCS.hdfs_block_size
-  @SkipIfCOS.hdfs_block_size
-  @SkipIfABFS.hdfs_block_size
-  @SkipIfADLS.hdfs_block_size
+  @SkipIfFS.hdfs_block_size
   @SkipIfEC.fix_later
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
diff --git a/tests/custom_cluster/test_coordinators.py b/tests/custom_cluster/test_coordinators.py
index e21c02900..2530f68ba 100644
--- a/tests/custom_cluster/test_coordinators.py
+++ b/tests/custom_cluster/test_coordinators.py
@@ -24,8 +24,7 @@ import time
 from subprocess import check_call
 from tests.util.filesystem_utils import get_fs_path
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfOzone,
-                               SkipIfIsilon, SkipIfGCS, SkipIfCOS, SkipIfLocal)
+from tests.common.skip import SkipIf, SkipIfFS
 
 LOG = logging.getLogger('test_coordinators')
 LOG.setLevel(level=logging.DEBUG)
@@ -320,14 +319,7 @@ class TestCoordinators(CustomClusterTestSuite):
     num_hosts = "hosts=10 instances=10"
     assert num_hosts in str(ret)
 
-  @SkipIfS3.hbase
-  @SkipIfOzone.hbase
-  @SkipIfGCS.hbase
-  @SkipIfCOS.hbase
-  @SkipIfABFS.hbase
-  @SkipIfADLS.hbase
-  @SkipIfIsilon.hbase
-  @SkipIfLocal.hbase
+  @SkipIfFS.hbase
   @SkipIf.skip_hbase
   @pytest.mark.execute_serially
   def test_executor_only_hbase(self):
diff --git a/tests/custom_cluster/test_events_custom_configs.py b/tests/custom_cluster/test_events_custom_configs.py
index 827ba9d28..8fe6ae24a 100644
--- a/tests/custom_cluster/test_events_custom_configs.py
+++ b/tests/custom_cluster/test_events_custom_configs.py
@@ -19,20 +19,13 @@ import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-  SkipIfGCS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 from tests.util.hive_utils import HiveDbWrapper
 from tests.util.event_processor_utils import EventProcessorUtils
 from tests.util.filesystem_utils import WAREHOUSE
 
 
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfGCS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+@SkipIfFS.hive
 class TestEventProcessingCustomConfigs(CustomClusterTestSuite):
   """This class contains tests that exercise the event processing mechanism in the
   catalog for non-default configurations"""
diff --git a/tests/custom_cluster/test_hive_parquet_codec_interop.py b/tests/custom_cluster/test_hive_parquet_codec_interop.py
index d03867b9d..c9601d72d 100644
--- a/tests/custom_cluster/test_hive_parquet_codec_interop.py
+++ b/tests/custom_cluster/test_hive_parquet_codec_interop.py
@@ -22,7 +22,7 @@ import pytest
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.util.event_processor_utils import EventProcessorUtils
 from tests.common.environ import HIVE_MAJOR_VERSION
-from tests.common.skip import SkipIfS3, SkipIfGCS, SkipIfCOS, SkipIfOzone
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 from tests.util.filesystem_utils import get_fs_path
@@ -52,10 +52,7 @@ class TestParquetInterop(CustomClusterTestSuite):
     cls.ImpalaTestMatrix.add_constraint(
         lambda v: v.get_value('table_format').file_format == 'parquet')
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("-convert_legacy_hive_parquet_utc_timestamps=true "
       "-hdfs_zone_info_zip=%s" % get_fs_path("/test-warehouse/tzdb/2017c.zip"))
diff --git a/tests/custom_cluster/test_hive_text_codec_interop.py b/tests/custom_cluster/test_hive_text_codec_interop.py
index ee41e3f29..a23d88102 100644
--- a/tests/custom_cluster/test_hive_text_codec_interop.py
+++ b/tests/custom_cluster/test_hive_text_codec_interop.py
@@ -21,7 +21,7 @@ import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.environ import HIVE_MAJOR_VERSION
-from tests.common.skip import SkipIfS3, SkipIfGCS, SkipIfCOS, SkipIfOzone
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 
@@ -51,10 +51,7 @@ class TestTextInterop(CustomClusterTestSuite):
     cls.ImpalaTestMatrix.add_constraint(
         lambda v: v.get_value('table_format').file_format == 'textfile')
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   def test_hive_impala_interop(self, unique_database, cluster_properties):
     """Tests compressed text file written by Hive with different codecs
diff --git a/tests/custom_cluster/test_insert_behaviour.py b/tests/custom_cluster/test_insert_behaviour.py
index 2cd0be2ee..89d06f5cc 100644
--- a/tests/custom_cluster/test_insert_behaviour.py
+++ b/tests/custom_cluster/test_insert_behaviour.py
@@ -18,8 +18,7 @@
 import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal,
-                               SkipIfGCS, SkipIfCOS, SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfLocal
 from tests.util.filesystem_utils import IS_ISILON, WAREHOUSE
 from tests.util.hdfs_util import (
     HdfsConfig,
@@ -28,12 +27,9 @@ from tests.util.hdfs_util import (
 
 TEST_TBL = "insert_inherit_permission"
 
-@SkipIfS3.hdfs_acls
-@SkipIfOzone.hdfs_acls
-@SkipIfGCS.hdfs_acls
-@SkipIfCOS.hdfs_acls
-@SkipIfABFS.hdfs_acls
-@SkipIfADLS.hdfs_acls
+
+@SkipIfFS.hdfs_acls
+@SkipIfLocal.hdfs_client
 class TestInsertBehaviourCustomCluster(CustomClusterTestSuite):
 
   @classmethod
@@ -84,7 +80,6 @@ class TestInsertBehaviourCustomCluster(CustomClusterTestSuite):
     cls._drop_test_tbl()
     super(TestInsertBehaviourCustomCluster, cls).teardown_method(method)
 
-  @SkipIfLocal.hdfs_client
   @SkipIfLocal.root_path
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--insert_inherit_permissions=true")
@@ -114,7 +109,6 @@ class TestInsertBehaviourCustomCluster(CustomClusterTestSuite):
     finally:
       client.close()
 
-  @SkipIfLocal.hdfs_client
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--insert_inherit_permissions=false")
   def test_insert_inherit_permission_disabled(self):
diff --git a/tests/custom_cluster/test_lineage.py b/tests/custom_cluster/test_lineage.py
index 6d08914b7..71668b0db 100644
--- a/tests/custom_cluster/test_lineage.py
+++ b/tests/custom_cluster/test_lineage.py
@@ -26,8 +26,7 @@ import tempfile
 import time
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfABFS, SkipIfADLS, SkipIfS3, SkipIfGCS, SkipIfCOS,
-  SkipIfOzone)
+from tests.common.skip import SkipIfFS
 
 LOG = logging.getLogger(__name__)
 
@@ -150,12 +149,7 @@ class TestLineage(CustomClusterTestSuite):
           lineage_json = json.load(log_file)
           assert lineage_json["queryId"] is not profile_query_id
 
-  @SkipIfABFS.hbase
-  @SkipIfADLS.hbase
-  @SkipIfS3.hbase
-  @SkipIfOzone.hbase
-  @SkipIfGCS.hbase
-  @SkipIfCOS.hbase
+  @SkipIfFS.hbase
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--lineage_event_log_dir={0}"
                                     .format(LINEAGE_TESTS_DIR))
diff --git a/tests/custom_cluster/test_local_catalog.py b/tests/custom_cluster/test_local_catalog.py
index 1b576f569..53d535ec6 100644
--- a/tests/custom_cluster/test_local_catalog.py
+++ b/tests/custom_cluster/test_local_catalog.py
@@ -27,8 +27,7 @@ import time
 from multiprocessing.pool import ThreadPool
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfHive2, SkipIfS3, SkipIfABFS, SkipIfGCS, SkipIfCOS,
-                               SkipIfADLS, SkipIfIsilon, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfHive2, SkipIfFS
 from tests.util.filesystem_utils import WAREHOUSE
 
 RETRY_PROFILE_MSG = 'Retried query planning due to inconsistent metadata'
@@ -535,14 +534,7 @@ class TestFullAcid(CustomClusterTestSuite):
     assert res.data == ['0', '1', '2', '3', '4', '5', '6', '7']
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   def test_full_acid_scans(self, vector, unique_database):
     self.run_test_case('QueryTest/full-acid-scans', vector, use_db=unique_database)
diff --git a/tests/custom_cluster/test_local_tz_conversion.py b/tests/custom_cluster/test_local_tz_conversion.py
index 1d7635a6b..ca1a720d7 100644
--- a/tests/custom_cluster/test_local_tz_conversion.py
+++ b/tests/custom_cluster/test_local_tz_conversion.py
@@ -17,10 +17,8 @@
 
 import pytest
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_vector import ImpalaTestDimension
-from tests.common.skip import (SkipIfABFS, SkipIfADLS, SkipIfS3, SkipIfOzone, SkipIfGCS,
-  SkipIfCOS)
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import create_exec_option_dimension
 
 class TestLocalTzConversion(CustomClusterTestSuite):
@@ -48,12 +46,7 @@ class TestLocalTzConversion(CustomClusterTestSuite):
   def get_workload(self):
     return 'functional-query'
 
-  @SkipIfABFS.hbase
-  @SkipIfADLS.hbase
-  @SkipIfS3.hbase
-  @SkipIfOzone.hbase
-  @SkipIfGCS.hbase
-  @SkipIfCOS.hbase
+  @SkipIfFS.hbase
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--use_local_tz_for_unix_timestamp_conversions=true")
   def test_timestamp_functions(self, vector):
diff --git a/tests/custom_cluster/test_metadata_no_events_processing.py b/tests/custom_cluster/test_metadata_no_events_processing.py
index 71af95cc5..9d380cf4b 100644
--- a/tests/custom_cluster/test_metadata_no_events_processing.py
+++ b/tests/custom_cluster/test_metadata_no_events_processing.py
@@ -16,17 +16,10 @@
 # under the License.
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfGCS,
-                               SkipIfIsilon, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 
 
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfGCS.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+@SkipIfFS.hive
 class TestMetadataNoEventsProcessing(CustomClusterTestSuite):
 
   @CustomClusterTestSuite.with_args(catalogd_args="--hms_event_polling_interval_s=0")
diff --git a/tests/custom_cluster/test_metadata_replicas.py b/tests/custom_cluster/test_metadata_replicas.py
index cee6732e8..a458b23aa 100644
--- a/tests/custom_cluster/test_metadata_replicas.py
+++ b/tests/custom_cluster/test_metadata_replicas.py
@@ -16,28 +16,12 @@
 # under the License.
 
 import pytest
-import re
-from time import sleep
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (
-    SkipIfS3,
-    SkipIfOzone,
-    SkipIfABFS,
-    SkipIfADLS,
-    SkipIfGCS,
-    SkipIfCOS,
-    SkipIfIsilon,
-    SkipIfLocal)
+from tests.common.skip import SkipIfFS
 from tests.util.hive_utils import HiveDbWrapper
 
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfGCS.hive
-@SkipIfCOS.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+
+@SkipIfFS.hive
 class TestMetadataReplicas(CustomClusterTestSuite):
   """ Validates metadata content across catalogd and impalad coordinators."""
 
diff --git a/tests/custom_cluster/test_parquet_max_page_header.py b/tests/custom_cluster/test_parquet_max_page_header.py
index 63fce015c..48cfa0fce 100644
--- a/tests/custom_cluster/test_parquet_max_page_header.py
+++ b/tests/custom_cluster/test_parquet_max_page_header.py
@@ -24,8 +24,7 @@ import string
 import subprocess
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfGCS,
-                               SkipIfCOS, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 
 class TestParquetMaxPageHeader(CustomClusterTestSuite):
   '''This tests large page headers in parquet files. Parquet page header size can
@@ -101,13 +100,7 @@ class TestParquetMaxPageHeader(CustomClusterTestSuite):
     put.stdin.close()
     put.wait()
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("-max_page_header_size=31457280")
   def test_large_page_header_config(self, vector):
diff --git a/tests/custom_cluster/test_partition.py b/tests/custom_cluster/test_partition.py
index 102a159c5..aa996a5ce 100644
--- a/tests/custom_cluster/test_partition.py
+++ b/tests/custom_cluster/test_partition.py
@@ -20,7 +20,7 @@ import pytest
 import shutil
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfIsilon, SkipIfLocal)
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import (
   create_single_exec_option_dimension,
   create_uncompressed_text_dimension)
@@ -43,8 +43,7 @@ class TestPartition(CustomClusterTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
       create_uncompressed_text_dimension(cls.get_workload()))
 
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
     impalad_args="--use_local_catalog=true",
@@ -61,8 +60,7 @@ class TestPartition(CustomClusterTestSuite):
       except OSError as e:
         LOG.info("Cannot remove directory %s, %s " % (local_file_dir, e.strerror))
 
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
     impalad_args="--use_local_catalog=false",
diff --git a/tests/custom_cluster/test_permanent_udfs.py b/tests/custom_cluster/test_permanent_udfs.py
index 581e7ba40..3c6719bf6 100644
--- a/tests/custom_cluster/test_permanent_udfs.py
+++ b/tests/custom_cluster/test_permanent_udfs.py
@@ -20,12 +20,10 @@ import os
 import pytest
 import re
 import shutil
-import subprocess
 
 from tempfile import mkdtemp
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfGCS,
-                               SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import create_uncompressed_text_dimension
 from tests.util.filesystem_utils import get_fs_path
 
@@ -161,14 +159,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
     stmt = "RELOAD FUNCTION ; DESCRIBE FUNCTION {0}.{1}".format(db, udf)
     return self.run_stmt_in_hive(stmt)
 
-  @SkipIfIsilon.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   def test_corrupt_java_udf(self):
     """ IMPALA-3820: This tests if the Catalog server can gracefully handle
@@ -185,15 +176,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
     self.verify_function_count(
         "SHOW FUNCTIONS in {0}".format(self.JAVA_FN_TEST_DB), 0)
 
-
-  @SkipIfIsilon.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
      catalogd_args= "--local_library_dir={0}".format(LOCAL_LIBRARY_DIR))
@@ -254,14 +237,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
       # Make sure we deleted all the temporary jars we copied to the local fs
       assert len(glob.glob(self.LOCAL_LIBRARY_DIR + "/*.jar")) == 0
 
-  @SkipIfIsilon.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
      catalogd_args= "--local_library_dir={0}".format(LOCAL_LIBRARY_DIR))
@@ -320,14 +296,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
     # Make sure we deleted all the temporary jars we copied to the local fs
     assert len(glob.glob(self.LOCAL_LIBRARY_DIR + "/*.jar")) == 0
 
-  @SkipIfIsilon.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
      catalogd_args= "--local_library_dir={0}".format(LOCAL_LIBRARY_DIR))
diff --git a/tests/custom_cluster/test_query_retries.py b/tests/custom_cluster/test_query_retries.py
index 23febb8f1..34e83ae92 100644
--- a/tests/custom_cluster/test_query_retries.py
+++ b/tests/custom_cluster/test_query_retries.py
@@ -34,8 +34,8 @@ from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.errors import Timeout
-from tests.common.skip import SkipIfEC, SkipIfBuildType, SkipIfGCS, SkipIfCOS
-from tests.common.skip import SkipIfNotHdfsMinicluster
+from tests.common.skip import (SkipIfEC, SkipIfBuildType, SkipIfFS,
+    SkipIfNotHdfsMinicluster)
 
 # The BE krpc port of the impalad to simulate rpc or disk errors in tests.
 FAILED_KRPC_PORT = 27001
@@ -292,8 +292,7 @@ class TestQueryRetries(CustomClusterTestSuite):
     self.__validate_web_ui_state()
     self.__validate_memz()
 
-  @SkipIfGCS.jira(reason="IMPALA-10562")
-  @SkipIfCOS.jira(reason="IMPALA-10562")
+  @SkipIfFS.shutdown_idle_fails
   @SkipIfBuildType.not_dev_build
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
@@ -359,8 +358,7 @@ class TestQueryRetries(CustomClusterTestSuite):
     self.client.close_query(handle)
     self.__validate_web_ui_state()
 
-  @SkipIfGCS.jira(reason="IMPALA-10562")
-  @SkipIfCOS.jira(reason="IMPALA-10562")
+  @SkipIfFS.shutdown_idle_fails
   @SkipIfBuildType.not_dev_build
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
diff --git a/tests/custom_cluster/test_restart_services.py b/tests/custom_cluster/test_restart_services.py
index 55b3800c2..a97f7e857 100644
--- a/tests/custom_cluster/test_restart_services.py
+++ b/tests/custom_cluster/test_restart_services.py
@@ -35,7 +35,7 @@ from TCLIService import TCLIService
 from beeswaxd.BeeswaxService import QueryState
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfNotHdfsMinicluster, SkipIfGCS, SkipIfCOS
+from tests.common.skip import SkipIfNotHdfsMinicluster, SkipIfFS
 from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
 
 LOG = logging.getLogger(__name__)
@@ -360,8 +360,7 @@ class TestGracefulShutdown(CustomClusterTestSuite, HS2TestSuite):
   def get_workload(cls):
     return 'functional-query'
 
-  @SkipIfGCS.jira(reason="IMPALA-10562")
-  @SkipIfCOS.jira(reason="IMPALA-10562")
+  @SkipIfFS.shutdown_idle_fails
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
       impalad_args="--shutdown_grace_period_s={grace_period} \
@@ -426,8 +425,7 @@ class TestGracefulShutdown(CustomClusterTestSuite, HS2TestSuite):
     shutdown_duration = time.time() - start_time
     assert shutdown_duration <= self.IDLE_SHUTDOWN_GRACE_PERIOD_S + 10
 
-  @SkipIfGCS.jira(reason="IMPALA-10562")
-  @SkipIfCOS.jira(reason="IMPALA-10562")
+  @SkipIfFS.shutdown_idle_fails
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
       impalad_args="--shutdown_grace_period_s={grace_period} \
diff --git a/tests/custom_cluster/test_rpc_timeout.py b/tests/custom_cluster/test_rpc_timeout.py
index 5a93dbb0c..8b266629f 100644
--- a/tests/custom_cluster/test_rpc_timeout.py
+++ b/tests/custom_cluster/test_rpc_timeout.py
@@ -19,7 +19,7 @@ import pytest
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.impala_cluster import ImpalaCluster
-from tests.common.skip import SkipIfBuildType, SkipIfGCS, SkipIfCOS
+from tests.common.skip import SkipIfBuildType, SkipIfFS
 from tests.verifiers.metric_verifier import MetricVerifier
 
 # The BE krpc port of the impalad to simulate rpc errors in tests.
@@ -205,8 +205,7 @@ class TestRPCTimeout(CustomClusterTestSuite):
     self.execute_query_verify_metrics(self.SLOW_TEST_QUERY,
         expected_exception="cancelled due to unresponsive backend")
 
-  @SkipIfGCS.jira(reason="IMPALA-10562")
-  @SkipIfCOS.jira(reason="IMPALA-10562")
+  @SkipIfFS.shutdown_idle_fails
   @SkipIfBuildType.not_dev_build
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
diff --git a/tests/custom_cluster/test_runtime_profile.py b/tests/custom_cluster/test_runtime_profile.py
index a03385962..3d669c954 100644
--- a/tests/custom_cluster/test_runtime_profile.py
+++ b/tests/custom_cluster/test_runtime_profile.py
@@ -17,7 +17,7 @@
 
 import pytest
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfEC, SkipIfOzone
+from tests.common.skip import SkipIfEC, SkipIfFS
 
 
 class TestRuntimeProfile(CustomClusterTestSuite):
@@ -30,7 +30,7 @@ class TestRuntimeProfile(CustomClusterTestSuite):
   PERIODIC_COUNTER_UPDATE_FLAG = '--periodic_counter_update_period_ms=50'
 
   # Test depends on block size < 256MiB so larger table is stored in at least 4 blocks.
-  @SkipIfOzone.hdfs_block_size
+  @SkipIfFS.large_block_size
   @SkipIfEC.different_schedule
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args('--gen_experimental_profile=true ' +
diff --git a/tests/custom_cluster/test_topic_update_frequency.py b/tests/custom_cluster/test_topic_update_frequency.py
index 216273538..99d7b659c 100644
--- a/tests/custom_cluster/test_topic_update_frequency.py
+++ b/tests/custom_cluster/test_topic_update_frequency.py
@@ -15,12 +15,10 @@ import pytest
 import time
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfS3, SkipIfGCS, SkipIfCOS
+from tests.common.skip import SkipIfFS
 
 
-@SkipIfS3.variable_listing_times
-@SkipIfGCS.variable_listing_times
-@SkipIfCOS.variable_listing_times
+@SkipIfFS.variable_listing_times
 class TestTopicUpdateFrequency(CustomClusterTestSuite):
 
   @pytest.mark.execute_serially
diff --git a/tests/data_errors/test_data_errors.py b/tests/data_errors/test_data_errors.py
index 0b3260f0e..562eef2bf 100644
--- a/tests/data_errors/test_data_errors.py
+++ b/tests/data_errors/test_data_errors.py
@@ -20,13 +20,11 @@
 # Tests Impala properly handles errors when reading and writing data.
 
 import pytest
-import random
 import subprocess
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfGCS,
-                               SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIf, SkipIfFS
 from tests.common.test_dimensions import create_exec_option_dimension
 
 class TestDataErrors(ImpalaTestSuite):
@@ -106,12 +104,8 @@ class TestHdfsUnknownErrors(ImpalaTestSuite):
       assert error is "", "Couldn't turn Safe mode OFF. Error: %s" % (error)
       assert "Safe mode is OFF" in output
 
-@SkipIfS3.qualified_path
-@SkipIfOzone.qualified_path
-@SkipIfGCS.qualified_path
-@SkipIfCOS.qualified_path
-@SkipIfABFS.qualified_path
-@SkipIfADLS.qualified_path
+
+@SkipIfFS.qualified_path
 class TestHdfsScanNodeErrors(TestDataErrors):
   @classmethod
   def add_test_dimensions(cls):
@@ -128,13 +122,8 @@ class TestHdfsScanNodeErrors(TestDataErrors):
       pytest.xfail("Expected results differ across file formats")
     self.run_test_case('DataErrorsTest/hdfs-scan-node-errors', vector)
 
-@SkipIfS3.qualified_path
-@SkipIfOzone.qualified_path
-@SkipIfGCS.qualified_path
-@SkipIfCOS.qualified_path
-@SkipIfABFS.qualified_path
-@SkipIfADLS.qualified_path
-@SkipIfLocal.qualified_path
+
+@SkipIfFS.qualified_path
 class TestHdfsSeqScanNodeErrors(TestHdfsScanNodeErrors):
   @classmethod
   def add_test_dimensions(cls):
@@ -147,12 +136,7 @@ class TestHdfsSeqScanNodeErrors(TestHdfsScanNodeErrors):
     self.run_test_case('DataErrorsTest/hdfs-sequence-scan-errors', vector)
 
 
-@SkipIfS3.qualified_path
-@SkipIfOzone.qualified_path
-@SkipIfGCS.qualified_path
-@SkipIfCOS.qualified_path
-@SkipIfABFS.qualified_path
-@SkipIfADLS.qualified_path
+@SkipIfFS.qualified_path
 class TestHdfsRcFileScanNodeErrors(TestHdfsScanNodeErrors):
   @classmethod
   def add_test_dimensions(cls):
diff --git a/tests/failure/test_failpoints.py b/tests/failure/test_failpoints.py
index 1d6e23891..9283d4919 100644
--- a/tests/failure/test_failpoints.py
+++ b/tests/failure/test_failpoints.py
@@ -19,16 +19,13 @@
 # two types of failures - cancellation of the query and a failure test hook.
 #
 import pytest
-import os
 import re
-from collections import defaultdict
 from time import sleep
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
-from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIf, SkipIfFS
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_vector import ImpalaTestDimension
 from tests.verifiers.metric_verifier import MetricVerifier
@@ -61,15 +58,9 @@ QUERIES = [
            WHERE t2.int_col < 1000"""
 ]
 
-@SkipIf.skip_hbase # -skip_hbase argument specified
-@SkipIfS3.hbase # S3: missing coverage: failures
-@SkipIfOzone.hbase
-@SkipIfGCS.hbase
-@SkipIfCOS.hbase
-@SkipIfABFS.hbase
-@SkipIfADLS.hbase
-@SkipIfIsilon.hbase # ISILON: missing coverage: failures.
-@SkipIfLocal.hbase
+
+@SkipIf.skip_hbase  # -skip_hbase argument specified
+@SkipIfFS.hbase  # missing coverage: failures
 class TestFailpoints(ImpalaTestSuite):
   @classmethod
   def get_workload(cls):
diff --git a/tests/metadata/test_catalogd_debug_actions.py b/tests/metadata/test_catalogd_debug_actions.py
index 57d5d5d6c..77cfb58ef 100644
--- a/tests/metadata/test_catalogd_debug_actions.py
+++ b/tests/metadata/test_catalogd_debug_actions.py
@@ -17,12 +17,10 @@
 
 import pytest
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3, SkipIfGCS, SkipIfCOS
+from tests.common.skip import SkipIfFS
 
 
-@SkipIfS3.variable_listing_times
-@SkipIfGCS.variable_listing_times
-@SkipIfCOS.variable_listing_times
+@SkipIfFS.variable_listing_times
 class TestDebugActions(ImpalaTestSuite):
 
   @pytest.mark.execute_serially
diff --git a/tests/metadata/test_compute_stats.py b/tests/metadata/test_compute_stats.py
index 69fa50739..e4c919e42 100644
--- a/tests/metadata/test_compute_stats.py
+++ b/tests/metadata/test_compute_stats.py
@@ -21,9 +21,7 @@ from subprocess import check_call
 from tests.common.environ import ImpalaTestClusterProperties
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfCatalogV2,
-                               SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfLocal, SkipIfCatalogV2
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_single_exec_option_dimension,
@@ -114,14 +112,7 @@ class TestComputeStats(ImpalaTestSuite):
                                           {"compression_codec": c})
         self.execute_query_expect_success(self.client, "drop stats {0}".format(table))
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_compute_stats_impala_2201(self, vector, unique_database):
     """IMPALA-2201: Tests that the results of compute incremental stats are properly
     persisted when the data was loaded from Hive with hive.stats.autogather=true.
@@ -196,14 +187,7 @@ class TestComputeStats(ImpalaTestSuite):
          assert(hdfs_physical_properties_template in explain_result.data[i + 1])
          assert("cardinality=0" not in explain_result.data[i + 2])
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_corrupted_stats_in_partitioned_hive_tables(self, vector, unique_database):
     """IMPALA-9744: Tests that the partition stats corruption in Hive tables
     (row count=0, partition size>0, persisted when the data was loaded with
@@ -246,14 +230,7 @@ class TestComputeStats(ImpalaTestSuite):
     self.create_load_test_corrupt_stats(self, unique_database, create_load_stmts,
             table_name, 2, 2)
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_corrupted_stats_in_unpartitioned_hive_tables(self, vector, unique_database):
     """IMPALA-9744: Tests that the stats corruption in unpartitioned Hive
     tables (row count=0, partition size>0, persisted when the data was loaded
diff --git a/tests/metadata/test_ddl.py b/tests/metadata/test_ddl.py
index 2183a788e..241feebd5 100644
--- a/tests/metadata/test_ddl.py
+++ b/tests/metadata/test_ddl.py
@@ -29,9 +29,8 @@ from tests.common.environ import (HIVE_MAJOR_VERSION)
 from tests.common.file_utils import create_table_from_orc
 from tests.common.impala_test_suite import LOG
 from tests.common.parametrize import UniqueDatabase
-from tests.common.skip import (SkipIf, SkipIfABFS, SkipIfADLS, SkipIfKudu, SkipIfLocal,
-                               SkipIfCatalogV2, SkipIfHive2, SkipIfS3, SkipIfGCS,
-                               SkipIfCOS, SkipIfOzone)
+from tests.common.skip import (SkipIf, SkipIfFS, SkipIfKudu, SkipIfLocal,
+                               SkipIfCatalogV2, SkipIfHive2)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.common.test_dimensions import (create_exec_option_dimension,
     create_client_protocol_dimension)
@@ -105,7 +104,7 @@ class TestDdlStatements(TestDdlBase):
     self.filesystem_client.delete_file_dir(
         "test-warehouse/{0}.db/data_t3".format(unique_database), recursive=True)
 
-  @SkipIfADLS.eventually_consistent
+  @SkipIfFS.eventually_consistent
   @SkipIfLocal.hdfs_client
   def test_drop_cleans_hdfs_dirs(self, unique_database):
     self.client.execute('use default')
@@ -153,7 +152,7 @@ class TestDdlStatements(TestDdlBase):
     # Re-create database to make unique_database teardown succeed.
     self._create_db(unique_database)
 
-  @SkipIfADLS.eventually_consistent
+  @SkipIfFS.eventually_consistent
   @SkipIfLocal.hdfs_client
   def test_truncate_cleans_hdfs_files(self, unique_database):
     # Verify the db directory exists
@@ -305,10 +304,7 @@ class TestDdlStatements(TestDdlBase):
         use_db=unique_database, multiple_impalad=self._use_multiple_impalad(vector))
 
   @SkipIfHive2.orc
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   @UniqueDatabase.parametrize(sync_ddl=True)
   def test_create_table_like_file_orc(self, vector, unique_database):
     COMPLEXTYPETBL_PATH = 'test-warehouse/managed/functional_orc_def.db/' \
diff --git a/tests/metadata/test_event_processing.py b/tests/metadata/test_event_processing.py
index eeac52914..dfe98ea3d 100644
--- a/tests/metadata/test_event_processing.py
+++ b/tests/metadata/test_event_processing.py
@@ -14,20 +14,12 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-from tests.common.skip import SkipIfHive2, SkipIfCatalogV2
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfHive2, SkipIfCatalogV2
 from tests.util.event_processor_utils import EventProcessorUtils
 
 
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfGCS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+@SkipIfFS.hive
 @SkipIfCatalogV2.hms_event_polling_disabled()
 class TestEventProcessing(ImpalaTestSuite):
   """This class contains tests that exercise the event processing mechanism in the
diff --git a/tests/metadata/test_hdfs_encryption.py b/tests/metadata/test_hdfs_encryption.py
index 06724d120..897180fd5 100644
--- a/tests/metadata/test_hdfs_encryption.py
+++ b/tests/metadata/test_hdfs_encryption.py
@@ -19,8 +19,7 @@ import getpass
 import pytest
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -34,14 +33,7 @@ PYWEBHDFS_TMP_DIR = 'tmp/test_encryption_load_data'
 TMP_DIR = '/%s' % (PYWEBHDFS_TMP_DIR)
 
 
-@SkipIfS3.hdfs_encryption
-@SkipIfOzone.hdfs_encryption
-@SkipIfGCS.hdfs_encryption
-@SkipIfCOS.hdfs_encryption
-@SkipIfABFS.hdfs_encryption
-@SkipIfADLS.hdfs_encryption
-@SkipIfIsilon.hdfs_encryption
-@SkipIfLocal.hdfs_encryption
+@SkipIfFS.hdfs_encryption
 @pytest.mark.execute_serially
 class TestHdfsEncryption(ImpalaTestSuite):
   ''' Tests LOAD DATA commands work between HDFS encryption zones.
@@ -151,7 +143,6 @@ class TestHdfsEncryption(ImpalaTestSuite):
     else:
       self.client.execute('load data inpath \'%s\' into table tbl ' % (TMP_DIR))
 
-  @SkipIfIsilon.hdfs_encryption
   @pytest.mark.execute_serially
   def test_drop_partition_encrypt(self):
     """Verifies if alter <tbl> drop partition purge works in case
@@ -222,7 +213,6 @@ class TestHdfsEncryption(ImpalaTestSuite):
     assert not self.hdfs_client.exists("test-warehouse/{0}.db/t1/j=3/j3.txt".format(TEST_DB))
     assert not self.hdfs_client.exists("test-warehouse/{0}.db/t1/j=3".format(TEST_DB))
 
-  @SkipIfIsilon.hdfs_encryption
   @pytest.mark.execute_serially
   def test_drop_table_encrypt(self):
     """Verifies if drop <table> purge works in a case where Trash directory and table
diff --git a/tests/metadata/test_hdfs_permissions.py b/tests/metadata/test_hdfs_permissions.py
index d0e74e368..2ff4e1e6a 100644
--- a/tests/metadata/test_hdfs_permissions.py
+++ b/tests/metadata/test_hdfs_permissions.py
@@ -16,8 +16,7 @@
 # under the License.
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal,
-                               SkipIfGCS, SkipIfCOS, SkipIfCatalogV2, SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfLocal, SkipIfCatalogV2
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -28,12 +27,7 @@ TEST_TBL = 'read_only_tbl'
 TBL_LOC = '%s/%s' % (WAREHOUSE, TEST_TBL)
 
 
-@SkipIfS3.hdfs_acls
-@SkipIfOzone.hdfs_acls
-@SkipIfGCS.hdfs_acls
-@SkipIfCOS.hdfs_acls
-@SkipIfABFS.hdfs_acls
-@SkipIfADLS.hdfs_acls
+@SkipIfFS.hdfs_acls
 @SkipIfLocal.hdfs_client
 class TestHdfsPermissions(ImpalaTestSuite):
   @classmethod
diff --git a/tests/metadata/test_hms_integration.py b/tests/metadata/test_hms_integration.py
index e303c626b..cea6101fc 100644
--- a/tests/metadata/test_hms_integration.py
+++ b/tests/metadata/test_hms_integration.py
@@ -27,13 +27,10 @@
 import pytest
 import random
 import string
-from subprocess import call
 
 from tests.common.environ import HIVE_MAJOR_VERSION
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfHive2, SkipIfHive3,
-                               SkipIfIsilon, SkipIfGCS, SkipIfCOS, SkipIfLocal,
-                               SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfHive2, SkipIfHive3
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -41,14 +38,7 @@ from tests.util.event_processor_utils import EventProcessorUtils
 from tests.util.hive_utils import HiveDbWrapper, HiveTableWrapper
 
 
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfGCS.hive
-@SkipIfCOS.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+@SkipIfFS.hive
 class TestHmsIntegrationSanity(ImpalaTestSuite):
   @classmethod
   def get_workload(self):
@@ -153,14 +143,8 @@ class TestHmsIntegrationSanity(ImpalaTestSuite):
     else:
       assert False
 
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfGCS.hive
-@SkipIfCOS.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+
+@SkipIfFS.hive
 class TestHmsIntegration(ImpalaTestSuite):
 
   @classmethod
diff --git a/tests/metadata/test_metadata_query_statements.py b/tests/metadata/test_metadata_query_statements.py
index ed2f54302..357cc8bc8 100644
--- a/tests/metadata/test_metadata_query_statements.py
+++ b/tests/metadata/test_metadata_query_statements.py
@@ -20,11 +20,8 @@
 import pytest
 import re
 
-from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfIsilon, SkipIfS3, SkipIfABFS, SkipIfADLS,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfCatalogV2,
-                               SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfCatalogV2
 from tests.common.test_dimensions import ALL_NODES_ONLY
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_dimensions import create_uncompressed_text_dimension
@@ -76,14 +73,7 @@ class TestMetadataQueryStatements(ImpalaTestSuite):
 
   # Missing Coverage: Describe formatted compatibility between Impala and Hive when the
   # data doesn't reside in hdfs.
-  @SkipIfIsilon.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_describe_formatted(self, vector, unique_database):
     # IMPALA-10176: test_describe_formatted is broken, so disable it for now
     pytest.skip()
@@ -171,14 +161,7 @@ class TestMetadataQueryStatements(ImpalaTestSuite):
     for name in self.TEST_DATA_SRC_NAMES:
       self.client.execute(self.CREATE_DATA_SRC_STMT % (name,))
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially  # because of use of hardcoded database
   def test_describe_db(self, vector, cluster_properties):
     self.__test_describe_db_cleanup()
diff --git a/tests/metadata/test_partition_metadata.py b/tests/metadata/test_partition_metadata.py
index b68e2cd91..0dd913182 100644
--- a/tests/metadata/test_partition_metadata.py
+++ b/tests/metadata/test_partition_metadata.py
@@ -17,11 +17,10 @@
 
 import pytest
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfLocal
 from tests.common.test_dimensions import (create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
-from tests.util.filesystem_utils import get_fs_path, WAREHOUSE, FILESYSTEM_PREFIX
+from tests.util.filesystem_utils import WAREHOUSE, FILESYSTEM_PREFIX
 
 # Map from the test dimension file_format string to the SQL "STORED AS"
 # argument.
@@ -89,14 +88,7 @@ class TestPartitionMetadata(ImpalaTestSuite):
     data = self.execute_scalar("select sum(i), sum(j) from %s" % FQ_TBL_NAME)
     assert data.split('\t') == ['6', '9']
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_partition_metadata_compatibility(self, vector, unique_database):
     """Regression test for IMPALA-2048. For partitioned tables, test that when Impala
     updates the partition metadata (e.g. by doing a compute stats), the tables are
diff --git a/tests/metadata/test_recover_partitions.py b/tests/metadata/test_recover_partitions.py
index 5d8d73621..d2319f226 100644
--- a/tests/metadata/test_recover_partitions.py
+++ b/tests/metadata/test_recover_partitions.py
@@ -20,7 +20,7 @@
 import os
 from six.moves import urllib
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfLocal, SkipIfS3, SkipIfCatalogV2
+from tests.common.skip import SkipIfLocal, SkipIfFS, SkipIfCatalogV2
 from tests.common.test_dimensions import ALL_NODES_ONLY
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.util.filesystem_utils import WAREHOUSE, IS_S3
@@ -378,7 +378,7 @@ class TestRecoverPartitions(ImpalaTestSuite):
     self.verify_partitions(parts, result.data)
 
   @SkipIfLocal.hdfs_client
-  @SkipIfS3.empty_directory
+  @SkipIfFS.empty_directory
   def test_empty_directory(self, vector, unique_database):
     """Explicitly test how empty directories are handled when partitions are recovered."""
 
diff --git a/tests/metadata/test_recursive_listing.py b/tests/metadata/test_recursive_listing.py
index d2d50f261..321b78a2d 100644
--- a/tests/metadata/test_recursive_listing.py
+++ b/tests/metadata/test_recursive_listing.py
@@ -17,8 +17,7 @@ import time
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.test_dimensions import create_uncompressed_text_dimension
-from tests.common.skip import (SkipIfLocal, SkipIfS3, SkipIfGCS, SkipIfCOS,
-                               SkipIfADLS)
+from tests.common.skip import SkipIfLocal, SkipIfFS
 from tests.util.filesystem_utils import WAREHOUSE
 
 
@@ -142,10 +141,8 @@ class TestRecursiveListing(ImpalaTestSuite):
     assert len(self._show_files(fq_tbl_name)) == 1
     assert len(self._get_rows(fq_tbl_name)) == 1
 
-  @SkipIfS3.variable_listing_times
-  @SkipIfCOS.variable_listing_times
-  @SkipIfGCS.variable_listing_times
-  @SkipIfADLS.eventually_consistent
+  @SkipIfFS.variable_listing_times
+  @SkipIfFS.eventually_consistent
   @pytest.mark.execute_serially
   @pytest.mark.stress
   def test_large_staging_dirs(self, unique_database):
@@ -169,10 +166,8 @@ class TestRecursiveListing(ImpalaTestSuite):
                                  pause_ms_before_file_cleanup=300,
                                  refresh_should_fail=False)
 
-  @SkipIfS3.variable_listing_times
-  @SkipIfCOS.variable_listing_times
-  @SkipIfGCS.variable_listing_times
-  @SkipIfADLS.eventually_consistent
+  @SkipIfFS.variable_listing_times
+  @SkipIfFS.eventually_consistent
   @pytest.mark.execute_serially
   @pytest.mark.stress
   def test_partition_dir_removed_inflight(self, unique_database):
diff --git a/tests/metadata/test_refresh_partition.py b/tests/metadata/test_refresh_partition.py
index cf2c63474..984f96ebb 100644
--- a/tests/metadata/test_refresh_partition.py
+++ b/tests/metadata/test_refresh_partition.py
@@ -17,19 +17,11 @@ from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.common.test_dimensions import create_uncompressed_text_dimension
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 from tests.util.filesystem_utils import get_fs_path
 
 
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfGCS.hive
-@SkipIfCOS.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+@SkipIfFS.hive
 class TestRefreshPartition(ImpalaTestSuite):
   """
   This class tests the functionality to refresh a partition individually
diff --git a/tests/metadata/test_stale_metadata.py b/tests/metadata/test_stale_metadata.py
index e664b97e2..9611939fd 100644
--- a/tests/metadata/test_stale_metadata.py
+++ b/tests/metadata/test_stale_metadata.py
@@ -19,7 +19,7 @@ from subprocess import check_call
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3, SkipIfGCS
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.filesystem_utils import get_fs_path
 
@@ -76,8 +76,7 @@ class TestRewrittenFile(ImpalaTestSuite):
     result = self.client.execute("select count(*) from %s.%s" % (db_name, table_name))
     assert result.data == [str(expected_new_count)]
 
-  @SkipIfS3.jira(reason="IMPALA-2512")
-  @SkipIfGCS.jira(reason="IMPALA-2512")
+  @SkipIfFS.read_past_eof
   def test_new_file_shorter(self, vector, unique_database):
     """Rewrites an existing file with a new shorter file."""
     # Full error is something like:
diff --git a/tests/metadata/test_stats_extrapolation.py b/tests/metadata/test_stats_extrapolation.py
index 8add7ebe8..5f613a495 100644
--- a/tests/metadata/test_stats_extrapolation.py
+++ b/tests/metadata/test_stats_extrapolation.py
@@ -17,12 +17,12 @@
 
 from os import path
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfEC, SkipIfOzone
+from tests.common.skip import SkipIfEC, SkipIfFS
 from tests.common.test_dimensions import (
-    create_exec_option_dimension,
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
 
+
 class TestStatsExtrapolation(ImpalaTestSuite):
   """Test stats extrapolation and compute stats tablesample. Stats extrapolation is
   enabled via table property and not via the impalad startup flag so these tests can be
@@ -39,7 +39,7 @@ class TestStatsExtrapolation(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
         create_uncompressed_text_dimension(cls.get_workload()))
 
-  @SkipIfOzone.no_storage_ids
+  @SkipIfFS.no_storage_ids
   @SkipIfEC.contain_full_explain
   def test_stats_extrapolation(self, vector, unique_database):
     vector.get_value('exec_option')['num_nodes'] = 1
diff --git a/tests/metadata/test_views_compatibility.py b/tests/metadata/test_views_compatibility.py
index 69a57acad..7daedc711 100644
--- a/tests/metadata/test_views_compatibility.py
+++ b/tests/metadata/test_views_compatibility.py
@@ -15,7 +15,6 @@
 # specific language governing permissions and limitations
 # under the License.
 
-import os
 import pprint
 import pytest
 import shlex
@@ -24,8 +23,7 @@ from subprocess import call
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.environ import HIVE_MAJOR_VERSION
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import create_uncompressed_text_dimension
 from tests.util.test_file_parser import QueryTestSectionReader
 
@@ -46,16 +44,9 @@ from tests.util.test_file_parser import QueryTestSectionReader
 # in Impala and Hive would be insufficient.
 
 
-# Missing Coverage: Views created by Hive and Impala being visible and queryble by each
+# Missing Coverage: Views created by Hive and Impala being visible and queryable by each
 # other on non hdfs storage.
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfGCS.hive
-@SkipIfCOS.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+@SkipIfFS.hive
 class TestViewCompatibility(ImpalaTestSuite):
   VALID_SECTION_NAMES = ["CREATE_VIEW", "CREATE_VIEW_RESULTS",\
                         "QUERY_HIVE_VIEW_RESULTS", "QUERY_IMPALA_VIEW_RESULTS"]
diff --git a/tests/query_test/test_acid.py b/tests/query_test/test_acid.py
index 1335e1454..d3d3ed17f 100644
--- a/tests/query_test/test_acid.py
+++ b/tests/query_test/test_acid.py
@@ -24,9 +24,7 @@ import time
 from hive_metastore.ttypes import CommitTxnRequest, LockType, OpenTxnRequest
 from subprocess import check_call
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIf, SkipIfHive2, SkipIfCatalogV2, SkipIfS3, SkipIfABFS,
-                               SkipIfADLS, SkipIfIsilon, SkipIfGCS, SkipIfCOS,
-                               SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIf, SkipIfHive2, SkipIfCatalogV2, SkipIfFS
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.acid_txn import AcidTxn
 
@@ -46,14 +44,7 @@ class TestAcid(ImpalaTestSuite):
         v.get_value('table_format').file_format in ['text'])
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_basic(self, vector, unique_database):
     self.run_test_case('QueryTest/acid', vector, use_db=unique_database)
 
@@ -66,64 +57,29 @@ class TestAcid(ImpalaTestSuite):
     self.run_test_case('QueryTest/acid-no-hive', vector, use_db=unique_database)
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_compaction(self, vector, unique_database):
     self.run_test_case('QueryTest/acid-compaction', vector, use_db=unique_database)
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_negative(self, vector, unique_database):
     self.run_test_case('QueryTest/acid-negative', vector, use_db=unique_database)
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_truncate(self, vector, unique_database):
     self.run_test_case('QueryTest/acid-truncate', vector, use_db=unique_database)
     assert "0" == self.run_stmt_in_hive("select count(*) from {0}.{1}".format(
         unique_database, "pt")).split("\n")[1]
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_partitioned(self, vector, unique_database):
     self.run_test_case('QueryTest/acid-partitioned', vector, use_db=unique_database)
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_full_acid_scans(self, vector, unique_database):
     self.run_test_case('QueryTest/full-acid-scans', vector, use_db=unique_database)
 
@@ -138,14 +94,7 @@ class TestAcid(ImpalaTestSuite):
   # it can not be shown in the query profile.  Skip CatalogV2 to avoid flaky tests.
   @SkipIfHive2.acid
   @SkipIfCatalogV2.hms_event_polling_enabled()
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_profile(self, vector, unique_database):
     self.run_test_case('QueryTest/acid-profile', vector, use_db=unique_database)
 
@@ -154,14 +103,7 @@ class TestAcid(ImpalaTestSuite):
     self.run_test_case('QueryTest/full-acid-rowid', vector, use_db=unique_database)
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_full_acid_original_files(self, vector, unique_database):
     table_name = "alltypes_promoted_nopart"
     fq_table_name = "{0}.{1}".format(unique_database, table_name)
@@ -179,14 +121,7 @@ class TestAcid(ImpalaTestSuite):
     self.run_test_case('QueryTest/full-acid-original-file', vector, unique_database)
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_insert_statschg(self, vector, unique_database):
     self.run_test_case('QueryTest/acid-clear-statsaccurate',
         vector, use_db=unique_database)
@@ -198,14 +133,7 @@ class TestAcid(ImpalaTestSuite):
         .format(unique_database, "insertonly_part_colstats"))
     assert "2" in result
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_ext_statschg(self, vector, unique_database):
     self.run_test_case('QueryTest/clear-statsaccurate',
         vector, use_db=unique_database)
@@ -219,14 +147,7 @@ class TestAcid(ImpalaTestSuite):
     assert "2" in result
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_compute_stats(self, vector, unique_database):
     self.run_test_case('QueryTest/acid-compute-stats', vector, use_db=unique_database)
 
@@ -236,14 +157,7 @@ class TestAcid(ImpalaTestSuite):
 #  Negative test for LOAD DATA INPATH and all other SQL that we don't support.
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_acid_heartbeats(self, vector, unique_database):
     """Tests heartbeating of transactions. Creates a long-running query via
     some jitting and in the meanwhile it periodically checks whether there is
@@ -316,14 +230,7 @@ class TestAcid(ImpalaTestSuite):
     commit_req.txnid = txn_id
     return self.hive_client.commit_txn(commit_req)
 
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_lock_timings(self, vector, unique_database):
     def elapsed_time_for_query(query):
       t_start = time.time()
@@ -364,14 +271,7 @@ class TestAcid(ImpalaTestSuite):
       acid_util.unlock(lock_resp.lockid)
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_in_progress_compactions(self, vector, unique_database):
     """Checks that in-progress compactions are not visible. The test mimics
     in-progress compactions by opening a transaction and creating a new base
diff --git a/tests/query_test/test_date_queries.py b/tests/query_test/test_date_queries.py
index ae64cdd88..56e37aeb2 100644
--- a/tests/query_test/test_date_queries.py
+++ b/tests/query_test/test_date_queries.py
@@ -20,8 +20,7 @@
 import pytest
 from tests.common.file_utils import create_table_and_copy_files
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal, SkipIfGCS,
-                               SkipIfCOS, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 from tests.common.test_dimensions import (create_exec_option_dimension_from_dict,
     create_client_protocol_dimension, hs2_parquet_constraint)
 from tests.shell.util import create_impala_shell_executable_dimension
@@ -71,13 +70,7 @@ class TestDateQueries(ImpalaTestSuite):
       pytest.skip()
     self.run_test_case('QueryTest/date-partitioning', vector, use_db=unique_database)
 
-  @SkipIfS3.qualified_path
-  @SkipIfOzone.qualified_path
-  @SkipIfGCS.qualified_path
-  @SkipIfCOS.qualified_path
-  @SkipIfABFS.qualified_path
-  @SkipIfADLS.qualified_path
-  @SkipIfLocal.qualified_path
+  @SkipIfFS.qualified_path
   def test_fileformat_support(self, vector, unique_database):
     """ Test that scanning and writing DATE is supported for text and parquet tables.
         Test that scanning DATE is supported for avro tables as well.
diff --git a/tests/query_test/test_hbase_queries.py b/tests/query_test/test_hbase_queries.py
index f92d1b057..a1acbf33b 100644
--- a/tests/query_test/test_hbase_queries.py
+++ b/tests/query_test/test_hbase_queries.py
@@ -19,17 +19,8 @@
 
 import pytest
 
-from tests.common.skip import (
-    SkipIfIsilon,
-    SkipIfS3,
-    SkipIfOzone,
-    SkipIfGCS,
-    SkipIfCOS,
-    SkipIfABFS,
-    SkipIfADLS,
-    SkipIfLocal)
-
 from tests.common.impala_test_suite import ImpalaTestSuite
+from tests.common.skip import SkipIfFS
 
 class TestHBaseQueries(ImpalaTestSuite):
   @classmethod
@@ -67,14 +58,7 @@ class TestHBaseQueries(ImpalaTestSuite):
   def test_hbase_inserts(self, vector):
     self.run_test_case('QueryTest/hbase-inserts', vector)
 
-  @SkipIfIsilon.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_hbase_col_filter(self, vector, unique_database):
     """IMPALA-7929: test query with table created with hive and mapped to hbase. The key
     column doesn't have qualifier and the query with predicate on key column name should
diff --git a/tests/query_test/test_hdfs_caching.py b/tests/query_test/test_hdfs_caching.py
index 666829a63..0d65ab4da 100644
--- a/tests/query_test/test_hdfs_caching.py
+++ b/tests/query_test/test_hdfs_caching.py
@@ -25,22 +25,14 @@ from subprocess import check_call
 from tests.common.environ import build_flavor_timeout, IS_DOCKERIZED_TEST_CLUSTER
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfEC,
-                               SkipIfDockerizedCluster, SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfEC, SkipIfDockerizedCluster
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.filesystem_utils import get_fs_path
 from tests.util.shell_util import exec_process
 
+
 # End to end test that hdfs caching is working.
-@SkipIfS3.caching # S3: missing coverage: verify SET CACHED gives error
-@SkipIfOzone.caching
-@SkipIfGCS.caching
-@SkipIfCOS.caching
-@SkipIfABFS.caching
-@SkipIfADLS.caching
-@SkipIfIsilon.caching
-@SkipIfLocal.caching
+@SkipIfFS.hdfs_caching  # missing coverage: verify SET CACHED gives error
 @SkipIfEC.fix_later
 class TestHdfsCaching(ImpalaTestSuite):
   @classmethod
@@ -113,30 +105,17 @@ class TestHdfsCaching(ImpalaTestSuite):
       result = self.execute_query(query_string)
       assert(len(result.data) == 2)
 
+
 # A separate class has been created for "test_hdfs_caching_fallback_path" to make it
 # run as a part of exhaustive tests which require the workload to be 'functional-query'.
 # TODO: Move this to TestHdfsCaching once we make exhaustive tests run for other workloads
-@SkipIfS3.caching
-@SkipIfOzone.caching
-@SkipIfGCS.caching
-@SkipIfCOS.caching
-@SkipIfABFS.caching
-@SkipIfADLS.caching
-@SkipIfIsilon.caching
-@SkipIfLocal.caching
+@SkipIfFS.hdfs_caching
 class TestHdfsCachingFallbackPath(ImpalaTestSuite):
   @classmethod
   def get_workload(self):
     return 'functional-query'
 
-  @SkipIfS3.hdfs_encryption
-  @SkipIfOzone.hdfs_encryption
-  @SkipIfGCS.hdfs_encryption
-  @SkipIfCOS.hdfs_encryption
-  @SkipIfABFS.hdfs_encryption
-  @SkipIfADLS.hdfs_encryption
-  @SkipIfIsilon.hdfs_encryption
-  @SkipIfLocal.hdfs_encryption
+  @SkipIfFS.hdfs_encryption
   def test_hdfs_caching_fallback_path(self, vector, unique_database, testid_checksum):
     """ This tests the code path of the query execution where the hdfs cache read fails
     and the execution falls back to the normal read path. To reproduce this situation we
@@ -184,14 +163,7 @@ class TestHdfsCachingFallbackPath(ImpalaTestSuite):
           shell=False)
 
 
-@SkipIfS3.caching
-@SkipIfOzone.caching
-@SkipIfGCS.caching
-@SkipIfCOS.caching
-@SkipIfABFS.caching
-@SkipIfADLS.caching
-@SkipIfIsilon.caching
-@SkipIfLocal.caching
+@SkipIfFS.hdfs_caching
 class TestHdfsCachingDdl(ImpalaTestSuite):
   @classmethod
   def get_workload(self):
diff --git a/tests/query_test/test_insert.py b/tests/query_test/test_insert.py
index 30d75fdc3..b57308add 100644
--- a/tests/query_test/test_insert.py
+++ b/tests/query_test/test_insert.py
@@ -25,8 +25,8 @@ from testdata.common import widetable
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
-from tests.common.skip import SkipIfABFS, SkipIfEC, SkipIfLocal, \
-    SkipIfHive2, SkipIfNotHdfsMinicluster, SkipIfS3, SkipIfDockerizedCluster
+from tests.common.skip import (SkipIfFS, SkipIfEC, SkipIfLocal, SkipIfHive2,
+    SkipIfNotHdfsMinicluster)
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_uncompressed_text_dimension,
@@ -136,7 +136,7 @@ class TestInsertQueries(ImpalaTestSuite):
   # Erasure coding doesn't respect memory limit
   @SkipIfEC.fix_later
   # ABFS partition names cannot end in periods
-  @SkipIfABFS.file_or_folder_name_ends_with_period
+  @SkipIfFS.file_or_folder_name_ends_with_period
   def test_insert(self, vector, unique_database):
     if (vector.get_value('table_format').file_format == 'parquet'):
       vector.get_value('exec_option')['COMPRESSION_CODEC'] = \
diff --git a/tests/query_test/test_insert_behaviour.py b/tests/query_test/test_insert_behaviour.py
index bef5a6ef5..0e6580141 100644
--- a/tests/query_test/test_insert_behaviour.py
+++ b/tests/query_test/test_insert_behaviour.py
@@ -23,11 +23,11 @@ import re
 
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfDockerizedCluster,
-                               SkipIfCatalogV2, SkipIfOzone)
+from tests.common.skip import (SkipIfFS, SkipIfLocal, SkipIfDockerizedCluster,
+    SkipIfCatalogV2)
 from tests.util.filesystem_utils import WAREHOUSE, get_fs_path, IS_S3
 
+
 @SkipIfLocal.hdfs_client
 class TestInsertBehaviour(ImpalaTestSuite):
   """Tests for INSERT behaviour that isn't covered by checking query results"""
@@ -48,7 +48,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     if method.__name__ == "test_insert_select_with_empty_resultset":
       self.cleanup_db(self.TEST_DB_NAME)
 
-  @SkipIfADLS.eventually_consistent
+  @SkipIfFS.eventually_consistent
   @pytest.mark.execute_serially
   def test_insert_removes_staging_files(self):
     TBL_NAME = "insert_overwrite_nopart"
@@ -133,13 +133,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     # a subdirectory)
     assert len(self.filesystem_client.ls(part_dir)) == 1
 
-  @SkipIfS3.hdfs_acls
-  @SkipIfOzone.hdfs_acls
-  @SkipIfGCS.hdfs_acls
-  @SkipIfCOS.hdfs_acls
-  @SkipIfABFS.hdfs_acls
-  @SkipIfADLS.hdfs_acls
-  @SkipIfIsilon.hdfs_acls
+  @SkipIfFS.hdfs_acls
   @pytest.mark.xfail(run=False, reason="Fails intermittently on test clusters")
   @pytest.mark.execute_serially
   def test_insert_inherit_acls(self):
@@ -198,13 +192,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
                                       "PARTITION(p1=1, p2=2, p3=30) VALUES(1)")
     check_has_acls("p1=1/p2=2/p3=30", "default:group:new_leaf_group:-w-")
 
-  @SkipIfS3.hdfs_acls
-  @SkipIfOzone.hdfs_acls
-  @SkipIfGCS.hdfs_acls
-  @SkipIfCOS.hdfs_acls
-  @SkipIfABFS.hdfs_acls
-  @SkipIfADLS.hdfs_acls
-  @SkipIfIsilon.hdfs_acls
+  @SkipIfFS.hdfs_acls
   @SkipIfCatalogV2.impala_7539()
   def test_insert_file_permissions(self, unique_database):
     """Test that INSERT correctly respects file permission (minimum ACLs)"""
@@ -254,13 +242,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     # Should be writable because 'other' ACLs allow writes
     self.execute_query_expect_success(self.client, insert_query)
 
-  @SkipIfS3.hdfs_acls
-  @SkipIfOzone.hdfs_acls
-  @SkipIfGCS.hdfs_acls
-  @SkipIfCOS.hdfs_acls
-  @SkipIfABFS.hdfs_acls
-  @SkipIfADLS.hdfs_acls
-  @SkipIfIsilon.hdfs_acls
+  @SkipIfFS.hdfs_acls
   @SkipIfCatalogV2.impala_7539()
   def test_mixed_partition_permissions(self, unique_database):
     """
@@ -339,13 +321,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.execute_query_expect_success(self.client, insert_query("added_part"))
     load_data(self.execute_query_expect_success, "added_part")
 
-  @SkipIfS3.hdfs_acls
-  @SkipIfOzone.hdfs_acls
-  @SkipIfGCS.hdfs_acls
-  @SkipIfCOS.hdfs_acls
-  @SkipIfABFS.hdfs_acls
-  @SkipIfADLS.hdfs_acls
-  @SkipIfIsilon.hdfs_acls
+  @SkipIfFS.hdfs_acls
   @SkipIfCatalogV2.impala_7539()
   def test_readonly_table_dir(self, unique_database):
     """
@@ -375,13 +351,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     err = self.execute_query_expect_failure(self.client, insert_query("some_new_part"))
     assert re.search(r'Impala does not have WRITE access.*' + table_path, str(err))
 
-  @SkipIfS3.hdfs_acls
-  @SkipIfOzone.hdfs_acls
-  @SkipIfGCS.hdfs_acls
-  @SkipIfCOS.hdfs_acls
-  @SkipIfABFS.hdfs_acls
-  @SkipIfADLS.hdfs_acls
-  @SkipIfIsilon.hdfs_acls
+  @SkipIfFS.hdfs_acls
   @SkipIfDockerizedCluster.insert_acls
   @SkipIfCatalogV2.impala_7539()
   def test_insert_acl_permissions(self, unique_database):
@@ -459,13 +429,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     # Should be writable because 'other' ACLs allow writes
     self.execute_query_expect_success(self.client, insert_query)
 
-  @SkipIfS3.hdfs_acls
-  @SkipIfOzone.hdfs_acls
-  @SkipIfGCS.hdfs_acls
-  @SkipIfCOS.hdfs_acls
-  @SkipIfABFS.hdfs_acls
-  @SkipIfADLS.hdfs_acls
-  @SkipIfIsilon.hdfs_acls
+  @SkipIfFS.hdfs_acls
   @SkipIfCatalogV2.impala_7539()
   def test_load_permissions(self, unique_database):
     # We rely on test_insert_acl_permissions() to exhaustively check that ACL semantics
@@ -518,8 +482,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     # We expect this to succeed, it's not an error if all files in the dir cannot be read
     self.execute_query_expect_success(self.client, load_dir_query)
 
-  @SkipIfADLS.eventually_consistent
-  @SkipIfCOS.eventually_consistent
+  @SkipIfFS.eventually_consistent
   @pytest.mark.execute_serially
   def test_insert_select_with_empty_resultset(self):
     """Test insert/select query won't trigger partition directory or zero size data file
@@ -589,13 +552,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.filesystem_client.delete_file_dir(target_table_path, recursive=True)
     self.execute_query_expect_failure(self.client, insert_query)
 
-  @SkipIfS3.hdfs_acls
-  @SkipIfOzone.hdfs_acls
-  @SkipIfGCS.hdfs_acls
-  @SkipIfCOS.hdfs_acls
-  @SkipIfABFS.hdfs_acls
-  @SkipIfADLS.hdfs_acls
-  @SkipIfIsilon.hdfs_acls
+  @SkipIfFS.hdfs_acls
   @SkipIfDockerizedCluster.insert_acls
   @SkipIfCatalogV2.impala_7539()
   def test_multiple_group_acls(self, unique_database):
diff --git a/tests/query_test/test_insert_parquet.py b/tests/query_test/test_insert_parquet.py
index 8bd71757d..563d090ef 100644
--- a/tests/query_test/test_insert_parquet.py
+++ b/tests/query_test/test_insert_parquet.py
@@ -20,7 +20,7 @@
 import os
 
 from collections import namedtuple
-from datetime import (datetime, date)
+from datetime import datetime, date
 from decimal import Decimal
 from subprocess import check_call
 from parquet.ttypes import ColumnOrder, SortingColumn, TypeDefinedOrder, ConvertedType
@@ -28,8 +28,7 @@ from parquet.ttypes import ColumnOrder, SortingColumn, TypeDefinedOrder, Convert
 from tests.common.environ import impalad_basedir
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
-from tests.common.skip import (SkipIfEC, SkipIfIsilon, SkipIfLocal, SkipIfS3, SkipIfABFS,
-                               SkipIfADLS, SkipIfGCS, SkipIfCOS, SkipIfOzone)
+from tests.common.skip import SkipIfEC, SkipIfFS, SkipIfLocal
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 from tests.common.test_vector import ImpalaTestDimension
@@ -200,7 +199,7 @@ class TestInsertParquetVerifySize(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
         ImpalaTestDimension("compression_codec", *PARQUET_CODECS))
 
-  @SkipIfIsilon.hdfs_block_size
+  @SkipIfFS.hdfs_block_size
   @SkipIfLocal.hdfs_client
   def test_insert_parquet_verify_size(self, vector, unique_database):
     # Test to verify that the result file size is close to what we expect.
@@ -535,14 +534,7 @@ class TestHdfsParquetTableWriter(ImpalaTestSuite):
   # by python to string. In both HS2 and beeswax, it only handles float
   # precision up to 16 decimal digits and the test needs 17.
   # IMPALA-9365 describes why HS2 is not started on non-HDFS test env.
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_double_precision(self, vector, unique_database):
     # IMPALA-10654: Test that inserting doubles into a Parquet table retains precision.
     src_tbl = "{0}.{1}".format(unique_database, "i10654_parquet")
@@ -556,14 +548,8 @@ class TestHdfsParquetTableWriter(ImpalaTestSuite):
     result = self.run_stmt_in_hive(select_stmt)
     assert result.split('\n')[1] == '-0.43149576573887316'
 
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfGCS.hive
-@SkipIfCOS.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
+
+@SkipIfFS.hive
 # TODO: Should we move this to test_parquet_stats.py?
 class TestHdfsParquetTableStatsWriter(ImpalaTestSuite):
 
diff --git a/tests/query_test/test_join_queries.py b/tests/query_test/test_join_queries.py
index 29993f5d9..8961b6391 100644
--- a/tests/query_test/test_join_queries.py
+++ b/tests/query_test/test_join_queries.py
@@ -21,16 +21,7 @@ import pytest
 from copy import deepcopy
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (
-    SkipIf,
-    SkipIfIsilon,
-    SkipIfLocal,
-    SkipIfS3,
-    SkipIfOzone,
-    SkipIfGCS,
-    SkipIfCOS,
-    SkipIfABFS,
-    SkipIfADLS)
+from tests.common.skip import SkipIf, SkipIfFS
 from tests.common.test_vector import ImpalaTestDimension
 
 class TestJoinQueries(ImpalaTestSuite):
@@ -81,15 +72,8 @@ class TestJoinQueries(ImpalaTestSuite):
     del new_vector.get_value('exec_option')['batch_size']  # .test file sets batch_size
     self.run_test_case('QueryTest/single-node-joins-with-limits-exhaustive', new_vector)
 
-  @SkipIfS3.hbase
-  @SkipIfOzone.hbase
-  @SkipIfGCS.hbase
-  @SkipIfCOS.hbase
-  @SkipIfABFS.hbase
-  @SkipIfADLS.hbase
-  @SkipIfIsilon.hbase
+  @SkipIfFS.hbase
   @SkipIf.skip_hbase
-  @SkipIfLocal.hbase
   def test_joins_against_hbase(self, vector):
     new_vector = deepcopy(vector)
     new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
diff --git a/tests/query_test/test_mt_dop.py b/tests/query_test/test_mt_dop.py
index 4f5b50d3a..4c9421046 100644
--- a/tests/query_test/test_mt_dop.py
+++ b/tests/query_test/test_mt_dop.py
@@ -25,7 +25,7 @@ from tests.common.environ import ImpalaTestClusterProperties, build_flavor_timeo
 from tests.common.environ import HIVE_MAJOR_VERSION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.kudu_test_suite import KuduTestSuite
-from tests.common.skip import SkipIfABFS, SkipIfEC, SkipIfNotHdfsMinicluster
+from tests.common.skip import SkipIfFS, SkipIfEC, SkipIfNotHdfsMinicluster
 from tests.common.test_vector import ImpalaTestDimension
 from tests.util.filesystem_utils import IS_HDFS
 
@@ -130,7 +130,7 @@ class TestMtDopParquet(ImpalaTestSuite):
     self.run_test_case('QueryTest/parquet-filtering', vector)
 
   @pytest.mark.execute_serially
-  @SkipIfABFS.file_or_folder_name_ends_with_period
+  @SkipIfFS.file_or_folder_name_ends_with_period
   def test_mt_dop_insert(self, vector, unique_database):
     """Basic tests for inserts with mt_dop > 0"""
     mt_dop = vector.get_value('mt_dop')
diff --git a/tests/query_test/test_nested_types.py b/tests/query_test/test_nested_types.py
index d28314d26..0097fa117 100644
--- a/tests/query_test/test_nested_types.py
+++ b/tests/query_test/test_nested_types.py
@@ -18,24 +18,10 @@
 import os
 from copy import deepcopy
 import pytest
-from subprocess import check_call
-from pytest import skip
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (
-    SkipIfIsilon,
-    SkipIfS3,
-    SkipIfOzone,
-    SkipIfGCS,
-    SkipIfCOS,
-    SkipIfABFS,
-    SkipIfADLS,
-    SkipIfEC,
-    SkipIfHive2,
-    SkipIfLocal,
-    SkipIfNotHdfsMinicluster
-    )
+from tests.common.skip import SkipIfFS, SkipIfEC, SkipIfHive2, SkipIfNotHdfsMinicluster
 from tests.common.test_dimensions import (create_exec_option_dimension,
     create_exec_option_dimension_from_dict, create_client_protocol_dimension,
     create_orc_dimension, orc_schema_resolution_constraint)
@@ -317,14 +303,7 @@ class TestNestedTypesNoMtDop(ImpalaTestSuite):
       pytest.skip('This test is specific to Parquet')
     self.run_test_case('QueryTest/nested-types-parquet-stats', vector)
 
-  @SkipIfIsilon.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_upper_case_field_name(self, unique_database):
     """IMPALA-5994: Tests that a Hive-created table with a struct field name with upper
     case characters can be selected."""
@@ -375,14 +354,7 @@ class TestNestedTypesNoMtDop(ImpalaTestSuite):
 
   # Skip this test on non-HDFS filesystems, because the test contains Hive
   # queries that hang in some cases due to IMPALA-9365.
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   @SkipIfHive2.acid
   def test_partitioned_table_acid(self, vector, unique_database):
     """IMPALA-6370: Test that a partitioned table with nested types can be scanned."""
@@ -801,7 +773,7 @@ class TestParquetArrayEncodings(ImpalaTestSuite):
 
     # The Parquet resolution policy is manually set in the .test files.
     if vector.get_value('parquet_array_resolution') != "three_level":
-      skip("Test only run with three_level")
+      pytest.skip("Test only run with three_level")
 
     ambig_modern_tbl = "ambig_modern"
     self._create_test_table(unique_database, ambig_modern_tbl,
@@ -884,14 +856,7 @@ class TestMaxNestingDepth(ImpalaTestSuite):
       self.run_stmt_in_hive(insert_table)
       self.client.execute("INVALIDATE METADATA %s" % tbl_name)
 
-  @SkipIfIsilon.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_load_hive_table(self, vector, unique_database):
     """Tests that Impala rejects Hive-created tables with complex types that exceed
     the maximum nesting depth."""
diff --git a/tests/query_test/test_observability.py b/tests/query_test/test_observability.py
index aae450604..a5721b78f 100644
--- a/tests/query_test/test_observability.py
+++ b/tests/query_test/test_observability.py
@@ -19,9 +19,7 @@ from collections import defaultdict
 from datetime import datetime
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone,
-                               SkipIfNotHdfsMinicluster)
+from tests.common.skip import SkipIfFS, SkipIfLocal, SkipIfNotHdfsMinicluster
 from tests.util.filesystem_utils import IS_EC
 from time import sleep
 from RuntimeProfile.ttypes import TRuntimeProfileFormat
@@ -92,14 +90,7 @@ class TestObservability(ImpalaTestSuite):
     assert num_validated > 0
     self.hs2_client.close_query(handle)
 
-  @SkipIfS3.hbase
-  @SkipIfOzone.hbase
-  @SkipIfGCS.hbase
-  @SkipIfCOS.hbase
-  @SkipIfLocal.hbase
-  @SkipIfIsilon.hbase
-  @SkipIfABFS.hbase
-  @SkipIfADLS.hbase
+  @SkipIfFS.hbase
   def test_scan_summary(self):
     """IMPALA-4499: Checks that the exec summary for scans show the table name."""
     # HDFS table
@@ -676,14 +667,7 @@ class TestObservability(ImpalaTestSuite):
     self.__check_query_profile_storage_load_time(unique_database, table_name,
         cluster_properties)
 
-  @SkipIfS3.hbase
-  @SkipIfOzone.hbase
-  @SkipIfGCS.hbase
-  @SkipIfCOS.hbase
-  @SkipIfLocal.hbase
-  @SkipIfIsilon.hbase
-  @SkipIfABFS.hbase
-  @SkipIfADLS.hbase
+  @SkipIfFS.hbase
   @pytest.mark.execute_serially
   def test_query_profile_storage_load_time(self, cluster_properties):
     """Test that when a query needs load metadata for table(s), the
diff --git a/tests/query_test/test_partitioning.py b/tests/query_test/test_partitioning.py
index 4bcc792de..acabdcbbc 100644
--- a/tests/query_test/test_partitioning.py
+++ b/tests/query_test/test_partitioning.py
@@ -15,13 +15,11 @@
 # specific language governing permissions and limitations
 # under the License.
 
-import os
 import pytest
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS, SkipIfLocal
 from tests.common.test_dimensions import create_single_exec_option_dimension
 
 # Tests to validate HDFS partitioning.
@@ -47,14 +45,7 @@ class TestPartitioning(ImpalaTestSuite):
 
   # Missing Coverage: Impala deals with boolean partitions created by Hive on a non-hdfs
   # filesystem.
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
+  @SkipIfFS.hive
   def test_boolean_partitions(self, vector, unique_database):
     # This test takes about a minute to complete due to the Hive commands that are
     # executed. To cut down on runtime, limit the test to exhaustive exploration
diff --git a/tests/query_test/test_resource_limits.py b/tests/query_test/test_resource_limits.py
index 9b0d0d347..37081e415 100644
--- a/tests/query_test/test_resource_limits.py
+++ b/tests/query_test/test_resource_limits.py
@@ -16,8 +16,7 @@
 # under the License.
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfEC, SkipIfLocal, SkipIfS3, SkipIfABFS,
-                               SkipIfGCS, SkipIfCOS, SkipIfADLS, SkipIfOzone)
+from tests.common.skip import SkipIfEC, SkipIfLocal, SkipIfFS
 from tests.common.test_dimensions import create_parquet_dimension
 
 
@@ -45,12 +44,7 @@ class TestResourceLimits(ImpalaTestSuite):
   def test_resource_limits(self, vector):
     self.run_test_case('QueryTest/query-resource-limits', vector)
 
-  @SkipIfS3.hbase
-  @SkipIfOzone.hbase
-  @SkipIfGCS.hbase
-  @SkipIfCOS.hbase
-  @SkipIfADLS.hbase
-  @SkipIfABFS.hbase
+  @SkipIfFS.hbase
   @SkipIfLocal.multiple_impalad
   def test_resource_limits_hbase(self, vector):
     self.run_test_case('QueryTest/query-resource-limits-hbase', vector)
diff --git a/tests/query_test/test_runtime_filters.py b/tests/query_test/test_runtime_filters.py
index 089de24ca..da5913c0b 100644
--- a/tests/query_test/test_runtime_filters.py
+++ b/tests/query_test/test_runtime_filters.py
@@ -22,13 +22,10 @@ import pytest
 import re
 import time
 
-from beeswaxd.BeeswaxService import QueryState
-from tests.common.environ import build_flavor_timeout
-from tests.common.environ import ImpalaTestClusterProperties
+from tests.common.environ import build_flavor_timeout, ImpalaTestClusterProperties
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfEC
-from tests.common.skip import SkipIfLocal, SkipIfIsilon
+from tests.common.skip import SkipIfEC, SkipIfLocal, SkipIfFS
 from tests.common.test_dimensions import add_exec_option_dimension
 from tests.common.test_vector import ImpalaTestDimension
 from tests.verifiers.metric_verifier import MetricVerifier
@@ -49,7 +46,7 @@ build_runs_slowly = ImpalaTestClusterProperties.get_instance().runs_slowly()
 # what tests to run for non-HDFS platforms
 @pytest.mark.execute_serially
 @SkipIfLocal.multiple_impalad
-@SkipIfIsilon.jira(reason="IMPALA-6998")
+@SkipIfFS.late_filters
 class TestRuntimeFilters(ImpalaTestSuite):
   @classmethod
   def get_workload(cls):
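
In the runtime-filter suite, the free-form @SkipIfIsilon.jira(reason="IMPALA-6998")
becomes the named marker @SkipIfFS.late_filters, so the ticket and the skip condition
are recorded once in tests/common/skip.py instead of at the call site. A hedged sketch
of that named-marker style (the condition shown is an assumption, not the actual
definition):

    import pytest

    from tests.util.filesystem_utils import IS_HDFS

    # Assumed condition; the real late_filters marker in tests/common/skip.py may
    # key off different filesystem flags.
    late_filters = pytest.mark.skipif(not IS_HDFS, reason="IMPALA-6998")
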
diff --git a/tests/query_test/test_scanners.py b/tests/query_test/test_scanners.py
index ab64c6b64..389467be8 100644
--- a/tests/query_test/test_scanners.py
+++ b/tests/query_test/test_scanners.py
@@ -34,16 +34,10 @@ from testdata.common import widetable
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.skip import (
     SkipIf,
-    SkipIfS3,
-    SkipIfOzone,
-    SkipIfGCS,
-    SkipIfCOS,
-    SkipIfABFS,
-    SkipIfADLS,
+    SkipIfFS,
     SkipIfEC,
     SkipIfHive2,
     SkipIfHive3,
-    SkipIfIsilon,
     SkipIfLocal,
     SkipIfNotHdfsMinicluster)
 from tests.common.test_dimensions import (
@@ -57,8 +51,7 @@ from tests.common.test_result_verifier import (
     QueryTestResult,
     parse_result_rows)
 from tests.common.test_vector import ImpalaTestDimension
-from tests.util.filesystem_utils import IS_HDFS, WAREHOUSE, get_fs_path
-from tests.util.hdfs_util import NAMENODE
+from tests.util.filesystem_utils import IS_HDFS, get_fs_path
 from tests.util.get_parquet_metadata import get_parquet_metadata
 from tests.util.parse_util import get_bytes_summary_stats_counter
 from tests.util.test_file_parser import QueryTestSectionReader
@@ -482,14 +475,7 @@ class TestParquet(ImpalaTestSuite):
     assert len(result.data) == 1
     assert "4294967294" in result.data
 
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   def test_multi_compression_types(self, vector, unique_database):
     """IMPALA-5448: Tests that parquet splits with multi compression types are counted
     correctly. Cases tested:
@@ -616,13 +602,7 @@ class TestParquet(ImpalaTestSuite):
     assert len(result.data) == 1
     assert "AAAAAAAACPKFFAAA" in result.data
 
-  @SkipIfS3.hdfs_block_size
-  @SkipIfOzone.hdfs_block_size
-  @SkipIfGCS.hdfs_block_size
-  @SkipIfCOS.hdfs_block_size
-  @SkipIfABFS.hdfs_block_size
-  @SkipIfADLS.hdfs_block_size
-  @SkipIfIsilon.hdfs_block_size
+  @SkipIfFS.hdfs_block_size
   @SkipIfLocal.multiple_impalad
   @SkipIfEC.fix_later
   def test_misaligned_parquet_row_groups(self, vector):
@@ -676,13 +656,7 @@ class TestParquet(ImpalaTestSuite):
       total += int(n)
     assert total == num_scanners_with_no_reads
 
-  @SkipIfS3.hdfs_block_size
-  @SkipIfOzone.hdfs_block_size
-  @SkipIfGCS.hdfs_block_size
-  @SkipIfCOS.hdfs_block_size
-  @SkipIfABFS.hdfs_block_size
-  @SkipIfADLS.hdfs_block_size
-  @SkipIfIsilon.hdfs_block_size
+  @SkipIfFS.hdfs_block_size
   @SkipIfLocal.multiple_impalad
   @SkipIfEC.fix_later
   def test_multiple_blocks_mt_dop(self, vector):
@@ -728,13 +702,7 @@ class TestParquet(ImpalaTestSuite):
     finally:
       self.client.clear_configuration()
 
-  @SkipIfS3.hdfs_block_size
-  @SkipIfOzone.hdfs_block_size
-  @SkipIfGCS.hdfs_block_size
-  @SkipIfCOS.hdfs_block_size
-  @SkipIfABFS.hdfs_block_size
-  @SkipIfADLS.hdfs_block_size
-  @SkipIfIsilon.hdfs_block_size
+  @SkipIfFS.hdfs_block_size
   @SkipIfLocal.multiple_impalad
   @SkipIfEC.fix_later
   def test_multiple_blocks(self, vector):
@@ -747,13 +715,7 @@ class TestParquet(ImpalaTestSuite):
     # there are 6 blocks and 3 scan nodes.
     self._multiple_blocks_helper(table_name, 40000, ranges_per_node=2)
 
-  @SkipIfS3.hdfs_block_size
-  @SkipIfOzone.hdfs_block_size
-  @SkipIfGCS.hdfs_block_size
-  @SkipIfCOS.hdfs_block_size
-  @SkipIfABFS.hdfs_block_size
-  @SkipIfADLS.hdfs_block_size
-  @SkipIfIsilon.hdfs_block_size
+  @SkipIfFS.hdfs_block_size
   @SkipIfLocal.multiple_impalad
   @SkipIfEC.fix_later
   def test_multiple_blocks_one_row_group(self, vector):
@@ -1410,15 +1372,9 @@ class TestTextScanRangeLengths(ImpalaTestSuite):
     del new_vector.get_value('exec_option')['abort_on_error']
     self.run_test_case('QueryTest/dateless_timestamp_text', new_vector, unique_database)
 
+
 # Missing Coverage: No coverage for truncated files errors or scans.
-@SkipIfS3.hive
-@SkipIfOzone.hive
-@SkipIfGCS.hive
-@SkipIfCOS.hive
-@SkipIfABFS.hive
-@SkipIfADLS.hive
-@SkipIfIsilon.hive
-@SkipIfLocal.hive
+@SkipIfFS.hive
 class TestScanTruncatedFiles(ImpalaTestSuite):
   @classmethod
   def get_workload(cls):
@@ -1498,14 +1454,8 @@ class TestOrc(ImpalaTestSuite):
       lambda v: v.get_value('table_format').file_format == 'orc')
     cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('orc_schema_resolution', 0, 1))
 
-  @SkipIfS3.hdfs_block_size
-  @SkipIfOzone.hdfs_block_size
-  @SkipIfGCS.hdfs_block_size
-  @SkipIfCOS.hdfs_block_size
-  @SkipIfABFS.hdfs_block_size
-  @SkipIfADLS.hdfs_block_size
+  @SkipIfFS.hdfs_block_size
   @SkipIfEC.fix_later
-  @SkipIfIsilon.hdfs_block_size
   @SkipIfLocal.multiple_impalad
   def test_misaligned_orc_stripes(self, vector, unique_database):
     self._build_lineitem_table_helper(unique_database, 'lineitem_threeblocks',
@@ -1577,14 +1527,7 @@ class TestOrc(ImpalaTestSuite):
   # queries that hang in some cases (IMPALA-9345). It would be possible to separate
   # the tests that use Hive and run most tests on S3, but I think that running these on
   # S3 doesn't add too much coverage.
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   @SkipIfHive3.non_acid
   def test_type_conversions_hive2(self, vector, unique_database):
     # Create "illtypes" tables whose columns can't match the underlining ORC file's.
@@ -1629,14 +1572,7 @@ class TestOrc(ImpalaTestSuite):
   # queries that hang in some cases (IMPALA-9345). It would be possible to separate
   # the tests that use Hive and run most tests on S3, but I think that running these on
   # S3 doesn't add too much coverage.
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   @SkipIfHive2.acid
   def test_type_conversions_hive3(self, vector, unique_database):
     # Create "illtypes" tables whose columns can't match the underlining ORC file's.
@@ -1756,14 +1692,7 @@ class TestOrc(ImpalaTestSuite):
 
     self.run_test_case('QueryTest/hive2-pre-gregorian-date-orc', vector, unique_database)
 
-  @SkipIfABFS.hive
-  @SkipIfADLS.hive
-  @SkipIfIsilon.hive
-  @SkipIfLocal.hive
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   def test_missing_field_orc(self, unique_database):
     # Test scanning ORC files with missing fields in the file metadata.
     orc_tbl_name = unique_database + ".missing_field_orc"
diff --git a/tests/stress/test_acid_stress.py b/tests/stress/test_acid_stress.py
index 4a90ab25b..96a61854d 100644
--- a/tests/stress/test_acid_stress.py
+++ b/tests/stress/test_acid_stress.py
@@ -23,8 +23,7 @@ from multiprocessing import Value
 
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
-from tests.common.skip import (SkipIf, SkipIfHive2, SkipIfS3, SkipIfGCS, SkipIfCOS,
-                               SkipIfDockerizedCluster, SkipIfOzone)
+from tests.common.skip import SkipIf, SkipIfHive2, SkipIfFS, SkipIfDockerizedCluster
 from tests.stress.stress_util import Task, run_tasks
 
 NUM_OVERWRITES = 2
@@ -160,10 +159,7 @@ class TestAcidInsertsBasic(TestAcidStress):
            sleep_seconds=0.1)])
 
   @SkipIfHive2.acid
-  @SkipIfS3.hive
-  @SkipIfOzone.hive
-  @SkipIfGCS.hive
-  @SkipIfCOS.hive
+  @SkipIfFS.hive
   @pytest.mark.execute_serially
   @pytest.mark.stress
   def test_read_hive_inserts(self, unique_database):
@@ -280,8 +276,7 @@ class TestConcurrentAcidInserts(TestAcidStress):
     finally:
       impalad_client.close()
 
-  @SkipIfGCS.jira(reason="IMPALA-10563")
-  @SkipIfCOS.jira(reason="IMPALA-10773")
+  @SkipIfFS.stress_insert_timeouts
   @SkipIfHive2.acid
   @SkipIfDockerizedCluster.jira(reason="IMPALA-11189")
   @pytest.mark.execute_serially
@@ -378,8 +373,7 @@ class TestFailingAcidInserts(TestAcidStress):
                 for i in xrange(0, num_checkers)]
     run_tasks(writers + checkers)
 
-  @SkipIfGCS.jira(reason="IMPALA-10563")
-  @SkipIfCOS.jira(reason="IMPALA-10773")
+  @SkipIfFS.stress_insert_timeouts
   @SkipIfDockerizedCluster.jira(reason="IMPALA-11191")
   @SkipIfHive2.acid
   @pytest.mark.execute_serially
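
The ACID stress tests similarly fold the two per-filesystem JIRA skips (IMPALA-10563 on
GCS, IMPALA-10773 on COS) into one named marker, @SkipIfFS.stress_insert_timeouts. A
rough sketch of how such a combined condition could look; IS_GCS and IS_COS are assumed
flag names, and only IS_HDFS is confirmed by the imports in this patch:

    import pytest

    # Assumed flags -- check tests/util/filesystem_utils.py for the real names.
    from tests.util.filesystem_utils import IS_COS, IS_GCS

    # One marker for both filesystems where the stress inserts time out.
    stress_insert_timeouts = pytest.mark.skipif(
        IS_COS or IS_GCS, reason="IMPALA-10563 (GCS), IMPALA-10773 (COS)")
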
diff --git a/tests/stress/test_ddl_stress.py b/tests/stress/test_ddl_stress.py
index 3cd433d56..16d71a6fe 100644
--- a/tests/stress/test_ddl_stress.py
+++ b/tests/stress/test_ddl_stress.py
@@ -18,8 +18,7 @@
 import pytest
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfGCS, SkipIfCOS, SkipIfLocal, SkipIfOzone)
+from tests.common.skip import SkipIfFS
 
 # Number of tables to create per thread
 NUM_TBLS_PER_THREAD = 10
@@ -48,14 +47,7 @@ class TestDdlStress(ImpalaTestSuite):
         lambda v: (v.get_value('table_format').file_format == 'text' and
                    v.get_value('table_format').compression_codec == 'none'))
 
-  @SkipIfS3.caching
-  @SkipIfOzone.caching
-  @SkipIfGCS.caching
-  @SkipIfCOS.caching
-  @SkipIfABFS.caching
-  @SkipIfADLS.caching
-  @SkipIfIsilon.caching
-  @SkipIfLocal.caching
+  @SkipIfFS.hdfs_caching
   @pytest.mark.stress
   @pytest.mark.parametrize('test_index', TEST_INDICES)
   def test_create_cache_many_tables(self, vector, testid_checksum, test_index):