Posted to commits@impala.apache.org by st...@apache.org on 2021/03/13 13:01:39 UTC

[impala] branch master updated: IMPALA-7712: Support Google Cloud Storage

This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


The following commit(s) were added to refs/heads/master by this push:
     new 2dfc68d  IMPALA-7712: Support Google Cloud Storage
2dfc68d is described below

commit 2dfc68d85277f05bf20c09e31dd10c9474ada62c
Author: stiga-huang <hu...@gmail.com>
AuthorDate: Thu Feb 25 20:19:49 2021 +0800

    IMPALA-7712: Support Google Cloud Storage
    
    This patch adds support for GCS (Google Cloud Storage). Built on the
    gcs-connector, the implementation is similar to that of the other
    remote FileSystems.
    
    New flags for GCS:
     - num_gcs_io_threads: Number of GCS I/O threads. Defaults to 16.
    
    Follow-up:
     - Support for spilling to GCS will be addressed in IMPALA-10561.
     - Support for caching GCS file handles will be addressed in
       IMPALA-10568.
     - test_concurrent_inserts and test_failing_inserts in
       test_acid_stress.py are skipped due to slow file listing on
       GCS (IMPALA-10562).
     - Some tests are skipped due to issues introduced by /etc/hosts setting
       on GCE instances (IMPALA-10563).
    
    Tests:
     - Compile and create hdfs test data on a GCE instance. Upload test data
       to a GCS bucket. Modify all locations in HMS DB to point to the GCS
       bucket. Remove some hdfs caching params. Run CORE tests.
     - Compile and load snapshot data to a GCS bucket. Run CORE tests.
    
    Change-Id: Ia91ec956de3b620cccf6a1244b56b7da7a45b32b
    Reviewed-on: http://gerrit.cloudera.org:8080/17121
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
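
For context: the gcs-connector exposes GCS buckets through the standard Hadoop
FileSystem API, which is why the implementation can mirror the other remote
FileSystems. A minimal sketch of listing a GCS path through that API (not part
of this patch; the bucket name is hypothetical):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class GcsListSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // The connector registers GoogleHadoopFileSystem for the "gs" scheme.
      conf.set("fs.gs.impl", "com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystem");
      FileSystem fs = FileSystem.get(URI.create("gs://my-test-bucket/"), conf);
      for (FileStatus st : fs.listStatus(new Path("/test-warehouse"))) {
        System.out.println(st.getPath());
      }
    }
  }
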
---
 be/src/exec/hdfs-table-sink.cc                     |  1 +
 be/src/runtime/io/disk-io-mgr-test.cc              |  3 +-
 be/src/runtime/io/disk-io-mgr.cc                   |  8 +++++
 be/src/runtime/io/disk-io-mgr.h                    |  4 +++
 be/src/runtime/tmp-file-mgr.cc                     |  1 +
 be/src/util/hdfs-util.cc                           |  5 +++
 be/src/util/hdfs-util.h                            |  4 +++
 bin/impala-config.sh                               | 18 +++++++++-
 .../java/org/apache/impala/catalog/HdfsTable.java  |  8 ++++-
 .../org/apache/impala/common/FileSystemUtil.java   | 39 +++++++++++++++++++---
 java/executor-deps/pom.xml                         |  6 ++++
 java/pom.xml                                       |  1 +
 testdata/bin/create-load-data.sh                   |  4 +--
 testdata/bin/load-test-warehouse-snapshot.sh       |  9 +++++
 testdata/bin/run-all.sh                            |  8 ++---
 .../common/etc/hadoop/conf/core-site.xml.py        |  6 ++++
 tests/authorization/test_ranger.py                 |  3 +-
 tests/common/impala_test_suite.py                  | 11 ++++--
 tests/common/skip.py                               | 21 ++++++++++++
 tests/custom_cluster/test_admission_controller.py  |  2 ++
 tests/custom_cluster/test_coordinators.py          |  3 +-
 tests/custom_cluster/test_event_processing.py      |  4 ++-
 tests/custom_cluster/test_hdfs_fd_caching.py       |  9 ++---
 .../test_hive_parquet_codec_interop.py             |  3 +-
 .../custom_cluster/test_hive_text_codec_interop.py |  3 +-
 tests/custom_cluster/test_insert_behaviour.py      |  3 +-
 tests/custom_cluster/test_lineage.py               |  3 +-
 tests/custom_cluster/test_local_catalog.py         |  3 +-
 tests/custom_cluster/test_local_tz_conversion.py   |  3 +-
 tests/custom_cluster/test_metadata_replicas.py     |  2 ++
 .../custom_cluster/test_parquet_max_page_header.py |  3 +-
 tests/custom_cluster/test_permanent_udfs.py        |  7 +++-
 tests/custom_cluster/test_query_retries.py         |  4 ++-
 tests/custom_cluster/test_restart_services.py      |  3 +-
 .../custom_cluster/test_topic_update_frequency.py  |  3 +-
 tests/data_errors/test_data_errors.py              |  6 +++-
 tests/failure/test_failpoints.py                   |  5 +--
 tests/metadata/test_catalogd_debug_actions.py      |  3 +-
 tests/metadata/test_compute_stats.py               |  5 ++-
 tests/metadata/test_ddl.py                         |  3 +-
 tests/metadata/test_hdfs_encryption.py             |  4 ++-
 tests/metadata/test_hdfs_permissions.py            |  3 +-
 tests/metadata/test_hms_integration.py             |  4 ++-
 tests/metadata/test_metadata_query_statements.py   |  4 ++-
 tests/metadata/test_partition_metadata.py          |  4 ++-
 tests/metadata/test_refresh_partition.py           |  4 ++-
 tests/metadata/test_reset_metadata.py              |  3 +-
 tests/metadata/test_stale_metadata.py              |  7 +++-
 tests/metadata/test_testcase_builder.py            |  3 ++
 tests/metadata/test_views_compatibility.py         |  4 ++-
 tests/query_test/test_acid.py                      | 15 ++++++++-
 tests/query_test/test_aggregation.py               |  1 -
 tests/query_test/test_date_queries.py              |  3 +-
 tests/query_test/test_hbase_queries.py             |  2 ++
 tests/query_test/test_hdfs_caching.py              |  7 +++-
 tests/query_test/test_insert_behaviour.py          | 10 +++++-
 tests/query_test/test_insert_parquet.py            |  3 +-
 tests/query_test/test_insert_permutation.py        |  1 -
 tests/query_test/test_join_queries.py              |  2 ++
 tests/query_test/test_nested_types.py              |  4 +++
 tests/query_test/test_observability.py             |  4 ++-
 tests/query_test/test_partitioning.py              |  4 ++-
 tests/query_test/test_resource_limits.py           |  5 +--
 tests/query_test/test_scanners.py                  | 10 ++++++
 tests/shell/test_shell_commandline.py              |  4 ++-
 tests/stress/test_acid_stress.py                   |  5 ++-
 tests/stress/test_ddl_stress.py                    |  4 ++-
 tests/util/filesystem_utils.py                     |  3 +-
 68 files changed, 303 insertions(+), 64 deletions(-)

diff --git a/be/src/exec/hdfs-table-sink.cc b/be/src/exec/hdfs-table-sink.cc
index ffc79af..4611080 100644
--- a/be/src/exec/hdfs-table-sink.cc
+++ b/be/src/exec/hdfs-table-sink.cc
@@ -410,6 +410,7 @@ Status HdfsTableSink::CreateNewTmpFile(RuntimeState* state,
   if (IsS3APath(output_partition->current_file_name.c_str()) ||
       IsABFSPath(output_partition->current_file_name.c_str()) ||
       IsADLSPath(output_partition->current_file_name.c_str()) ||
+      IsGcsPath(output_partition->current_file_name.c_str()) ||
       IsOzonePath(output_partition->current_file_name.c_str())) {
     // On S3A, the file cannot be stat'ed until after it's closed, and even so, the block
     // size reported will be just the filesystem default. Similarly, the block size
diff --git a/be/src/runtime/io/disk-io-mgr-test.cc b/be/src/runtime/io/disk-io-mgr-test.cc
index e9ece41..540a444 100644
--- a/be/src/runtime/io/disk-io-mgr-test.cc
+++ b/be/src/runtime/io/disk-io-mgr-test.cc
@@ -52,6 +52,7 @@ DECLARE_int32(num_remote_hdfs_io_threads);
 DECLARE_int32(num_s3_io_threads);
 DECLARE_int32(num_adls_io_threads);
 DECLARE_int32(num_abfs_io_threads);
+DECLARE_int32(num_gcs_io_threads);
 DECLARE_int32(num_ozone_io_threads);
 DECLARE_int32(num_remote_hdfs_file_oper_io_threads);
 DECLARE_int32(num_s3_file_oper_io_threads);
@@ -1716,7 +1717,7 @@ TEST_F(DiskIoMgrTest, VerifyNumThreadsParameter) {
   const int num_io_threads_for_remote_disks = FLAGS_num_remote_hdfs_io_threads
       + FLAGS_num_s3_io_threads + FLAGS_num_adls_io_threads + FLAGS_num_abfs_io_threads
       + FLAGS_num_ozone_io_threads + FLAGS_num_remote_hdfs_file_oper_io_threads
-      + FLAGS_num_s3_file_oper_io_threads;
+      + FLAGS_num_s3_file_oper_io_threads + FLAGS_num_gcs_io_threads;
 
   // Verify num_io_threads_per_rotational_disk and num_io_threads_per_solid_state_disk.
   // Since we do not have control over which disk is used, we check for either type
diff --git a/be/src/runtime/io/disk-io-mgr.cc b/be/src/runtime/io/disk-io-mgr.cc
index f05afae..a8ced4a 100644
--- a/be/src/runtime/io/disk-io-mgr.cc
+++ b/be/src/runtime/io/disk-io-mgr.cc
@@ -130,6 +130,9 @@ DEFINE_int32(num_abfs_io_threads, 16, "Number of ABFS I/O threads");
 // (~10 nodes), 64 threads would be more ideal.
 DEFINE_int32(num_adls_io_threads, 16, "Number of ADLS I/O threads");
 
+// The maximum number of GCS I/O threads. TODO: choose the default empirically.
+DEFINE_int32(num_gcs_io_threads, 16, "Number of GCS I/O threads");
+
 // The maximum number of Ozone I/O threads. TODO: choose the default empirically.
 DEFINE_int32(num_ozone_io_threads, 16, "Number of Ozone I/O threads");
 
@@ -465,6 +468,9 @@ Status DiskIoMgr::Init() {
     } else if (i == RemoteAdlsDiskId()) {
       num_threads_per_disk = FLAGS_num_adls_io_threads;
       device_name = "ADLS remote";
+    } else if (i == RemoteGcsDiskId()) {
+      num_threads_per_disk = FLAGS_num_gcs_io_threads;
+      device_name = "GCS remote";
     } else if (i == RemoteOzoneDiskId()) {
       num_threads_per_disk = FLAGS_num_ozone_io_threads;
       device_name = "Ozone remote";
@@ -820,12 +826,14 @@ int DiskIoMgr::AssignQueue(
     if (IsS3APath(file, check_default_fs)) return RemoteS3DiskId();
     if (IsABFSPath(file, check_default_fs)) return RemoteAbfsDiskId();
     if (IsADLSPath(file, check_default_fs)) return RemoteAdlsDiskId();
+    if (IsGcsPath(file, check_default_fs)) return RemoteGcsDiskId();
     if (IsOzonePath(file, check_default_fs)) return RemoteOzoneDiskId();
   }
   // Assign to a local disk queue.
   DCHECK(!IsS3APath(file, check_default_fs)); // S3 is always remote.
   DCHECK(!IsABFSPath(file, check_default_fs)); // ABFS is always remote.
   DCHECK(!IsADLSPath(file, check_default_fs)); // ADLS is always remote.
+  DCHECK(!IsGcsPath(file, check_default_fs)); // GCS is always remote.
   DCHECK(!IsOzonePath(file, check_default_fs)); // Ozone is always remote.
   if (disk_id == -1) {
     // disk id is unknown, assign it an arbitrary one.
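
To make the routing concrete: each remote filesystem scheme gets its own
dedicated I/O queue, indexed after the local disk queues. A simplified Java
sketch of the rule AssignQueue() implements above (names and enum ordering are
assumptions, not the authoritative C++ logic):

  class RemoteQueueSketch {
    enum RemoteDisk { DFS, S3, ADLS, ABFS, GCS, OZONE }

    static int remoteQueueId(String path, int numLocalDisks) {
      RemoteDisk d;
      if (path.startsWith("s3a://")) d = RemoteDisk.S3;
      else if (path.startsWith("adl://")) d = RemoteDisk.ADLS;
      else if (path.startsWith("abfs://")) d = RemoteDisk.ABFS;
      else if (path.startsWith("gs://")) d = RemoteDisk.GCS;  // new in this patch
      else if (path.startsWith("o3fs://")) d = RemoteDisk.OZONE;
      else d = RemoteDisk.DFS;  // remote HDFS
      // Remote queues come after the local disk queues.
      return numLocalDisks + d.ordinal();
    }
  }
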
diff --git a/be/src/runtime/io/disk-io-mgr.h b/be/src/runtime/io/disk-io-mgr.h
index 400702a..d05df42 100644
--- a/be/src/runtime/io/disk-io-mgr.h
+++ b/be/src/runtime/io/disk-io-mgr.h
@@ -327,6 +327,9 @@ class DiskIoMgr : public CacheLineAligned {
   /// The disk ID (and therefore disk_queues_ index) used for ADLS accesses.
   int RemoteAdlsDiskId() const { return num_local_disks() + REMOTE_ADLS_DISK_OFFSET; }
 
+  /// The disk ID (and therefore disk_queues_ index) used for GCS accesses.
+  int RemoteGcsDiskId() const { return num_local_disks() + REMOTE_GCS_DISK_OFFSET; }
+
   /// The disk ID (and therefore disk_queues_ index) used for Ozone accesses.
   int RemoteOzoneDiskId() const { return num_local_disks() + REMOTE_OZONE_DISK_OFFSET; }
 
@@ -383,6 +386,7 @@ class DiskIoMgr : public CacheLineAligned {
     REMOTE_S3_DISK_OFFSET,
     REMOTE_ADLS_DISK_OFFSET,
     REMOTE_ABFS_DISK_OFFSET,
+    REMOTE_GCS_DISK_OFFSET,
     REMOTE_OZONE_DISK_OFFSET,
     REMOTE_DFS_DISK_FILE_OPER_OFFSET,
     REMOTE_S3_DISK_FILE_OPER_OFFSET,
diff --git a/be/src/runtime/tmp-file-mgr.cc b/be/src/runtime/tmp-file-mgr.cc
index e0a65a6..91c3b07 100644
--- a/be/src/runtime/tmp-file-mgr.cc
+++ b/be/src/runtime/tmp-file-mgr.cc
@@ -278,6 +278,7 @@ Status TmpFileMgr::InitCustom(const vector<string>& tmp_dir_specifiers,
       s3a_options_ = {make_pair("fs.s3a.fast.upload", "true"),
           make_pair("fs.s3a.fast.upload.buffer", "disk")};
     } else {
+      // TODO(IMPALA-10561): Add support for spilling to GCS
       prefix = "";
       tmp_dirs_without_prefix = tmp_dir_spec_trimmed.substr(0);
     }
diff --git a/be/src/util/hdfs-util.cc b/be/src/util/hdfs-util.cc
index bb4014a..e0ee54e 100644
--- a/be/src/util/hdfs-util.cc
+++ b/be/src/util/hdfs-util.cc
@@ -34,6 +34,7 @@ const char* FILESYS_PREFIX_S3 = "s3a://";
 const char* FILESYS_PREFIX_ABFS = "abfs://";
 const char* FILESYS_PREFIX_ABFS_SEC = "abfss://";
 const char* FILESYS_PREFIX_ADL = "adl://";
+const char* FILESYS_PREFIX_GCS = "gs://";
 const char* FILESYS_PREFIX_OZONE = "o3fs://";
 
 string GetHdfsErrorMsg(const string& prefix, const string& file) {
@@ -108,6 +109,10 @@ bool IsADLSPath(const char* path, bool check_default_fs) {
   return IsSpecificPath(path, FILESYS_PREFIX_ADL, check_default_fs);
 }
 
+bool IsGcsPath(const char* path, bool check_default_fs) {
+  return IsSpecificPath(path, FILESYS_PREFIX_GCS, check_default_fs);
+}
+
 bool IsOzonePath(const char* path, bool check_default_fs) {
   return IsSpecificPath(path, FILESYS_PREFIX_OZONE, check_default_fs);
 }
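
The new IsGcsPath() is plain prefix matching on "gs://", with an optional
fall-back to the default filesystem for scheme-less paths (assuming
IsSpecificPath() behaves like the other helpers here). A rough Java rendering,
for illustration only:

  class GcsPathSketch {
    // Assumption: scheme-less paths resolve against the default filesystem.
    static boolean isGcsPath(String path, boolean checkDefaultFs, String defaultFs) {
      if (path.startsWith("gs://")) return true;
      return checkDefaultFs && !path.contains("://") && defaultFs.startsWith("gs://");
    }
  }
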
diff --git a/be/src/util/hdfs-util.h b/be/src/util/hdfs-util.h
index 85e780a..835db65 100644
--- a/be/src/util/hdfs-util.h
+++ b/be/src/util/hdfs-util.h
@@ -30,6 +30,7 @@ extern const char* FILESYS_PREFIX_S3;
 extern const char* FILESYS_PREFIX_ABFS;
 extern const char* FILESYS_PREFIX_ABFS_SEC;
 extern const char* FILESYS_PREFIX_ADL;
+extern const char* FILESYS_PREFIX_GCS;
 extern const char* FILESYS_PREFIX_OZONE;
 
 /// Utility function to get error messages from HDFS. This function takes prefix/file and
@@ -66,6 +67,9 @@ bool IsABFSPath(const char* path, bool check_default_fs = true);
 /// Returns true iff the path refers to a location on an ADL filesystem.
 bool IsADLSPath(const char* path, bool check_default_fs = true);
 
+/// Returns true iff the path refers to a location on a GCS filesystem.
+bool IsGcsPath(const char* path, bool check_default_fs = true);
+
 /// Returns true iff the path refers to a location on an Ozone filesystem.
 bool IsOzonePath(const char* path, bool check_default_fs = true);
 
diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index 133851a..4cacc3c 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -189,6 +189,7 @@ export CDP_OZONE_VERSION=1.0.0.7.2.7.0-44
 export CDP_PARQUET_VERSION=1.10.99.7.2.7.0-44
 export CDP_RANGER_VERSION=2.1.0.7.2.7.0-44
 export CDP_TEZ_VERSION=0.9.1.7.2.7.0-44
+export CDP_GCS_VERSION=2.1.2.7.2.7.0-44
 
 export ARCH_NAME=$(uname -p)
 
@@ -252,6 +253,7 @@ export IMPALA_RANGER_URL=${CDP_RANGER_URL-}
 export IMPALA_TEZ_VERSION=${CDP_TEZ_VERSION}
 export IMPALA_TEZ_URL=${CDP_TEZ_URL-}
 export IMPALA_HIVE_STORAGE_API_VERSION=${HIVE_STORAGE_API_VERSION_OVERRIDE:-"2.3.0.$IMPALA_HIVE_VERSION"}
+export IMPALA_GCS_VERSION=${CDP_GCS_VERSION}
 
 # Extract the first component of the hive version.
 # Allow overriding of Hive source location in case we want to build Impala without
@@ -349,6 +351,10 @@ export azure_client_secret="${azure_client_secret-DummyAdlsClientSecret}"
 export azure_data_lake_store_name="${azure_data_lake_store_name-}"
 export azure_storage_account_name="${azure_storage_account_name-}"
 export azure_storage_container_name="${azure_storage_container_name-}"
+export GOOGLE_CLOUD_PROJECT_ID="${GOOGLE_CLOUD_PROJECT_ID-}"
+export GOOGLE_CLOUD_SERVICE_ACCOUNT="${GOOGLE_CLOUD_SERVICE_ACCOUNT-}"
+export GOOGLE_APPLICATION_CREDENTIALS="${GOOGLE_APPLICATION_CREDENTIALS-}"
+export GCS_BUCKET="${GCS_BUCKET-}"
 export HDFS_REPLICATION="${HDFS_REPLICATION-3}"
 export ISILON_NAMENODE="${ISILON_NAMENODE-}"
 # Internal and external interfaces that test cluster services will listen on. The
@@ -493,6 +499,15 @@ elif [ "${TARGET_FILESYSTEM}" = "abfs" ]; then
   domain="${azure_storage_account_name}.dfs.core.windows.net"
   DEFAULT_FS="abfss://${azure_storage_container_name}@${domain}"
   export DEFAULT_FS
+elif [ "${TARGET_FILESYSTEM}" = "gs" ]; then
+  # Basic error checking
+  if [[ "${GOOGLE_APPLICATION_CREDENTIALS}" = "" ]]; then
+    echo "GOOGLE_APPLICATION_CREDENTIALS should be set to the JSON file that contains
+      your service account key."
+    return 1
+  fi
+  DEFAULT_FS="gs://${GCS_BUCKET}"
+  export DEFAULT_FS
 elif [ "${TARGET_FILESYSTEM}" = "isilon" ]; then
   if [ "${ISILON_NAMENODE}" = "" ]; then
     echo "In order to access the Isilon filesystem, ISILON_NAMENODE"
@@ -526,7 +541,7 @@ elif [ "${TARGET_FILESYSTEM}" = "hdfs" ]; then
   fi
 else
   echo "Unsupported filesystem '$TARGET_FILESYSTEM'"
-  echo "Valid values are: hdfs, isilon, s3, local"
+  echo "Valid values are: hdfs, isilon, s3, abfs, adls, gs, local"
   return 1
 fi
 
@@ -735,6 +750,7 @@ echo "IMPALA_HUDI_VERSION     = $IMPALA_HUDI_VERSION"
 echo "IMPALA_KUDU_VERSION     = $IMPALA_KUDU_VERSION"
 echo "IMPALA_RANGER_VERSION   = $IMPALA_RANGER_VERSION"
 echo "IMPALA_ICEBERG_VERSION  = $IMPALA_ICEBERG_VERSION"
+echo "IMPALA_GCS_VERSION      = $IMPALA_GCS_VERSION"
 
 # Kerberos things.  If the cluster exists and is kerberized, source
 # the required environment.  This is required for any hadoop tool to
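
GOOGLE_APPLICATION_CREDENTIALS is the standard entry point for Google's
Application Default Credentials, which is how a service-account JSON key set
above can be picked up at runtime. A small verification sketch (assumes the
google-auth-library-java dependency; not part of this patch):

  import java.io.IOException;
  import com.google.auth.oauth2.GoogleCredentials;

  public class AdcCheck {
    public static void main(String[] args) throws IOException {
      // Loads the JSON key file pointed to by $GOOGLE_APPLICATION_CREDENTIALS.
      GoogleCredentials creds = GoogleCredentials.getApplicationDefault();
      System.out.println("Credentials loaded: " + creds.getClass().getName());
    }
  }
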
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 77eee52..59b6572 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -770,7 +770,7 @@ public class HdfsTable extends Table implements FeFsTable {
    * permissions Impala has on the given path. If the path does not exist, recurses up
    * the path until a existing parent directory is found, and inherit access permissions
    * from that.
-   * Always returns READ_WRITE for S3 and ADLS files.
+   * Always returns READ_WRITE for S3, ADLS and GCS files.
    */
   private static TAccessLevel getAvailableAccessLevel(String tableName,
       Path location, FsPermissionCache permCache) throws IOException {
@@ -818,6 +818,12 @@ public class HdfsTable extends Table implements FeFsTable {
     // permissions to hadoop users/groups (HADOOP-14437).
     if (FileSystemUtil.isADLFileSystem(fs)) return true;
     if (FileSystemUtil.isABFSFileSystem(fs)) return true;
+
+    // GCS IAM permissions don't map to POSIX permissions. The GCS connector presents
+    // fake POSIX file permissions configured by the 'fs.gs.reported.permissions'
+    // property, so calling getPermissions() on GCS files makes no sense. Assume all
+    // GCS files have READ_WRITE permissions.
+    if (FileSystemUtil.isGCSFileSystem(fs)) return true;
     return false;
   }
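
For illustration, the isGCSFileSystem() check used above boils down to a scheme
comparison on the FileSystem's URI; a minimal equivalent sketch (not the
patch's exact helper):

  import org.apache.hadoop.fs.FileSystem;

  class FsSchemeSketch {
    static boolean isGcsFileSystem(FileSystem fs) {
      return "gs".equalsIgnoreCase(fs.getUri().getScheme());
    }
  }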
 
diff --git a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
index 89a26a5..592f74f 100644
--- a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
+++ b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
@@ -65,6 +65,7 @@ public class FileSystemUtil {
   public static final String SCHEME_S3A = "s3a";
   public static final String SCHEME_O3FS = "o3fs";
   public static final String SCHEME_ALLUXIO = "alluxio";
+  public static final String SCHEME_GCS = "gs";
 
   /**
    * Set containing all FileSystem scheme that known to supports storage UUIDs in
@@ -89,6 +90,7 @@ public class FileSystemUtil {
           .add(SCHEME_HDFS)
           .add(SCHEME_S3A)
           .add(SCHEME_O3FS)
+          .add(SCHEME_GCS)
           .build();
 
   /**
@@ -101,6 +103,7 @@ public class FileSystemUtil {
           .add(SCHEME_ADL)
           .add(SCHEME_HDFS)
           .add(SCHEME_S3A)
+          .add(SCHEME_GCS)
           .build();
 
   /**
@@ -114,6 +117,7 @@ public class FileSystemUtil {
           .add(SCHEME_HDFS)
           .add(SCHEME_S3A)
           .add(SCHEME_O3FS)
+          .add(SCHEME_GCS)
           .build();
 
   /**
@@ -385,6 +389,13 @@ public class FileSystemUtil {
   }
 
   /**
+   * Returns true iff the filesystem is a GoogleHadoopFileSystem.
+   */
+  public static boolean isGCSFileSystem(FileSystem fs) {
+    return hasScheme(fs, SCHEME_GCS);
+  }
+
+  /**
    * Returns true iff the filesystem is AdlFileSystem.
    */
   public static boolean isADLFileSystem(FileSystem fs) {
@@ -491,7 +502,8 @@ public class FileSystemUtil {
     LOCAL,
     S3,
     OZONE,
-    ALLUXIO;
+    ALLUXIO,
+    GCS;
 
     private static final Map<String, FsType> SCHEME_TO_FS_MAPPING =
         ImmutableMap.<String, FsType>builder()
@@ -503,6 +515,7 @@ public class FileSystemUtil {
             .put(SCHEME_S3A, S3)
             .put(SCHEME_O3FS, OZONE)
             .put(SCHEME_ALLUXIO, ALLUXIO)
+            .put(SCHEME_GCS, GCS)
             .build();
 
     /**
@@ -676,7 +689,7 @@ public class FileSystemUtil {
         return new FilterIterator(p, new RecursingIterator(fs, p));
       }
       DebugUtils.executeDebugAction(debugAction, DebugUtils.REFRESH_HDFS_LISTING_DELAY);
-      return new FilterIterator(p, fs.listStatusIterator(p));
+      return new FilterIterator(p, listStatusIterator(fs, p));
     } catch (FileNotFoundException e) {
       if (LOG.isWarnEnabled()) LOG.warn("Path does not exist: " + p.toString(), e);
       return null;
@@ -698,6 +711,23 @@ public class FileSystemUtil {
   }
 
   /**
+   * Wrapper around FileSystem.listStatusIterator() to make sure the path exists.
+   *
+   * @throws FileNotFoundException if <code>p</code> does not exist
+   * @throws IOException if any I/O error occurred
+   */
+  public static RemoteIterator<FileStatus> listStatusIterator(FileSystem fs, Path p)
+      throws IOException {
+    RemoteIterator<FileStatus> iterator = fs.listStatusIterator(p);
+    // Some FileSystem implementations like GoogleHadoopFileSystem don't check the
+    // existence of the start path when creating the RemoteIterator. Instead, their
+    // iterators throw a FileNotFoundException on the first call to hasNext() when
+    // the start path doesn't exist. Here we call hasNext() to ensure the start
+    // path exists.
+    iterator.hasNext();
+    return iterator;
+  }
+
+  /**
    * Returns true if the path 'p' is a directory, false otherwise.
    */
   public static boolean isDir(Path p) throws IOException {
@@ -836,7 +866,7 @@ public class FileSystemUtil {
 
     private RecursingIterator(FileSystem fs, Path startPath) throws IOException {
       this.fs_ = Preconditions.checkNotNull(fs);
-      curIter_ = fs.listStatusIterator(Preconditions.checkNotNull(startPath));
+      curIter_ = listStatusIterator(fs, Preconditions.checkNotNull(startPath));
     }
 
     @Override
@@ -873,8 +903,9 @@ public class FileSystemUtil {
         curFile_ = fileStatus;
         return;
       }
+      RemoteIterator<FileStatus> iter = listStatusIterator(fs_, fileStatus.getPath());
       iters_.push(curIter_);
-      curIter_ = fs_.listStatusIterator(fileStatus.getPath());
+      curIter_ = iter;
       curFile_ = fileStatus;
     }
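
The wrapper above exists because some FileSystems validate the start path
lazily: GoogleHadoopFileSystem hands back an iterator successfully and only
throws FileNotFoundException from the first hasNext() call. A self-contained
sketch of the same defensive pattern:

  import java.io.IOException;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.RemoteIterator;

  final class SafeListing {
    static RemoteIterator<FileStatus> listStatusIterator(FileSystem fs, Path p)
        throws IOException {
      RemoteIterator<FileStatus> it = fs.listStatusIterator(p);
      // Force the existence check now: lazy implementations throw
      // FileNotFoundException here rather than in listStatusIterator().
      it.hasNext();
      return it;
    }
  }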
 
diff --git a/java/executor-deps/pom.xml b/java/executor-deps/pom.xml
index 4817f0a..50dc413 100644
--- a/java/executor-deps/pom.xml
+++ b/java/executor-deps/pom.xml
@@ -133,6 +133,12 @@ under the License.
     </dependency>
 
     <dependency>
+        <groupId>com.google.cloud.bigdataoss</groupId>
+        <artifactId>gcs-connector</artifactId>
+        <version>${gcs.version}</version>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
       <version>${hbase.version}</version>
diff --git a/java/pom.xml b/java/pom.xml
index 6fb10a1..f7b80a2 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -44,6 +44,7 @@ under the License.
     <parquet.version>${env.IMPALA_PARQUET_VERSION}</parquet.version>
     <kite.version>${env.IMPALA_KITE_VERSION}</kite.version>
     <knox.version>${env.IMPALA_KNOX_VERSION}</knox.version>
+    <gcs.version>${env.IMPALA_GCS_VERSION}</gcs.version>
     <thrift.version>0.9.3-1</thrift.version>
     <impala.extdatasrc.api.version>${project.version}</impala.extdatasrc.api.version>
     <impala.query.event.hook.api.version>${project.version}</impala.query.event.hook.api.version>
diff --git a/testdata/bin/create-load-data.sh b/testdata/bin/create-load-data.sh
index c2dfe33..3cf6973 100755
--- a/testdata/bin/create-load-data.sh
+++ b/testdata/bin/create-load-data.sh
@@ -130,9 +130,9 @@ elif [ $SKIP_SNAPSHOT_LOAD -eq 0 ]; then
   # Don't skip the metadata load if a schema change is detected.
   if ! ${IMPALA_HOME}/testdata/bin/check-schema-diff.sh; then
     if [[ "${TARGET_FILESYSTEM}" == "isilon" || "${TARGET_FILESYSTEM}" == "s3" || \
-          "${TARGET_FILESYSTEM}" == "local" ]] ; then
+          "${TARGET_FILESYSTEM}" == "local" || "${TARGET_FILESYSTEM}" == "gs" ]] ; then
       echo "ERROR in $0 at line $LINENO: A schema change has been detected in the"
-      echo "metadata, but it cannot be loaded on isilon, s3 or local and the"
+      echo "metadata, but it cannot be loaded on isilon, s3, gcs or local and the"
       echo "target file system is ${TARGET_FILESYSTEM}.  Exiting."
       exit 1
     fi
diff --git a/testdata/bin/load-test-warehouse-snapshot.sh b/testdata/bin/load-test-warehouse-snapshot.sh
index c53cb03..a5597cf 100755
--- a/testdata/bin/load-test-warehouse-snapshot.sh
+++ b/testdata/bin/load-test-warehouse-snapshot.sh
@@ -119,6 +119,15 @@ if [ "${TARGET_FILESYSTEM}" = "s3" ]; then
     echo "Copying the test-warehouse to s3 failed, aborting."
     exit 1
   fi
+elif [ "${TARGET_FILESYSTEM}" = "gs" ]; then
+  # Authenticate with the service account before using gsutil
+  gcloud auth activate-service-account --key-file "$GOOGLE_APPLICATION_CREDENTIALS"
+  # Upload files in parallel (-m)
+  if ! gsutil -m -q cp -r ${SNAPSHOT_STAGING_DIR}${TEST_WAREHOUSE_DIR} \
+      gs://${GCS_BUCKET}; then
+    echo "Copying the test-warehouse to GCS failed, aborting."
+    exit 1
+  fi
 else
     hadoop fs -put ${SNAPSHOT_STAGING_DIR}${TEST_WAREHOUSE_DIR}/* ${FILESYSTEM_PREFIX}${TEST_WAREHOUSE_DIR}
 fi
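
The script uploads with gsutil; the same single-object upload could also be
done with the google-cloud-storage Java client. A hedged sketch (hypothetical
bucket and object names; not what the script uses):

  import java.nio.file.Files;
  import java.nio.file.Paths;
  import com.google.cloud.storage.BlobId;
  import com.google.cloud.storage.BlobInfo;
  import com.google.cloud.storage.Storage;
  import com.google.cloud.storage.StorageOptions;

  public class UploadOneObject {
    public static void main(String[] args) throws Exception {
      // Authenticates via Application Default Credentials
      // ($GOOGLE_APPLICATION_CREDENTIALS).
      Storage storage = StorageOptions.getDefaultInstance().getService();
      BlobId id = BlobId.of("my-test-bucket", "test-warehouse/sample.txt");
      storage.create(BlobInfo.newBuilder(id).build(),
          Files.readAllBytes(Paths.get("sample.txt")));
    }
  }
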
diff --git a/testdata/bin/run-all.sh b/testdata/bin/run-all.sh
index 0912c9f..247a1fa 100755
--- a/testdata/bin/run-all.sh
+++ b/testdata/bin/run-all.sh
@@ -80,13 +80,13 @@ elif [[ ${DEFAULT_FS} == "${LOCAL_FS}" ]]; then
   $IMPALA_HOME/testdata/bin/run-hive-server.sh -only_metastore 2>&1 | \
       tee ${IMPALA_CLUSTER_LOGS_DIR}/run-hive-server.log
 else
-  # With Isilon, we only start the Hive metastore.
-  #   - HDFS is not started becuase Isilon is used as the defaultFs in core-site
-  #   - HBase is irrelevent for Impala testing with Isilon.
+  # With Isilon, ABFS, ADLS, or GCS, we only start the Hive metastore.
+  #   - HDFS is not started because remote storage is used as the defaultFs in core-site
+  #   - HBase is irrelevant for Impala testing with remote storage.
   #   - We don't yet have a good way to start YARN using a different defaultFS. Moreoever,
   #     we currently don't run hive queries against Isilon for testing.
   #   - LLAMA is avoided because we cannot start YARN.
-  #   - KMS is used for encryption testing, which is not available on Isilon.
+  #   - KMS is used for encryption testing, which is not available on remote storage.
   #   - Hive needs YARN, and we don't run Hive queries.
   # TODO: Figure out how to start YARN, LLAMA and Hive with a different defaultFs.
   echo " --> Starting Hive Metastore Service"
diff --git a/testdata/cluster/node_templates/common/etc/hadoop/conf/core-site.xml.py b/testdata/cluster/node_templates/common/etc/hadoop/conf/core-site.xml.py
index 611cabf..fd3569f 100644
--- a/testdata/cluster/node_templates/common/etc/hadoop/conf/core-site.xml.py
+++ b/testdata/cluster/node_templates/common/etc/hadoop/conf/core-site.xml.py
@@ -92,6 +92,12 @@ CONFIG = {
   # buckets and rely on the old behavior. This also means that the tests do not
   # require AWS credentials.
   'fs.s3a.bucket.probe': '1',
+
+  # GCS IAM permissions don't map to POSIX permissions required by Hadoop FileSystem,
+  # so the GCS connector presents fake POSIX file permissions. The default 700 may end up
+  # being too restrictive for some processes performing file-based checks, e.g.
+  # HiveServer2 requires the permissions on /tmp/hive to be at least 733.
+  'fs.gs.reported.permissions': '777',
 }
 
 if target_filesystem == 's3':
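
The same knob can be set programmatically on a Hadoop Configuration; a sketch
for illustration only (the minicluster sets it through core-site.xml as above):

  import org.apache.hadoop.conf.Configuration;

  class GcsPermissionsSketch {
    static Configuration withFakePosixBits() {
      Configuration conf = new Configuration();
      // Report 777 so permission-based checks (e.g. HiveServer2 requiring at
      // least 733 on /tmp/hive) pass on GCS paths.
      conf.set("fs.gs.reported.permissions", "777");
      return conf;
    }
  }
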
diff --git a/tests/authorization/test_ranger.py b/tests/authorization/test_ranger.py
index 791755d..4df2b39 100644
--- a/tests/authorization/test_ranger.py
+++ b/tests/authorization/test_ranger.py
@@ -28,7 +28,7 @@ from subprocess import check_call
 from getpass import getuser
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfLocal, SkipIfHive2)
+                               SkipIfLocal, SkipIfHive2, SkipIfGCS)
 from tests.util.hdfs_util import NAMENODE
 from tests.util.calculation_util import get_random_id
 
@@ -1049,6 +1049,7 @@ class TestRanger(CustomClusterTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfHive2.ranger_auth
   @CustomClusterTestSuite.with_args()
   def test_hive_with_ranger_setup(self, vector):
diff --git a/tests/common/impala_test_suite.py b/tests/common/impala_test_suite.py
index dded47c..039485c 100644
--- a/tests/common/impala_test_suite.py
+++ b/tests/common/impala_test_suite.py
@@ -64,6 +64,7 @@ from tests.util.filesystem_utils import (
     IS_S3,
     IS_ABFS,
     IS_ADLS,
+    IS_GCS,
     IS_HDFS,
     S3_BUCKET_NAME,
     S3GUARD_ENABLED,
@@ -198,6 +199,7 @@ class ImpalaTestSuite(BaseTestSuite):
     #     ABFS: uses the HDFS CLI
     #     ADLS: uses a mixture of azure-data-lake-store-python and the HDFS CLI (TODO:
     #           this should completely switch to the HDFS CLI once we test it)
+    #     GCS:  uses the HDFS CLI
     #
     # 'hdfs_client' is a HDFS-specific client library, and it only works when running on
     # HDFS. When using 'hdfs_client', the test must be skipped on everything other than
@@ -218,6 +220,9 @@ class ImpalaTestSuite(BaseTestSuite):
       cls.filesystem_client = HadoopFsCommandLineClient("ABFS")
     elif IS_ADLS:
       cls.filesystem_client = ADLSClient(ADLS_STORE_NAME)
+    elif IS_GCS:
+      # The GCS client is implemented via the HDFS command line client
+      cls.filesystem_client = HadoopFsCommandLineClient("GCS")
 
     # Override the shell history path so that commands run by any tests
     # don't write any history into the developer's file.
@@ -996,10 +1001,10 @@ class ImpalaTestSuite(BaseTestSuite):
       tf_dimensions = ImpalaTestDimension('table_format', *table_formats)
     else:
       tf_dimensions = load_table_info_dimension(cls.get_workload(), exploration_strategy)
-    # If 'skip_hbase' is specified or the filesystem is isilon, s3 or local, we don't
-    # need the hbase dimension.
+    # If 'skip_hbase' is specified or the filesystem is isilon, s3, GCS (gs) or local,
+    # we don't need the hbase dimension.
     if pytest.config.option.skip_hbase or TARGET_FILESYSTEM.lower() \
-        in ['s3', 'isilon', 'local', 'abfs', 'adls']:
+        in ['s3', 'isilon', 'local', 'abfs', 'adls', 'gs']:
       for tf_dimension in tf_dimensions:
         if tf_dimension.value.file_format == "hbase":
           tf_dimensions.remove(tf_dimension)
diff --git a/tests/common/skip.py b/tests/common/skip.py
index 92cf18c..6acef78 100644
--- a/tests/common/skip.py
+++ b/tests/common/skip.py
@@ -31,6 +31,7 @@ from tests.common.kudu_test_suite import get_kudu_master_flag
 from tests.util.filesystem_utils import (
     IS_ABFS,
     IS_ADLS,
+    IS_GCS,
     IS_EC,
     IS_HDFS,
     IS_ISILON,
@@ -107,6 +108,26 @@ class SkipIfADLS:
   eventually_consistent = pytest.mark.skipif(IS_ADLS,
       reason="The client is slow to realize changes to file metadata")
 
+
+class SkipIfGCS:
+
+  # These are skipped due to product limitations.
+  caching = pytest.mark.skipif(IS_GCS, reason="SET CACHED not implemented for GCS")
+  hive = pytest.mark.skipif(IS_GCS, reason="Hive doesn't work with GCS")
+  hdfs_block_size = pytest.mark.skipif(IS_GCS, reason="GCS uses its own block size")
+  hdfs_acls = pytest.mark.skipif(IS_GCS, reason="HDFS acls are not supported on GCS")
+  jira = partial(pytest.mark.skipif, IS_GCS)
+  hdfs_encryption = pytest.mark.skipif(IS_GCS,
+      reason="HDFS encryption is not supported with GCS")
+
+  # These need test infra work to re-enable.
+  hbase = pytest.mark.skipif(IS_GCS, reason="HBase not started with GCS")
+  qualified_path = pytest.mark.skipif(IS_GCS,
+      reason="Tests rely on HDFS qualified paths, IMPALA-1872")
+  variable_listing_times = pytest.mark.skipif(IS_GCS,
+      reason="Flakiness due to unpredictable listing times on GCS.")
+
+
 class SkipIfKudu:
   no_hybrid_clock = pytest.mark.skipif(
       get_kudu_master_flag("--use_hybrid_clock") == "false",
diff --git a/tests/custom_cluster/test_admission_controller.py b/tests/custom_cluster/test_admission_controller.py
index 90ec4f2..b17b7f2 100644
--- a/tests/custom_cluster/test_admission_controller.py
+++ b/tests/custom_cluster/test_admission_controller.py
@@ -37,6 +37,7 @@ from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.resource_pool_config import ResourcePoolConfig
 from tests.common.skip import (
     SkipIfS3,
+    SkipIfGCS,
     SkipIfABFS,
     SkipIfADLS,
     SkipIfEC,
@@ -443,6 +444,7 @@ class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
                        ".* is greater than pool max mem resources 10.00 MB", str(ex))
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfGCS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
   @SkipIfADLS.hdfs_block_size
   @SkipIfEC.fix_later
diff --git a/tests/custom_cluster/test_coordinators.py b/tests/custom_cluster/test_coordinators.py
index 6e3b296..1be16dd 100644
--- a/tests/custom_cluster/test_coordinators.py
+++ b/tests/custom_cluster/test_coordinators.py
@@ -25,7 +25,7 @@ from subprocess import check_call
 from tests.util.filesystem_utils import get_fs_path
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS,
-    SkipIfIsilon, SkipIfLocal)
+                               SkipIfIsilon, SkipIfGCS, SkipIfLocal)
 
 LOG = logging.getLogger('test_coordinators')
 LOG.setLevel(level=logging.DEBUG)
@@ -320,6 +320,7 @@ class TestCoordinators(CustomClusterTestSuite):
     assert num_hosts in str(ret)
 
   @SkipIfS3.hbase
+  @SkipIfGCS.hbase
   @SkipIfABFS.hbase
   @SkipIfADLS.hbase
   @SkipIfIsilon.hbase
diff --git a/tests/custom_cluster/test_event_processing.py b/tests/custom_cluster/test_event_processing.py
index 108f2c7..499a688 100644
--- a/tests/custom_cluster/test_event_processing.py
+++ b/tests/custom_cluster/test_event_processing.py
@@ -22,7 +22,8 @@ import pytest
 from tests.common.skip import SkipIfHive2
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.environ import HIVE_MAJOR_VERSION
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
+                               SkipIfGCS, SkipIfLocal)
 from tests.util.hive_utils import HiveDbWrapper
 from tests.util.event_processor_utils import EventProcessorUtils
 from tests.util.filesystem_utils import WAREHOUSE
@@ -31,6 +32,7 @@ from tests.util.filesystem_utils import WAREHOUSE
 @SkipIfS3.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
+@SkipIfGCS.hive
 @SkipIfIsilon.hive
 @SkipIfLocal.hive
 class TestEventProcessing(CustomClusterTestSuite):
diff --git a/tests/custom_cluster/test_hdfs_fd_caching.py b/tests/custom_cluster/test_hdfs_fd_caching.py
index 07fdd55..1ccfd48 100644
--- a/tests/custom_cluster/test_hdfs_fd_caching.py
+++ b/tests/custom_cluster/test_hdfs_fd_caching.py
@@ -21,7 +21,8 @@ from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.skip import SkipIfLocal
 from tests.util.filesystem_utils import (
     IS_ISILON,
-    IS_ADLS)
+    IS_ADLS,
+    IS_GCS)
 from time import sleep
 
 @SkipIfLocal.hdfs_fd_caching
@@ -130,7 +131,7 @@ class TestHdfsFdCaching(CustomClusterTestSuite):
 
     # Caching applies to HDFS, S3, and ABFS files. If this is HDFS, S3, or ABFS, then
     # verify that caching works. Otherwise, verify that file handles are not cached.
-    if IS_ADLS or IS_ISILON:
+    if IS_ADLS or IS_ISILON or IS_GCS:
       caching_expected = False
     else:
       caching_expected = True
@@ -146,7 +147,7 @@ class TestHdfsFdCaching(CustomClusterTestSuite):
     handle_timeout = 5
 
     # Only test eviction on platforms where caching is enabled.
-    if IS_ADLS or IS_ISILON:
+    if IS_ADLS or IS_ISILON or IS_GCS:
       return
     caching_expected = True
     self.run_fd_caching_test(vector, caching_expected, cache_capacity, handle_timeout)
@@ -174,7 +175,7 @@ class TestHdfsFdCaching(CustomClusterTestSuite):
     eviction_timeout_secs = 5
 
     # Only test eviction on platforms where caching is enabled.
-    if IS_ADLS or IS_ISILON:
+    if IS_ADLS or IS_ISILON or IS_GCS:
       return
 
     # Maximum number of file handles cached.
diff --git a/tests/custom_cluster/test_hive_parquet_codec_interop.py b/tests/custom_cluster/test_hive_parquet_codec_interop.py
index e1b43e2..7e51737 100644
--- a/tests/custom_cluster/test_hive_parquet_codec_interop.py
+++ b/tests/custom_cluster/test_hive_parquet_codec_interop.py
@@ -21,7 +21,7 @@ import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.environ import HIVE_MAJOR_VERSION
-from tests.common.skip import SkipIfS3
+from tests.common.skip import SkipIfS3, SkipIfGCS
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 from tests.util.filesystem_utils import get_fs_path
@@ -52,6 +52,7 @@ class TestParquetInterop(CustomClusterTestSuite):
         lambda v: v.get_value('table_format').file_format == 'parquet')
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("-convert_legacy_hive_parquet_utc_timestamps=true "
       "-hdfs_zone_info_zip=%s" % get_fs_path("/test-warehouse/tzdb/2017c.zip"))
diff --git a/tests/custom_cluster/test_hive_text_codec_interop.py b/tests/custom_cluster/test_hive_text_codec_interop.py
index 7d4f094..cf6e2ee 100644
--- a/tests/custom_cluster/test_hive_text_codec_interop.py
+++ b/tests/custom_cluster/test_hive_text_codec_interop.py
@@ -21,7 +21,7 @@ import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.environ import HIVE_MAJOR_VERSION
-from tests.common.skip import SkipIfS3
+from tests.common.skip import SkipIfS3, SkipIfGCS
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 
@@ -52,6 +52,7 @@ class TestTextInterop(CustomClusterTestSuite):
         lambda v: v.get_value('table_format').file_format == 'textfile')
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @pytest.mark.execute_serially
   def test_hive_impala_interop(self, unique_database, cluster_properties):
     """Tests compressed text file written by Hive with different codecs
diff --git a/tests/custom_cluster/test_insert_behaviour.py b/tests/custom_cluster/test_insert_behaviour.py
index 29c3af6..031a54c 100644
--- a/tests/custom_cluster/test_insert_behaviour.py
+++ b/tests/custom_cluster/test_insert_behaviour.py
@@ -18,7 +18,7 @@
 import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal
+from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal, SkipIfGCS
 from tests.util.filesystem_utils import IS_ISILON, WAREHOUSE
 from tests.util.hdfs_util import (
     HdfsConfig,
@@ -28,6 +28,7 @@ from tests.util.hdfs_util import (
 TEST_TBL = "insert_inherit_permission"
 
 @SkipIfS3.hdfs_acls
+@SkipIfGCS.hdfs_acls
 @SkipIfABFS.hdfs_acls
 @SkipIfADLS.hdfs_acls
 class TestInsertBehaviourCustomCluster(CustomClusterTestSuite):
diff --git a/tests/custom_cluster/test_lineage.py b/tests/custom_cluster/test_lineage.py
index 8887632..e01df0d 100644
--- a/tests/custom_cluster/test_lineage.py
+++ b/tests/custom_cluster/test_lineage.py
@@ -26,7 +26,7 @@ import tempfile
 import time
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfABFS, SkipIfADLS, SkipIfS3
+from tests.common.skip import SkipIfABFS, SkipIfADLS, SkipIfS3, SkipIfGCS
 
 LOG = logging.getLogger(__name__)
 
@@ -152,6 +152,7 @@ class TestLineage(CustomClusterTestSuite):
   @SkipIfABFS.hbase
   @SkipIfADLS.hbase
   @SkipIfS3.hbase
+  @SkipIfGCS.hbase
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--lineage_event_log_dir={0}"
                                     .format(LINEAGE_TESTS_DIR))
diff --git a/tests/custom_cluster/test_local_catalog.py b/tests/custom_cluster/test_local_catalog.py
index 98d6e35..e6ade3c 100644
--- a/tests/custom_cluster/test_local_catalog.py
+++ b/tests/custom_cluster/test_local_catalog.py
@@ -27,7 +27,7 @@ import time
 from multiprocessing.pool import ThreadPool
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import (SkipIfHive2, SkipIfS3, SkipIfABFS,
+from tests.common.skip import (SkipIfHive2, SkipIfS3, SkipIfABFS, SkipIfGCS,
                                SkipIfADLS, SkipIfIsilon, SkipIfLocal)
 from tests.util.filesystem_utils import WAREHOUSE
 
@@ -535,6 +535,7 @@ class TestFullAcid(CustomClusterTestSuite):
   @SkipIfS3.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
+  @SkipIfGCS.hive
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @pytest.mark.execute_serially
diff --git a/tests/custom_cluster/test_local_tz_conversion.py b/tests/custom_cluster/test_local_tz_conversion.py
index 0bddd02..fefcfc1 100644
--- a/tests/custom_cluster/test_local_tz_conversion.py
+++ b/tests/custom_cluster/test_local_tz_conversion.py
@@ -19,7 +19,7 @@ import pytest
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_vector import ImpalaTestDimension
-from tests.common.skip import SkipIfABFS, SkipIfADLS, SkipIfS3
+from tests.common.skip import SkipIfABFS, SkipIfADLS, SkipIfS3, SkipIfGCS
 from tests.common.test_dimensions import create_exec_option_dimension
 
 class TestLocalTzConversion(CustomClusterTestSuite):
@@ -50,6 +50,7 @@ class TestLocalTzConversion(CustomClusterTestSuite):
   @SkipIfABFS.hbase
   @SkipIfADLS.hbase
   @SkipIfS3.hbase
+  @SkipIfGCS.hbase
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args("--use_local_tz_for_unix_timestamp_conversions=true")
   def test_timestamp_functions(self, vector):
diff --git a/tests/custom_cluster/test_metadata_replicas.py b/tests/custom_cluster/test_metadata_replicas.py
index 1d821a0..3e89070 100644
--- a/tests/custom_cluster/test_metadata_replicas.py
+++ b/tests/custom_cluster/test_metadata_replicas.py
@@ -23,11 +23,13 @@ from tests.common.skip import (
     SkipIfS3,
     SkipIfABFS,
     SkipIfADLS,
+    SkipIfGCS,
     SkipIfIsilon,
     SkipIfLocal)
 from tests.util.hive_utils import HiveDbWrapper
 
 @SkipIfS3.hive
+@SkipIfGCS.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 @SkipIfIsilon.hive
diff --git a/tests/custom_cluster/test_parquet_max_page_header.py b/tests/custom_cluster/test_parquet_max_page_header.py
index 7e32d80..e6f80f2 100644
--- a/tests/custom_cluster/test_parquet_max_page_header.py
+++ b/tests/custom_cluster/test_parquet_max_page_header.py
@@ -24,7 +24,7 @@ import string
 import subprocess
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon
+from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfGCS
 
 class TestParquetMaxPageHeader(CustomClusterTestSuite):
   '''This tests large page headers in parquet files. Parquet page header size can
@@ -101,6 +101,7 @@ class TestParquetMaxPageHeader(CustomClusterTestSuite):
     put.wait()
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
diff --git a/tests/custom_cluster/test_permanent_udfs.py b/tests/custom_cluster/test_permanent_udfs.py
index 5c24981..104252e 100644
--- a/tests/custom_cluster/test_permanent_udfs.py
+++ b/tests/custom_cluster/test_permanent_udfs.py
@@ -24,7 +24,8 @@ import subprocess
 
 from tempfile import mkdtemp
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfGCS,
+                               SkipIfLocal)
 from tests.common.test_dimensions import create_uncompressed_text_dimension
 from tests.util.filesystem_utils import get_fs_path
 
@@ -162,6 +163,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfLocal.hive
@@ -184,6 +186,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfLocal.hive
@@ -249,6 +252,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfLocal.hive
@@ -312,6 +316,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfLocal.hive
diff --git a/tests/custom_cluster/test_query_retries.py b/tests/custom_cluster/test_query_retries.py
index 5a435f3..5ce74cd 100644
--- a/tests/custom_cluster/test_query_retries.py
+++ b/tests/custom_cluster/test_query_retries.py
@@ -34,7 +34,7 @@ from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.errors import Timeout
-from tests.common.skip import SkipIfEC, SkipIfBuildType
+from tests.common.skip import SkipIfEC, SkipIfBuildType, SkipIfGCS
 from tests.common.skip import SkipIfNotHdfsMinicluster
 
 # The BE krpc port of the impalad to simulate rpc or disk errors in tests.
@@ -283,6 +283,7 @@ class TestQueryRetries(CustomClusterTestSuite):
     self.client.close_query(handle)
     self.__validate_web_ui_state()
 
+  @SkipIfGCS.jira(reason="IMPALA-10562")
   @SkipIfBuildType.not_dev_build
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
@@ -348,6 +349,7 @@ class TestQueryRetries(CustomClusterTestSuite):
     self.client.close_query(handle)
     self.__validate_web_ui_state()
 
+  @SkipIfGCS.jira(reason="IMPALA-10562")
   @SkipIfBuildType.not_dev_build
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
diff --git a/tests/custom_cluster/test_restart_services.py b/tests/custom_cluster/test_restart_services.py
index d08c293..4115ef0 100644
--- a/tests/custom_cluster/test_restart_services.py
+++ b/tests/custom_cluster/test_restart_services.py
@@ -35,7 +35,7 @@ from TCLIService import TCLIService
 from beeswaxd.BeeswaxService import QueryState
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfNotHdfsMinicluster
+from tests.common.skip import SkipIfNotHdfsMinicluster, SkipIfGCS
 from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
 
 LOG = logging.getLogger(__name__)
@@ -256,6 +256,7 @@ class TestGracefulShutdown(CustomClusterTestSuite, HS2TestSuite):
   def get_workload(cls):
     return 'functional-query'
 
+  @SkipIfGCS.jira(reason="IMPALA-10562")
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(
       impalad_args="--shutdown_grace_period_s={grace_period} \
diff --git a/tests/custom_cluster/test_topic_update_frequency.py b/tests/custom_cluster/test_topic_update_frequency.py
index 65f3f73..66ad692 100644
--- a/tests/custom_cluster/test_topic_update_frequency.py
+++ b/tests/custom_cluster/test_topic_update_frequency.py
@@ -15,10 +15,11 @@ import pytest
 import time
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfS3
+from tests.common.skip import SkipIfS3, SkipIfGCS
 
 
 @SkipIfS3.variable_listing_times
+@SkipIfGCS.variable_listing_times
 class TestTopicUpdateFrequency(CustomClusterTestSuite):
 
   @pytest.mark.execute_serially
diff --git a/tests/data_errors/test_data_errors.py b/tests/data_errors/test_data_errors.py
index 623298a..b61e66f 100644
--- a/tests/data_errors/test_data_errors.py
+++ b/tests/data_errors/test_data_errors.py
@@ -25,7 +25,8 @@ import subprocess
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal
+from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfGCS,
+                               SkipIfLocal)
 from tests.common.test_dimensions import create_exec_option_dimension
 
 class TestDataErrors(ImpalaTestSuite):
@@ -106,6 +107,7 @@ class TestHdfsUnknownErrors(ImpalaTestSuite):
       assert "Safe mode is OFF" in output
 
 @SkipIfS3.qualified_path
+@SkipIfGCS.qualified_path
 @SkipIfABFS.qualified_path
 @SkipIfADLS.qualified_path
 class TestHdfsScanNodeErrors(TestDataErrors):
@@ -125,6 +127,7 @@ class TestHdfsScanNodeErrors(TestDataErrors):
     self.run_test_case('DataErrorsTest/hdfs-scan-node-errors', vector)
 
 @SkipIfS3.qualified_path
+@SkipIfGCS.qualified_path
 @SkipIfABFS.qualified_path
 @SkipIfADLS.qualified_path
 @SkipIfLocal.qualified_path
@@ -141,6 +144,7 @@ class TestHdfsSeqScanNodeErrors(TestHdfsScanNodeErrors):
 
 
 @SkipIfS3.qualified_path
+@SkipIfGCS.qualified_path
 @SkipIfABFS.qualified_path
 @SkipIfADLS.qualified_path
 class TestHdfsRcFileScanNodeErrors(TestHdfsScanNodeErrors):
diff --git a/tests/failure/test_failpoints.py b/tests/failure/test_failpoints.py
index 033b485..c8244f9 100644
--- a/tests/failure/test_failpoints.py
+++ b/tests/failure/test_failpoints.py
@@ -27,8 +27,8 @@ from time import sleep
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
-from tests.common.skip import SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, \
-    SkipIfLocal
+from tests.common.skip import (SkipIf, SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
+                               SkipIfGCS, SkipIfLocal)
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_vector import ImpalaTestDimension
 from tests.verifiers.metric_verifier import MetricVerifier
@@ -63,6 +63,7 @@ QUERIES = [
 
 @SkipIf.skip_hbase # -skip_hbase argument specified
 @SkipIfS3.hbase # S3: missing coverage: failures
+@SkipIfGCS.hbase
 @SkipIfABFS.hbase
 @SkipIfADLS.hbase
 @SkipIfIsilon.hbase # ISILON: missing coverage: failures.
diff --git a/tests/metadata/test_catalogd_debug_actions.py b/tests/metadata/test_catalogd_debug_actions.py
index ade7e87..115ae2c 100644
--- a/tests/metadata/test_catalogd_debug_actions.py
+++ b/tests/metadata/test_catalogd_debug_actions.py
@@ -17,10 +17,11 @@
 
 import pytest
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3
+from tests.common.skip import SkipIfS3, SkipIfGCS
 
 
 @SkipIfS3.variable_listing_times
+@SkipIfGCS.variable_listing_times
 class TestDebugActions(ImpalaTestSuite):
 
   @pytest.mark.execute_serially
diff --git a/tests/metadata/test_compute_stats.py b/tests/metadata/test_compute_stats.py
index 58754a9..5057768 100644
--- a/tests/metadata/test_compute_stats.py
+++ b/tests/metadata/test_compute_stats.py
@@ -22,7 +22,7 @@ from tests.common.environ import ImpalaTestClusterProperties
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-    SkipIfLocal, SkipIfCatalogV2)
+                               SkipIfGCS, SkipIfLocal, SkipIfCatalogV2)
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_single_exec_option_dimension,
@@ -114,6 +114,7 @@ class TestComputeStats(ImpalaTestSuite):
         self.execute_query_expect_success(self.client, "drop stats {0}".format(table))
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -193,6 +194,7 @@ class TestComputeStats(ImpalaTestSuite):
          assert("cardinality=0" not in explain_result.data[i + 2])
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -240,6 +242,7 @@ class TestComputeStats(ImpalaTestSuite):
             table_name, 2, 2)
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
diff --git a/tests/metadata/test_ddl.py b/tests/metadata/test_ddl.py
index 455a1da..419e36c 100644
--- a/tests/metadata/test_ddl.py
+++ b/tests/metadata/test_ddl.py
@@ -27,7 +27,7 @@ from tests.common.environ import (HIVE_MAJOR_VERSION)
 from tests.common.impala_test_suite import LOG
 from tests.common.parametrize import UniqueDatabase
 from tests.common.skip import (SkipIf, SkipIfABFS, SkipIfADLS, SkipIfKudu, SkipIfLocal,
-                               SkipIfCatalogV2, SkipIfHive2, SkipIfS3)
+                               SkipIfCatalogV2, SkipIfHive2, SkipIfS3, SkipIfGCS)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.filesystem_utils import (
     WAREHOUSE,
@@ -297,6 +297,7 @@ class TestDdlStatements(TestDdlBase):
 
   @SkipIfHive2.orc
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @UniqueDatabase.parametrize(sync_ddl=True)
   def test_create_table_like_file_orc(self, vector, unique_database):
     COMPLEXTYPETBL_PATH = 'test-warehouse/managed/complextypestbl_orc_def/'
diff --git a/tests/metadata/test_hdfs_encryption.py b/tests/metadata/test_hdfs_encryption.py
index fef4ac1..470227b 100644
--- a/tests/metadata/test_hdfs_encryption.py
+++ b/tests/metadata/test_hdfs_encryption.py
@@ -19,7 +19,8 @@ import getpass
 import pytest
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
+                               SkipIfGCS, SkipIfLocal)
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -34,6 +35,7 @@ TMP_DIR = '/%s' % (PYWEBHDFS_TMP_DIR)
 
 
 @SkipIfS3.hdfs_encryption
+@SkipIfGCS.hdfs_encryption
 @SkipIfABFS.hdfs_encryption
 @SkipIfADLS.hdfs_encryption
 @SkipIfIsilon.hdfs_encryption
diff --git a/tests/metadata/test_hdfs_permissions.py b/tests/metadata/test_hdfs_permissions.py
index 192920c..3d0cccc 100644
--- a/tests/metadata/test_hdfs_permissions.py
+++ b/tests/metadata/test_hdfs_permissions.py
@@ -17,7 +17,7 @@
 
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal,
-                               SkipIfCatalogV2)
+                               SkipIfGCS, SkipIfCatalogV2)
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -29,6 +29,7 @@ TBL_LOC = '%s/%s' % (WAREHOUSE, TEST_TBL)
 
 
 @SkipIfS3.hdfs_acls
+@SkipIfGCS.hdfs_acls
 @SkipIfABFS.hdfs_acls
 @SkipIfADLS.hdfs_acls
 @SkipIfLocal.hdfs_client
diff --git a/tests/metadata/test_hms_integration.py b/tests/metadata/test_hms_integration.py
index 2f3d29e..6f6ede7 100644
--- a/tests/metadata/test_hms_integration.py
+++ b/tests/metadata/test_hms_integration.py
@@ -32,7 +32,7 @@ from subprocess import call
 from tests.common.environ import HIVE_MAJOR_VERSION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfHive2, SkipIfHive3,
-    SkipIfIsilon, SkipIfLocal, SkipIfCatalogV2)
+                               SkipIfIsilon, SkipIfGCS, SkipIfLocal, SkipIfCatalogV2)
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -40,6 +40,7 @@ from tests.util.hive_utils import HiveDbWrapper, HiveTableWrapper
 
 
 @SkipIfS3.hive
+@SkipIfGCS.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 @SkipIfIsilon.hive
@@ -89,6 +90,7 @@ class TestHmsIntegrationSanity(ImpalaTestSuite):
     assert 'test_tbl' in self.client.execute("show tables in hms_sanity_db").data
 
 @SkipIfS3.hive
+@SkipIfGCS.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 @SkipIfIsilon.hive
diff --git a/tests/metadata/test_metadata_query_statements.py b/tests/metadata/test_metadata_query_statements.py
index 2979ce2..194c330 100644
--- a/tests/metadata/test_metadata_query_statements.py
+++ b/tests/metadata/test_metadata_query_statements.py
@@ -23,7 +23,7 @@ import re
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfIsilon, SkipIfS3, SkipIfABFS, SkipIfADLS,
-                               SkipIfLocal, SkipIfCatalogV2)
+                               SkipIfGCS, SkipIfLocal, SkipIfCatalogV2)
 from tests.common.test_dimensions import ALL_NODES_ONLY
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_dimensions import create_uncompressed_text_dimension
@@ -76,6 +76,7 @@ class TestMetadataQueryStatements(ImpalaTestSuite):
   # data doesn't reside in hdfs.
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfLocal.hive
@@ -167,6 +168,7 @@ class TestMetadataQueryStatements(ImpalaTestSuite):
       self.client.execute(self.CREATE_DATA_SRC_STMT % (name,))
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
diff --git a/tests/metadata/test_partition_metadata.py b/tests/metadata/test_partition_metadata.py
index 5d7d109..3c551a6 100644
--- a/tests/metadata/test_partition_metadata.py
+++ b/tests/metadata/test_partition_metadata.py
@@ -17,7 +17,8 @@
 
 import pytest
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
+                               SkipIfGCS, SkipIfLocal)
 from tests.common.test_dimensions import (create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
 from tests.util.filesystem_utils import get_fs_path, WAREHOUSE, FILESYSTEM_PREFIX
@@ -89,6 +90,7 @@ class TestPartitionMetadata(ImpalaTestSuite):
     assert data.split('\t') == ['6', '9']
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
diff --git a/tests/metadata/test_refresh_partition.py b/tests/metadata/test_refresh_partition.py
index 58d142b..b128b71 100644
--- a/tests/metadata/test_refresh_partition.py
+++ b/tests/metadata/test_refresh_partition.py
@@ -17,11 +17,13 @@ from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.common.test_dimensions import create_uncompressed_text_dimension
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
+                               SkipIfGCS, SkipIfLocal)
 from tests.util.filesystem_utils import get_fs_path
 
 
 @SkipIfS3.hive
+@SkipIfGCS.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 @SkipIfIsilon.hive
diff --git a/tests/metadata/test_reset_metadata.py b/tests/metadata/test_reset_metadata.py
index 4576846..024141e 100644
--- a/tests/metadata/test_reset_metadata.py
+++ b/tests/metadata/test_reset_metadata.py
@@ -16,7 +16,7 @@
 # under the License.
 
 from test_ddl_base import TestDdlBase
-from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS,
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfGCS,
                                SkipIfIsilon, SkipIfLocal)
 
 
@@ -37,6 +37,7 @@ class TestResetMetadata(TestDdlBase):
     self.client.execute('refresh functions %s' % unique_database.upper())
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
diff --git a/tests/metadata/test_stale_metadata.py b/tests/metadata/test_stale_metadata.py
index 223b454..e664b97 100644
--- a/tests/metadata/test_stale_metadata.py
+++ b/tests/metadata/test_stale_metadata.py
@@ -19,7 +19,7 @@ from subprocess import check_call
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3
+from tests.common.skip import SkipIfS3, SkipIfGCS
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.filesystem_utils import get_fs_path
 
@@ -77,11 +77,16 @@ class TestRewrittenFile(ImpalaTestSuite):
     assert result.data == [str(expected_new_count)]
 
   @SkipIfS3.jira(reason="IMPALA-2512")
+  @SkipIfGCS.jira(reason="IMPALA-2512")
   def test_new_file_shorter(self, vector, unique_database):
     """Rewrites an existing file with a new shorter file."""
     # Full error is something like:
     #   Metadata for file '...' appears stale. Try running "refresh
     #   unique_database_name.new_file_shorter" to reload the file metadata.
+    # IMPALA-2512: Error message could also be something like
+    #   Query aborted:Disk I/O error on ...:27001: Error seeking ...
+    #   between 0 and ... for '...'
+    # TODO: find a better way to detect stale file metadata and remove skip markers.
     table_name = "new_file_shorter"
     self.__overwrite_file_and_query(unique_database, table_name,
       self.LONG_FILE, self.SHORT_FILE, 'appears stale.', self.SHORT_FILE_NUM_ROWS)
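
Given that the expected error text now varies by filesystem (see the
IMPALA-2512 note above), one way the TODO could be resolved is a helper that
accepts several acceptable substrings. A hypothetical sketch, not part of this
patch:

    # Hypothetical helper: pass when any acceptable error substring appears.
    # The two messages below are the ones quoted in the comments above.
    def assert_error_matches_any(error_text, expected_substrings):
      assert any(s in error_text for s in expected_substrings), \
          "unexpected error: %s" % error_text

    # assert_error_matches_any(str(err), ["appears stale.", "Error seeking"])
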
diff --git a/tests/metadata/test_testcase_builder.py b/tests/metadata/test_testcase_builder.py
index 542e2f6..161200b 100644
--- a/tests/metadata/test_testcase_builder.py
+++ b/tests/metadata/test_testcase_builder.py
@@ -38,6 +38,9 @@ class TestTestcaseBuilder(ImpalaTestSuite):
 
   def test_query_without_from(self):
     tmp_path = get_fs_path("/tmp")
+    # Make sure /tmp dir exists
+    if not self.filesystem_client.exists(tmp_path):
+      self.filesystem_client.make_dir('tmp')
     # Generate Testcase Data for query without table reference
     testcase_generate_query = """COPY TESTCASE TO '%s' SELECT 5 * 20""" % tmp_path
     result = self.execute_query_expect_success(self.client, testcase_generate_query)
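
The exists/make_dir guard added above is needed because object stores such as
GCS have no implicit directories, so a fresh test bucket may lack /tmp
entirely. The same pattern as a hypothetical helper:

    # Minimal sketch of the guard above; ensure_tmp_dir is a hypothetical name.
    def ensure_tmp_dir(filesystem_client, tmp_path):
      # tmp_path is the filesystem-qualified form of "/tmp" (via get_fs_path);
      # make_dir takes the root-relative form, mirroring the call above.
      if not filesystem_client.exists(tmp_path):
        filesystem_client.make_dir('tmp')
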
diff --git a/tests/metadata/test_views_compatibility.py b/tests/metadata/test_views_compatibility.py
index c0716a5..031413f 100644
--- a/tests/metadata/test_views_compatibility.py
+++ b/tests/metadata/test_views_compatibility.py
@@ -24,7 +24,8 @@ from subprocess import call
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.environ import HIVE_MAJOR_VERSION
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
+                               SkipIfGCS, SkipIfLocal)
 from tests.common.test_dimensions import create_uncompressed_text_dimension
 from tests.util.test_file_parser import QueryTestSectionReader
 
@@ -48,6 +49,7 @@ from tests.util.test_file_parser import QueryTestSectionReader
 # Missing Coverage: Views created by Hive and Impala being visible and queryable by
 # each other on non-HDFS storage.
 @SkipIfS3.hive
+@SkipIfGCS.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 @SkipIfIsilon.hive
diff --git a/tests/query_test/test_acid.py b/tests/query_test/test_acid.py
index 304b6b8..dee59ee 100644
--- a/tests/query_test/test_acid.py
+++ b/tests/query_test/test_acid.py
@@ -25,7 +25,7 @@ from hive_metastore.ttypes import CommitTxnRequest, OpenTxnRequest
 from subprocess import check_call
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIf, SkipIfHive2, SkipIfCatalogV2, SkipIfS3, SkipIfABFS,
-                               SkipIfADLS, SkipIfIsilon, SkipIfLocal)
+                               SkipIfADLS, SkipIfIsilon, SkipIfGCS, SkipIfLocal)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 
 
@@ -45,6 +45,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -62,6 +63,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -71,6 +73,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -80,6 +83,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -91,6 +95,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -100,6 +105,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -119,6 +125,7 @@ class TestAcid(ImpalaTestSuite):
   @SkipIfHive2.acid
   @SkipIfCatalogV2.hms_event_polling_enabled()
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -132,6 +139,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -154,6 +162,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -170,6 +179,7 @@ class TestAcid(ImpalaTestSuite):
     assert "2" in result
 
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -188,6 +198,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -202,6 +213,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
@@ -280,6 +292,7 @@ class TestAcid(ImpalaTestSuite):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
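
Nearly every test in test_acid.py above now carries one skip marker per
unsupported filesystem. pytest evaluates stacked skipif markers independently,
and the test body runs only when no condition holds; a self-contained
illustration with invented stand-in flags:

    import pytest

    # Stand-ins for the IS_* flags from tests/util/filesystem_utils.
    IS_S3 = False
    IS_GCS = True

    @pytest.mark.skipif(IS_S3, reason="Hive interop untested on S3")
    @pytest.mark.skipif(IS_GCS, reason="Hive interop untested on GCS")
    def test_hive_interop():
      # With IS_GCS set above, pytest reports this test as skipped.
      assert True
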
diff --git a/tests/query_test/test_aggregation.py b/tests/query_test/test_aggregation.py
index 6aac406..14f57dc 100644
--- a/tests/query_test/test_aggregation.py
+++ b/tests/query_test/test_aggregation.py
@@ -21,7 +21,6 @@ import pytest
 
 from testdata.common import widetable
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_exec_option_dimension_from_dict,
diff --git a/tests/query_test/test_date_queries.py b/tests/query_test/test_date_queries.py
index 46ca0fa..7bc9be4 100644
--- a/tests/query_test/test_date_queries.py
+++ b/tests/query_test/test_date_queries.py
@@ -20,7 +20,7 @@
 import pytest
 from tests.common.file_utils import create_table_and_copy_files
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal
+from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal, SkipIfGCS
 from tests.common.test_dimensions import (create_exec_option_dimension_from_dict,
     create_client_protocol_dimension, hs2_parquet_constraint)
 from tests.shell.util import ImpalaShell
@@ -71,6 +71,7 @@ class TestDateQueries(ImpalaTestSuite):
     self.run_test_case('QueryTest/date-partitioning', vector, use_db=unique_database)
 
   @SkipIfS3.qualified_path
+  @SkipIfGCS.qualified_path
   @SkipIfABFS.qualified_path
   @SkipIfADLS.qualified_path
   @SkipIfLocal.qualified_path
diff --git a/tests/query_test/test_hbase_queries.py b/tests/query_test/test_hbase_queries.py
index ae68fa4..d28ef4a 100644
--- a/tests/query_test/test_hbase_queries.py
+++ b/tests/query_test/test_hbase_queries.py
@@ -22,6 +22,7 @@ import pytest
 from tests.common.skip import (
     SkipIfIsilon,
     SkipIfS3,
+    SkipIfGCS,
     SkipIfABFS,
     SkipIfADLS,
     SkipIfLocal)
@@ -66,6 +67,7 @@ class TestHBaseQueries(ImpalaTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfLocal.hive
diff --git a/tests/query_test/test_hdfs_caching.py b/tests/query_test/test_hdfs_caching.py
index cf77b6e..6a7eda3 100644
--- a/tests/query_test/test_hdfs_caching.py
+++ b/tests/query_test/test_hdfs_caching.py
@@ -26,13 +26,15 @@ from tests.common.environ import build_flavor_timeout, IS_DOCKERIZED_TEST_CLUSTE
 from tests.common.impala_cluster import ImpalaCluster
 from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-    SkipIfLocal, SkipIfEC, SkipIfDockerizedCluster, SkipIfCatalogV2)
+                               SkipIfGCS, SkipIfLocal, SkipIfEC, SkipIfDockerizedCluster,
+                               SkipIfCatalogV2)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.util.filesystem_utils import get_fs_path
 from tests.util.shell_util import exec_process
 
 # End-to-end test that hdfs caching is working.
 @SkipIfS3.caching # S3: missing coverage: verify SET CACHED gives error
+@SkipIfGCS.caching
 @SkipIfABFS.caching
 @SkipIfADLS.caching
 @SkipIfIsilon.caching
@@ -113,6 +115,7 @@ class TestHdfsCaching(ImpalaTestSuite):
 # run as a part of exhaustive tests which require the workload to be 'functional-query'.
 # TODO: Move this to TestHdfsCaching once we make exhaustive tests run for other workloads
 @SkipIfS3.caching
+@SkipIfGCS.caching
 @SkipIfABFS.caching
 @SkipIfADLS.caching
 @SkipIfIsilon.caching
@@ -123,6 +126,7 @@ class TestHdfsCachingFallbackPath(ImpalaTestSuite):
     return 'functional-query'
 
   @SkipIfS3.hdfs_encryption
+  @SkipIfGCS.hdfs_encryption
   @SkipIfABFS.hdfs_encryption
   @SkipIfADLS.hdfs_encryption
   @SkipIfIsilon.hdfs_encryption
@@ -175,6 +179,7 @@ class TestHdfsCachingFallbackPath(ImpalaTestSuite):
 
 
 @SkipIfS3.caching
+@SkipIfGCS.caching
 @SkipIfABFS.caching
 @SkipIfADLS.caching
 @SkipIfIsilon.caching
diff --git a/tests/query_test/test_insert_behaviour.py b/tests/query_test/test_insert_behaviour.py
index fc622b1..5be8c94 100644
--- a/tests/query_test/test_insert_behaviour.py
+++ b/tests/query_test/test_insert_behaviour.py
@@ -24,7 +24,8 @@ import re
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-    SkipIfLocal, SkipIfDockerizedCluster, SkipIfCatalogV2)
+                               SkipIfGCS, SkipIfLocal, SkipIfDockerizedCluster,
+                               SkipIfCatalogV2)
 from tests.util.filesystem_utils import WAREHOUSE, get_fs_path, IS_S3
 
 @SkipIfLocal.hdfs_client
@@ -133,6 +134,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     assert len(self.filesystem_client.ls(part_dir)) == 1
 
   @SkipIfS3.hdfs_acls
+  @SkipIfGCS.hdfs_acls
   @SkipIfABFS.hdfs_acls
   @SkipIfADLS.hdfs_acls
   @SkipIfIsilon.hdfs_acls
@@ -195,6 +197,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     check_has_acls("p1=1/p2=2/p3=30", "default:group:new_leaf_group:-w-")
 
   @SkipIfS3.hdfs_acls
+  @SkipIfGCS.hdfs_acls
   @SkipIfABFS.hdfs_acls
   @SkipIfADLS.hdfs_acls
   @SkipIfIsilon.hdfs_acls
@@ -248,6 +251,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.execute_query_expect_success(self.client, insert_query)
 
   @SkipIfS3.hdfs_acls
+  @SkipIfGCS.hdfs_acls
   @SkipIfABFS.hdfs_acls
   @SkipIfADLS.hdfs_acls
   @SkipIfIsilon.hdfs_acls
@@ -330,6 +334,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     load_data(self.execute_query_expect_success, "added_part")
 
   @SkipIfS3.hdfs_acls
+  @SkipIfGCS.hdfs_acls
   @SkipIfABFS.hdfs_acls
   @SkipIfADLS.hdfs_acls
   @SkipIfIsilon.hdfs_acls
@@ -363,6 +368,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     assert re.search(r'Impala does not have WRITE access.*' + table_path, str(err))
 
   @SkipIfS3.hdfs_acls
+  @SkipIfGCS.hdfs_acls
   @SkipIfABFS.hdfs_acls
   @SkipIfADLS.hdfs_acls
   @SkipIfIsilon.hdfs_acls
@@ -444,6 +450,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.execute_query_expect_success(self.client, insert_query)
 
   @SkipIfS3.hdfs_acls
+  @SkipIfGCS.hdfs_acls
   @SkipIfABFS.hdfs_acls
   @SkipIfADLS.hdfs_acls
   @SkipIfIsilon.hdfs_acls
@@ -570,6 +577,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.execute_query_expect_failure(self.client, insert_query)
 
   @SkipIfS3.hdfs_acls
+  @SkipIfGCS.hdfs_acls
   @SkipIfABFS.hdfs_acls
   @SkipIfADLS.hdfs_acls
   @SkipIfIsilon.hdfs_acls
diff --git a/tests/query_test/test_insert_parquet.py b/tests/query_test/test_insert_parquet.py
index f77ff3f..72885d5 100644
--- a/tests/query_test/test_insert_parquet.py
+++ b/tests/query_test/test_insert_parquet.py
@@ -29,7 +29,7 @@ from tests.common.environ import impalad_basedir
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
 from tests.common.skip import (SkipIfEC, SkipIfIsilon, SkipIfLocal, SkipIfS3, SkipIfABFS,
-    SkipIfADLS)
+                               SkipIfADLS, SkipIfGCS)
 from tests.common.test_dimensions import create_exec_option_dimension
 from tests.common.test_result_verifier import verify_query_result_is_equal
 from tests.common.test_vector import ImpalaTestDimension
@@ -537,6 +537,7 @@ class TestHdfsParquetTableWriter(ImpalaTestSuite):
 @SkipIfIsilon.hive
 @SkipIfLocal.hive
 @SkipIfS3.hive
+@SkipIfGCS.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 # TODO: Should we move this to test_parquet_stats.py?
diff --git a/tests/query_test/test_insert_permutation.py b/tests/query_test/test_insert_permutation.py
index dacdf7e..46d1090 100644
--- a/tests/query_test/test_insert_permutation.py
+++ b/tests/query_test/test_insert_permutation.py
@@ -18,7 +18,6 @@
 # Targeted Impala insert tests
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3
 from tests.common.test_dimensions import (
     create_exec_option_dimension,
     create_uncompressed_text_dimension)
diff --git a/tests/query_test/test_join_queries.py b/tests/query_test/test_join_queries.py
index 4cba21b..669aa26 100644
--- a/tests/query_test/test_join_queries.py
+++ b/tests/query_test/test_join_queries.py
@@ -26,6 +26,7 @@ from tests.common.skip import (
     SkipIfIsilon,
     SkipIfLocal,
     SkipIfS3,
+    SkipIfGCS,
     SkipIfABFS,
     SkipIfADLS)
 from tests.common.test_vector import ImpalaTestDimension
@@ -79,6 +80,7 @@ class TestJoinQueries(ImpalaTestSuite):
     self.run_test_case('QueryTest/single-node-joins-with-limits-exhaustive', new_vector)
 
   @SkipIfS3.hbase
+  @SkipIfGCS.hbase
   @SkipIfABFS.hbase
   @SkipIfADLS.hbase
   @SkipIfIsilon.hbase
diff --git a/tests/query_test/test_nested_types.py b/tests/query_test/test_nested_types.py
index f434729..fd1189b 100644
--- a/tests/query_test/test_nested_types.py
+++ b/tests/query_test/test_nested_types.py
@@ -26,6 +26,7 @@ from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (
     SkipIfIsilon,
     SkipIfS3,
+    SkipIfGCS,
     SkipIfABFS,
     SkipIfADLS,
     SkipIfEC,
@@ -160,6 +161,7 @@ class TestNestedTypesNoMtDop(ImpalaTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfLocal.hive
@@ -218,6 +220,7 @@ class TestNestedTypesNoMtDop(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfHive2.acid
   def test_partitioned_table_acid(self, vector, unique_database):
     """IMPALA-6370: Test that a partitioned table with nested types can be scanned."""
@@ -719,6 +722,7 @@ class TestMaxNestingDepth(ImpalaTestSuite):
 
   @SkipIfIsilon.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfLocal.hive
diff --git a/tests/query_test/test_observability.py b/tests/query_test/test_observability.py
index 57d09f1..7148cc8 100644
--- a/tests/query_test/test_observability.py
+++ b/tests/query_test/test_observability.py
@@ -20,7 +20,7 @@ from datetime import datetime
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
-                               SkipIfLocal, SkipIfNotHdfsMinicluster)
+                               SkipIfGCS, SkipIfLocal, SkipIfNotHdfsMinicluster)
 from tests.util.filesystem_utils import IS_EC
 from time import sleep
 from RuntimeProfile.ttypes import TRuntimeProfileFormat
@@ -92,6 +92,7 @@ class TestObservability(ImpalaTestSuite):
     self.hs2_client.close_query(handle)
 
   @SkipIfS3.hbase
+  @SkipIfGCS.hbase
   @SkipIfLocal.hbase
   @SkipIfIsilon.hbase
   @SkipIfABFS.hbase
@@ -671,6 +672,7 @@ class TestObservability(ImpalaTestSuite):
         cluster_properties)
 
   @SkipIfS3.hbase
+  @SkipIfGCS.hbase
   @SkipIfLocal.hbase
   @SkipIfIsilon.hbase
   @SkipIfABFS.hbase
diff --git a/tests/query_test/test_partitioning.py b/tests/query_test/test_partitioning.py
index bb554ed..4af89f0 100644
--- a/tests/query_test/test_partitioning.py
+++ b/tests/query_test/test_partitioning.py
@@ -20,7 +20,8 @@ import pytest
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
+                               SkipIfGCS, SkipIfLocal)
 from tests.common.test_dimensions import create_single_exec_option_dimension
 
 # Tests to validate HDFS partitioning.
@@ -47,6 +48,7 @@ class TestPartitioning(ImpalaTestSuite):
   # Missing Coverage: Impala deals with boolean partitions created by Hive on a non-hdfs
   # filesystem.
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfABFS.hive
   @SkipIfADLS.hive
   @SkipIfIsilon.hive
diff --git a/tests/query_test/test_resource_limits.py b/tests/query_test/test_resource_limits.py
index c32bce9..bb9e7c6 100644
--- a/tests/query_test/test_resource_limits.py
+++ b/tests/query_test/test_resource_limits.py
@@ -16,8 +16,8 @@
 # under the License.
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfEC, SkipIfLocal, SkipIfS3, SkipIfABFS, \
-    SkipIfADLS
+from tests.common.skip import (SkipIfEC, SkipIfLocal, SkipIfS3, SkipIfABFS,
+                               SkipIfGCS, SkipIfADLS)
 from tests.common.test_dimensions import create_parquet_dimension
 
 
@@ -46,6 +46,7 @@ class TestResourceLimits(ImpalaTestSuite):
     self.run_test_case('QueryTest/query-resource-limits', vector)
 
   @SkipIfS3.hbase
+  @SkipIfGCS.hbase
   @SkipIfADLS.hbase
   @SkipIfABFS.hbase
   @SkipIfLocal.multiple_impalad
diff --git a/tests/query_test/test_scanners.py b/tests/query_test/test_scanners.py
index d98b0fd..0373395 100644
--- a/tests/query_test/test_scanners.py
+++ b/tests/query_test/test_scanners.py
@@ -36,6 +36,7 @@ from tests.common.impala_test_suite import ImpalaTestSuite, LOG
 from tests.common.skip import (
     SkipIf,
     SkipIfS3,
+    SkipIfGCS,
     SkipIfABFS,
     SkipIfADLS,
     SkipIfEC,
@@ -464,6 +465,7 @@ class TestParquet(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   def test_multi_compression_types(self, vector, unique_database):
     """IMPALA-5448: Tests that parquet splits with multi compression types are counted
     correctly. Cases tested:
@@ -582,6 +584,7 @@ class TestParquet(ImpalaTestSuite):
         vector, unique_database)
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfGCS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
   @SkipIfADLS.hdfs_block_size
   @SkipIfIsilon.hdfs_block_size
@@ -639,6 +642,7 @@ class TestParquet(ImpalaTestSuite):
     assert total == num_scanners_with_no_reads
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfGCS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
   @SkipIfADLS.hdfs_block_size
   @SkipIfIsilon.hdfs_block_size
@@ -678,6 +682,7 @@ class TestParquet(ImpalaTestSuite):
       self.client.clear_configuration()
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfGCS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
   @SkipIfADLS.hdfs_block_size
   @SkipIfIsilon.hdfs_block_size
@@ -694,6 +699,7 @@ class TestParquet(ImpalaTestSuite):
     self._multiple_blocks_helper(table_name, 40000, ranges_per_node=2)
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfGCS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
   @SkipIfADLS.hdfs_block_size
   @SkipIfIsilon.hdfs_block_size
@@ -1300,6 +1306,7 @@ class TestTextScanRangeLengths(ImpalaTestSuite):
 
 # Missing Coverage: No coverage for truncated file errors or scans.
 @SkipIfS3.hive
+@SkipIfGCS.hive
 @SkipIfABFS.hive
 @SkipIfADLS.hive
 @SkipIfIsilon.hive
@@ -1383,6 +1390,7 @@ class TestOrc(ImpalaTestSuite):
       lambda v: v.get_value('table_format').file_format == 'orc')
 
   @SkipIfS3.hdfs_block_size
+  @SkipIfGCS.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
   @SkipIfADLS.hdfs_block_size
   @SkipIfEC.fix_later
@@ -1457,6 +1465,7 @@ class TestOrc(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfHive3.non_acid
   def test_type_conversions_hive2(self, vector, unique_database):
     # Create "illtypes" tables whose columns can't match the underlining ORC file's.
@@ -1506,6 +1515,7 @@ class TestOrc(ImpalaTestSuite):
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @SkipIfHive2.acid
   def test_type_conversions_hive3(self, vector, unique_database):
     # Create "illtypes" tables whose columns can't match the underlining ORC file's.
diff --git a/tests/shell/test_shell_commandline.py b/tests/shell/test_shell_commandline.py
index 30cd85d..7706ff5 100644
--- a/tests/shell/test_shell_commandline.py
+++ b/tests/shell/test_shell_commandline.py
@@ -653,7 +653,9 @@ class TestImpalaShell(ImpalaTestSuite):
 
     assert "Error retrieving LDAP password" in result_1.stderr
     assert "command was: 'cmddoesntexist'" in result_1.stderr
-    assert "No such file or directory" in result_1.stderr
+    # On GCE instances, the error raised by subprocess is "[Errno 13] Permission denied".
+    assert "No such file or directory" in result_1.stderr \
+        or "Permission denied" in result_1.stderr
 
     result_2 = run_impala_shell_cmd(vector,
                                     args + ['--ldap_password_cmd=cat filedoesntexist'],
diff --git a/tests/stress/test_acid_stress.py b/tests/stress/test_acid_stress.py
index 6051723..ef86d57 100644
--- a/tests/stress/test_acid_stress.py
+++ b/tests/stress/test_acid_stress.py
@@ -23,7 +23,7 @@ from multiprocessing import Value
 
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.parametrize import UniqueDatabase
-from tests.common.skip import SkipIfHive2, SkipIfS3
+from tests.common.skip import SkipIfHive2, SkipIfS3, SkipIfGCS
 from tests.stress.stress_util import Task, run_tasks
 
 NUM_OVERWRITES = 2
@@ -160,6 +160,7 @@ class TestAcidInsertsBasic(TestAcidStress):
 
   @SkipIfHive2.acid
   @SkipIfS3.hive
+  @SkipIfGCS.hive
   @pytest.mark.execute_serially
   @pytest.mark.stress
   def test_read_hive_inserts(self, unique_database):
@@ -227,6 +228,7 @@ class TestConcurrentAcidInserts(TestAcidStress):
     finally:
       impalad_client.close()
 
+  @SkipIfGCS.jira(reason="IMPALA-10563")
   @SkipIfHive2.acid
   @pytest.mark.execute_serially
   @pytest.mark.stress
@@ -322,6 +324,7 @@ class TestFailingAcidInserts(TestAcidStress):
                 for i in xrange(0, num_checkers)]
     run_tasks(writers + checkers)
 
+  @SkipIfGCS.jira(reason="IMPALA-10563")
   @SkipIfHive2.acid
   @pytest.mark.execute_serially
   @pytest.mark.stress
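
Unlike the fixed attributes (hive, caching, and so on), SkipIfGCS.jira(reason=...)
above takes the blocking ticket as the skip reason. Assuming the
functools.partial idiom the sibling skip classes use, it reduces to a partially
applied skipif; a sketch with a hard-coded flag:

    from functools import partial

    import pytest

    IS_GCS = True  # stand-in for tests.util.filesystem_utils.IS_GCS

    class SkipIfGCS:
      # Callers supply only the reason, typically the blocking JIRA id.
      jira = partial(pytest.mark.skipif, IS_GCS)

    @SkipIfGCS.jira(reason="IMPALA-10563")
    def test_blocked_on_gcs():
      pass
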
diff --git a/tests/stress/test_ddl_stress.py b/tests/stress/test_ddl_stress.py
index ea077a6..71f76e1 100644
--- a/tests/stress/test_ddl_stress.py
+++ b/tests/stress/test_ddl_stress.py
@@ -18,7 +18,8 @@
 import pytest
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
+from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
+                               SkipIfGCS, SkipIfLocal)
 
 # Number of tables to create per thread
 NUM_TBLS_PER_THREAD = 10
@@ -48,6 +49,7 @@ class TestDdlStress(ImpalaTestSuite):
                    v.get_value('table_format').compression_codec == 'none'))
 
   @SkipIfS3.caching
+  @SkipIfGCS.caching
   @SkipIfABFS.caching
   @SkipIfADLS.caching
   @SkipIfIsilon.caching
diff --git a/tests/util/filesystem_utils.py b/tests/util/filesystem_utils.py
index 5b39d36..24c960d 100644
--- a/tests/util/filesystem_utils.py
+++ b/tests/util/filesystem_utils.py
@@ -31,6 +31,7 @@ IS_LOCAL = FILESYSTEM == "local"
 IS_HDFS = FILESYSTEM == "hdfs"
 IS_ADLS = FILESYSTEM == "adls"
 IS_ABFS = FILESYSTEM == "abfs"
+IS_GCS = FILESYSTEM == "gs"
 IS_EC = os.getenv("ERASURE_CODING") == "true"
 # This condition covers both states in which one can assume a default fs
 #   - The environment variable is set to an empty string.
@@ -56,7 +57,7 @@ ADLS_CLIENT_SECRET = os.getenv("azure_client_secret")
 
 # A map of FILESYSTEM values to their corresponding Scan Node types
 fs_to_name = {'s3': 'S3', 'hdfs': 'HDFS', 'local': 'LOCAL', 'adls': 'ADLS',
-              'abfs': 'ADLS'}
+              'abfs': 'ADLS', 'gs': 'GCS'}
 
 
 def get_fs_name(fs):
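
These module-level flags are what the SkipIfGCS markers and scan-node
assertions key off. A usage sketch under stated assumptions: TARGET_FILESYSTEM
as the environment variable name is an assumption, and get_fs_name's body is
truncated by the diff above, so it is not reproduced here.

    import os

    # How flags like IS_GCS above are derived: one filesystem string from the
    # environment, many booleans.
    FILESYSTEM = os.getenv("TARGET_FILESYSTEM") or "hdfs"
    IS_GCS = FILESYSTEM == "gs"

    fs_to_name = {'s3': 'S3', 'hdfs': 'HDFS', 'local': 'LOCAL', 'adls': 'ADLS',
                  'abfs': 'ADLS', 'gs': 'GCS'}

    # Scan-node assertions can then expect, e.g., "GCS" in plan output:
    print(fs_to_name.get(FILESYSTEM, 'HDFS'))
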