Posted to commits@impala.apache.org by ta...@apache.org on 2019/07/27 07:19:07 UTC

[impala] branch master updated (20b5b1f -> 82f753e)

This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git.


    from 20b5b1f  IMPALA-8794: Skipping testPiggyback* in Hive 3 builds
     new 6dd36d9  IMPALA-8798: Skip TestAutoScaling on EC runs
     new 241a926  IMPALA-8766: Restrict hadoop-cloud-storage to USE_CDP_HIVE=true
     new 82f753e  IMPALA-8783: Add Kerberos SPNEGO support to the http hs2 server

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 be/src/rpc/CMakeLists.txt                 |   3 +
 be/src/rpc/TAcceptQueueServer.cpp         |   8 +-
 be/src/rpc/auth-provider.h                |  24 +++---
 be/src/rpc/authentication.cc              |  97 ++++++++++++++++++-----
 be/src/rpc/kerberos-test.cc               | 111 ++++++++++++++++++++++++++
 be/src/rpc/thrift-server.cc               |  21 +++--
 be/src/rpc/thrift-server.h                |   3 +
 be/src/service/impala-server.cc           |   5 +-
 be/src/transport/THttpServer.cpp          | 127 +++++++++++++++++++++---------
 be/src/transport/THttpServer.h            |  78 ++++++++++++------
 common/thrift/metrics.json                |  20 +++++
 fe/pom.xml                                |  65 ++++++++++-----
 tests/custom_cluster/test_auto_scaling.py |   4 +
 13 files changed, 447 insertions(+), 119 deletions(-)
 create mode 100644 be/src/rpc/kerberos-test.cc


[impala] 02/03: IMPALA-8766: Restrict hadoop-cloud-storage to USE_CDP_HIVE=true

Posted by ta...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 241a9265926e13204f780187144e0e703c2d4e09
Author: Joe McDonnell <jo...@cloudera.com>
AuthorDate: Fri Jul 26 11:25:09 2019 -0700

    IMPALA-8766: Restrict hadoop-cloud-storage to USE_CDP_HIVE=true
    
    The previous commit for IMPALA-8766 switched fe/pom.xml to a
    dependency on hadoop-cloud-storage rather than referencing
    hadoop-aws and hadoop-azure directly. On reflection, it is better
    to limit unnecessary changes to the USE_CDP_HIVE=false
    configuration.
    
    This moves the hadoop cloud storage dependencies into the
    USE_CDP_HIVE=false and USE_CDP_HIVE=true profiles. The
    USE_CDP_HIVE=false profile uses the hadoop-aws, hadoop-azure, and
    hadoop-azure-datalake dependencies, identical to the state before
    IMPALA-8766. The USE_CDP_HIVE=true profile references
    hadoop-cloud-storage.
    
    Change-Id: Iaa9acf5be5d79bca129d33688a8c623f54dea429
    Reviewed-on: http://gerrit.cloudera.org:8080/13929
    Reviewed-by: Vihang Karajgaonkar <vi...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 fe/pom.xml | 65 +++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 47 insertions(+), 18 deletions(-)

diff --git a/fe/pom.xml b/fe/pom.xml
index 3baa98c..f7e0d35 100644
--- a/fe/pom.xml
+++ b/fe/pom.xml
@@ -91,24 +91,6 @@ under the License.
     </dependency>
 
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-cloud-storage</artifactId>
-      <version>${hadoop.version}</version>
-      <exclusions>
-        <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
-        <exclusion>
-          <groupId>net.minidev</groupId>
-          <artifactId>json-smart</artifactId>
-        </exclusion>
-        <!-- Impala currently doesn't support GCS, so exclude those jars -->
-        <exclusion>
-          <groupId>com.google.cloud.bigdataoss</groupId>
-          <artifactId>*</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
       <groupId>org.apache.ranger</groupId>
       <artifactId>ranger-plugins-common</artifactId>
       <version>${ranger.version}</version>
@@ -943,6 +925,34 @@ under the License.
             </exclusion>
           </exclusions>
         </dependency>
+
+        <!-- CDH profile manually specifies cloud storage dependencies rather
+             than using hadoop-cloud-storage. This is to minimize any disruption
+             to older configurations. -->
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-aws</artifactId>
+          <version>${hadoop.version}</version>
+        </dependency>
+
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-azure</artifactId>
+          <version>${hadoop.version}</version>
+        </dependency>
+
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-azure-datalake</artifactId>
+          <version>${hadoop.version}</version>
+          <exclusions>
+            <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+            <exclusion>
+              <groupId>net.minidev</groupId>
+              <artifactId>json-smart</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
       </dependencies>
     </profile>
 
@@ -1047,6 +1057,25 @@ under the License.
           <version>3.2.0-m3</version>
           <scope>test</scope>
         </dependency>
+        <!-- The hadoop-cloud-storage artifact gets AWS, Azure, and other cloud
+             storage dependencies. It also incorporates Knox runtime dependencies. -->
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-cloud-storage</artifactId>
+          <version>${hadoop.version}</version>
+          <exclusions>
+            <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+            <exclusion>
+              <groupId>net.minidev</groupId>
+              <artifactId>json-smart</artifactId>
+            </exclusion>
+            <!-- Impala currently doesn't support GCS, so exclude those jars -->
+            <exclusion>
+              <groupId>com.google.cloud.bigdataoss</groupId>
+              <artifactId>*</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
       </dependencies>
     </profile>
 


[impala] 03/03: IMPALA-8783: Add Kerberos SPNEGO support to the http hs2 server

Posted by ta...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 82f753e3044bd2482f35d137fbb28516fc0ef86c
Author: Thomas Tauber-Marshall <tm...@cloudera.com>
AuthorDate: Tue Jul 23 19:55:22 2019 +0000

    IMPALA-8783: Add Kerberos SPNEGO support to the http hs2 server
    
    IMPALA-8538 added support for http connections to the hs2 server along
    with LDAP auth support. This patch adds support for authenticating
    with Kerberos via SPNEGO.
    
    Testing:
    - Added a unit test that runs a minikdc and an Impala server against
      it. Currently disabled until curl is available in the toolchain.
    - Tested manually on a real cluster with a full Kerberos setup and
      beeline.
    
    Change-Id: I15d9a842ab37ebc34b9fde5917137ff2961d870a
    Reviewed-on: http://gerrit.cloudera.org:8080/13918
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
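
For context on the handshake this patch implements: SPNEGO over HTTP (RFC 4559) is a short
challenge/response loop in which the server answers an unauthenticated request with 401 plus
"WWW-Authenticate: Negotiate", the client retries with "Authorization: Negotiate <token>", and
the server may return a final mutual-authentication token. The sketch below shows only that loop
shape; SpnegoStepStub, the hard-coded tokens, and the principal name are illustrative stand-ins,
whereas the real code delegates each step to kudu::gssapi::SpnegoStep and stores the resulting
headers on the connection context.

  #include <iostream>
  #include <string>
  #include <vector>

  // Illustrative stand-in for kudu::gssapi::SpnegoStep(): consume the client's
  // token, produce a response token, and report whether negotiation finished.
  // A real step validates the Kerberos ticket and extracts the principal.
  static bool SpnegoStepStub(const std::string& in_token, std::string* resp_token,
      bool* is_complete, std::string* username) {
    if (in_token.empty()) {
      *is_complete = false;  // nothing to verify yet
      return true;
    }
    *resp_token = "c2VydmVyLXRva2Vu";  // placeholder for a base64 GSSAPI blob
    *is_complete = true;
    *username = "alice@KRBTEST.COM";
    return true;
  }

  int main() {
    std::vector<std::string> return_headers;

    // Request 1: no credentials, so the server answers 401 with a bare challenge.
    return_headers.push_back("WWW-Authenticate: Negotiate");

    // Request 2: the client retries with 'Authorization: Negotiate <token>'.
    std::string resp_token, username;
    bool is_complete = false;
    if (SpnegoStepStub("Y2xpZW50LXRva2Vu", &resp_token, &is_complete, &username)
        && is_complete && !username.empty()) {
      // On success the patch records the principal on the connection and may
      // hand a mutual-authentication token back to the client.
      return_headers.push_back("WWW-Authenticate: Negotiate " + resp_token);
      std::cout << "authenticated principal: " << username << std::endl;
    }
    for (const std::string& h : return_headers) std::cout << h << std::endl;
    return 0;
  }
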
---
 be/src/rpc/CMakeLists.txt         |   3 +
 be/src/rpc/TAcceptQueueServer.cpp |   8 ++-
 be/src/rpc/auth-provider.h        |  24 ++++---
 be/src/rpc/authentication.cc      |  97 +++++++++++++++++++++++------
 be/src/rpc/kerberos-test.cc       | 111 +++++++++++++++++++++++++++++++++
 be/src/rpc/thrift-server.cc       |  21 ++++---
 be/src/rpc/thrift-server.h        |   3 +
 be/src/service/impala-server.cc   |   5 +-
 be/src/transport/THttpServer.cpp  | 127 +++++++++++++++++++++++++++-----------
 be/src/transport/THttpServer.h    |  78 ++++++++++++++++-------
 common/thrift/metrics.json        |  20 ++++++
 11 files changed, 396 insertions(+), 101 deletions(-)

diff --git a/be/src/rpc/CMakeLists.txt b/be/src/rpc/CMakeLists.txt
index e420049..fa76059 100644
--- a/be/src/rpc/CMakeLists.txt
+++ b/be/src/rpc/CMakeLists.txt
@@ -53,6 +53,9 @@ ADD_BE_TEST(thrift-server-test) # TODO: this test leaks servers
 # The thrift-server-test uses some utilities from the Kudu security test code.
 target_link_libraries(thrift-server-test security-test-for-impala)
 
+ADD_BE_TEST(kerberos-test)
+target_link_libraries(kerberos-test security-test-for-impala)
+
 ADD_BE_LSAN_TEST(authentication-test)
 
 ADD_BE_TEST(rpc-mgr-test) # TODO: this test leaks various KRPC things
diff --git a/be/src/rpc/TAcceptQueueServer.cpp b/be/src/rpc/TAcceptQueueServer.cpp
index fa04d87..ae4de73 100644
--- a/be/src/rpc/TAcceptQueueServer.cpp
+++ b/be/src/rpc/TAcceptQueueServer.cpp
@@ -97,9 +97,6 @@ class TAcceptQueueServer::Task : public Runnable {
     } catch (...) {
       GlobalOutput("TAcceptQueueServer uncaught exception.");
     }
-    if (eventHandler != nullptr) {
-      eventHandler->deleteContext(connectionContext, input_, output_);
-    }
 
     try {
       input_->getTransport()->close();
@@ -114,6 +111,11 @@ class TAcceptQueueServer::Task : public Runnable {
       GlobalOutput(errStr.c_str());
     }
 
+    // Delete the context after closing the transports in case they have references to it.
+    if (eventHandler != nullptr) {
+      eventHandler->deleteContext(connectionContext, input_, output_);
+    }
+
     // Remove this task from parent bookkeeping
     {
       Synchronized s(server_.tasksMonitor_);
diff --git a/be/src/rpc/auth-provider.h b/be/src/rpc/auth-provider.h
index c7a6fd5..1807eb9 100644
--- a/be/src/rpc/auth-provider.h
+++ b/be/src/rpc/auth-provider.h
@@ -60,11 +60,12 @@ class AuthProvider {
       boost::shared_ptr<apache::thrift::transport::TTransport>* wrapped_transport)
       WARN_UNUSED_RESULT = 0;
 
-  /// Setup 'connection_ptr' to get its username from 'underlying_transport'.
+  /// Setup 'connection_ptr' to get its username with the given transports.
   virtual void SetupConnectionContext(
       const boost::shared_ptr<ThriftServer::ConnectionContext>& connection_ptr,
       ThriftServer::TransportType underlying_transport_type,
-      apache::thrift::transport::TTransport* underlying_transport) = 0;
+      apache::thrift::transport::TTransport* underlying_input_transport,
+      apache::thrift::transport::TTransport* underlying_output_transport) = 0;
 
   /// Returns true if this provider uses Sasl at the transport layer.
   virtual bool is_secure() = 0;
@@ -73,9 +74,10 @@ class AuthProvider {
 };
 
 /// Used if either (or both) Kerberos and LDAP auth are desired. For BINARY connections we
-/// use Sasl for the communication, and for HTTP connections we use BASIC auth.  This
-/// "wraps" the underlying communication, in thrift-speak. This is used for both client
-/// and server contexts; there is one for internal and one for external communication.
+/// use Sasl for the communication, and for HTTP connections we use Basic or SPNEGO auth.
+/// This "wraps" the underlying communication, in thrift-speak. This is used for both
+/// client and server contexts; there is one for internal and one for external
+/// communication.
 class SecureAuthProvider : public AuthProvider {
  public:
   SecureAuthProvider(bool is_internal)
@@ -105,13 +107,14 @@ class SecureAuthProvider : public AuthProvider {
       boost::shared_ptr<apache::thrift::transport::TTransportFactory>* factory);
 
   /// IF sasl was used, the username will be available from the handshake, and we set it
-  /// here. If HTTP BASIC auth was used, the username won't be available until the first
-  /// packet is received, so w register a callback with the transport that will set the
-  /// username then.
+  /// here. If HTTP Basic or SPNEGO auth was used, the username won't be available until
+  /// one or more packets are received, so we register a callback with the transport that
+  /// will set the username when it's available.
   virtual void SetupConnectionContext(
       const boost::shared_ptr<ThriftServer::ConnectionContext>& connection_ptr,
       ThriftServer::TransportType underlying_transport_type,
-      apache::thrift::transport::TTransport* underlying_transport);
+      apache::thrift::transport::TTransport* underlying_input_transport,
+      apache::thrift::transport::TTransport* underlying_output_transport);
 
   virtual bool is_secure() { return true; }
 
@@ -190,7 +193,8 @@ class NoAuthProvider : public AuthProvider {
   virtual void SetupConnectionContext(
       const boost::shared_ptr<ThriftServer::ConnectionContext>& connection_ptr,
       ThriftServer::TransportType underlying_transport_type,
-      apache::thrift::transport::TTransport* underlying_transport) {
+      apache::thrift::transport::TTransport* underlying_input_transport,
+      apache::thrift::transport::TTransport* underlying_output_transport) {
     connection_ptr->username = "";
   }
 
diff --git a/be/src/rpc/authentication.cc b/be/src/rpc/authentication.cc
index cac10ba..ef8e1db 100644
--- a/be/src/rpc/authentication.cc
+++ b/be/src/rpc/authentication.cc
@@ -26,6 +26,7 @@
 #include <boost/filesystem.hpp>
 #include <gutil/casts.h>
 #include <gutil/strings/escaping.h>
+#include <gutil/strings/strip.h>
 #include <gutil/strings/substitute.h>
 #include <random>
 #include <string>
@@ -39,6 +40,7 @@
 
 #include "exec/kudu-util.h"
 #include "kudu/rpc/sasl_common.h"
+#include "kudu/security/gssapi.h"
 #include "kudu/security/init.h"
 #include "rpc/auth-provider.h"
 #include "rpc/thrift-server.h"
@@ -496,18 +498,24 @@ static int SaslGetPath(void* context, const char** path) {
   return SASL_OK;
 }
 
-bool BasicAuth(ThriftServer::ConnectionContext* connection_context, const char* base64) {
-  int64_t in_len = strlen(base64);
+bool BasicAuth(
+    ThriftServer::ConnectionContext* connection_context, const std::string& base64) {
+  if (base64.empty()) {
+    connection_context->return_headers.push_back("WWW-Authenticate: Basic");
+    return false;
+  }
   string decoded;
-  if (!Base64Unescape(base64, in_len, &decoded)) {
+  if (!Base64Unescape(base64, &decoded)) {
     LOG(ERROR) << "Failed to decode base64 auth string from: "
                << TNetworkAddressToString(connection_context->network_address);
+    connection_context->return_headers.push_back("WWW-Authenticate: Basic");
     return false;
   }
   std::size_t colon = decoded.find(':');
   if (colon == std::string::npos) {
     LOG(ERROR) << "Auth string must be in the form '<username>:<password>' from: "
                << TNetworkAddressToString(connection_context->network_address);
+    connection_context->return_headers.push_back("WWW-Authenticate: Basic");
     return false;
   }
   string username = decoded.substr(0, colon);
@@ -516,8 +524,57 @@ bool BasicAuth(ThriftServer::ConnectionContext* connection_context, const char*
   if (ret) {
     // Authenication was successful, so set the username on the connection.
     connection_context->username = username;
+    return true;
+  }
+  connection_context->return_headers.push_back("WWW-Authenticate: Basic");
+  return false;
+}
+
+// Performs a step of SPNEGO auth for the HTTP transport and sets the username on
+// 'connection_context' if auth is successful. 'header_token' is the value from an
+// 'Authorization: Negotiate' header. Returns true if the step was successful and sets
+// 'is_complete' to indicate if more steps are needed. Returns false if an error was
+// encountered and the connection should be closed.
+bool NegotiateAuth(ThriftServer::ConnectionContext* connection_context,
+    const std::string& header_token, bool* is_complete) {
+  std::string token;
+  // Note: according to RFC 2616, the correct format for the header is:
+  // 'Authorization: Negotiate <token>'. However, beeline incorrectly adds an additional
+  // ':', i.e. 'Authorization: Negotiate: <token>'. We handle that here.
+  TryStripPrefixString(header_token, ": ", &token);
+  string resp_token;
+  string username;
+  kudu::Status spnego_status =
+      kudu::gssapi::SpnegoStep(token, &resp_token, is_complete, &username);
+  if (spnego_status.ok()) {
+    if (!resp_token.empty()) {
+      string resp_header = Substitute("WWW-Authenticate: Negotiate $0", resp_token);
+      connection_context->return_headers.push_back(resp_header);
+    }
+    if (*is_complete) {
+      if (username.empty()) {
+        spnego_status = kudu::Status::RuntimeError(
+            "SPNEGO indicated complete, but got empty principal");
+        // Crash in debug builds, but fall through to treating as an error in release.
+        LOG(DFATAL) << "Got no authenticated principal for SPNEGO-authenticated "
+                    << " connection from "
+                    << TNetworkAddressToString(connection_context->network_address)
+                    << ": " << spnego_status.ToString();
+      } else {
+        // Authentication was successful, so set the username on the connection.
+        connection_context->username = username;
+      }
+    }
+  } else {
+    LOG(WARNING) << "Failed to authenticate request from "
+                 << TNetworkAddressToString(connection_context->network_address)
+                 << " via SPNEGO: " << spnego_status.ToString();
   }
-  return ret;
+  return spnego_status.ok();
+}
+
+vector<string> ReturnHeaders(ThriftServer::ConnectionContext* connection_context) {
+  return std::move(connection_context->return_headers);
 }
 
 namespace {
@@ -869,17 +926,10 @@ Status SecureAuthProvider::GetServerTransportFactory(
   DCHECK(!principal_.empty() || has_ldap_);
 
   if (underlying_transport_type == ThriftServer::HTTP) {
-    // TODO: add support for SPNEGO style of HTTP auth
-    if (!principal_.empty() && !has_ldap_) {
-      const string err = "Kerberos not yet supported with HTTP transport.";
-      LOG(ERROR) << err;
-      return Status(err);
-    }
-
+    bool has_kerberos = !principal_.empty();
     factory->reset(new ThriftServer::BufferedTransportFactory(
         ThriftServer::BufferedTransportFactory::DEFAULT_BUFFER_SIZE_BYTES,
-        new THttpServerTransportFactory(
-            server_name, metrics, /* requireBasicAuth */ true)));
+        new THttpServerTransportFactory(server_name, metrics, has_ldap_, has_kerberos)));
     return Status::OK();
   }
 
@@ -951,20 +1001,31 @@ Status SecureAuthProvider::WrapClientTransport(const string& hostname,
 void SecureAuthProvider::SetupConnectionContext(
     const boost::shared_ptr<ThriftServer::ConnectionContext>& connection_ptr,
     ThriftServer::TransportType underlying_transport_type,
-    TTransport* underlying_transport) {
+    TTransport* underlying_input_transport, TTransport* underlying_output_transport) {
   switch (underlying_transport_type) {
     case ThriftServer::BINARY: {
       TSaslServerTransport* sasl_transport =
-          down_cast<TSaslServerTransport*>(underlying_transport);
+          down_cast<TSaslServerTransport*>(underlying_input_transport);
 
       // Get the username from the transport.
       connection_ptr->username = sasl_transport->getUsername();
       break;
     }
     case ThriftServer::HTTP: {
-      THttpServer* http_transport = down_cast<THttpServer*>(underlying_transport);
-      http_transport->setAuthFn(
-          std::bind(BasicAuth, connection_ptr.get(), std::placeholders::_1));
+      THttpServer* http_input_transport =
+          down_cast<THttpServer*>(underlying_input_transport);
+      if (has_ldap_) {
+        http_input_transport->setBasicAuthFn(
+            std::bind(BasicAuth, connection_ptr.get(), std::placeholders::_1));
+      }
+      if (!principal_.empty()) {
+        http_input_transport->setNegotiateAuthFn(std::bind(NegotiateAuth,
+            connection_ptr.get(), std::placeholders::_1, std::placeholders::_2));
+        THttpServer* http_output_transport =
+            down_cast<THttpServer*>(underlying_input_transport);
+        http_output_transport->setReturnHeadersFn(
+            std::bind(ReturnHeaders, connection_ptr.get()));
+      }
       break;
     }
     default:
diff --git a/be/src/rpc/kerberos-test.cc b/be/src/rpc/kerberos-test.cc
new file mode 100644
index 0000000..45408e1
--- /dev/null
+++ b/be/src/rpc/kerberos-test.cc
@@ -0,0 +1,111 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "testutil/gtest-util.h"
+
+#include "gen-cpp/ImpalaHiveServer2Service.h"
+#include "rpc/authentication.h"
+#include "util/kudu-status-util.h"
+
+#include "kudu/security/test/mini_kdc.h"
+
+DECLARE_string(principal);
+DECLARE_string(keytab_file);
+
+using namespace impala;
+using namespace apache::hive::service::cli::thrift;
+using namespace apache::thrift;
+
+class TestHS2Service : public ImpalaHiveServer2ServiceIf {
+ public:
+  virtual ~TestHS2Service() {}
+  virtual void OpenSession(TOpenSessionResp& _return, const TOpenSessionReq& req) {}
+  virtual void CloseSession(TCloseSessionResp& _return, const TCloseSessionReq& req) {}
+  virtual void GetInfo(TGetInfoResp& _return, const TGetInfoReq& req) {}
+  virtual void ExecuteStatement(
+      TExecuteStatementResp& _return, const TExecuteStatementReq& req) {}
+  virtual void GetTypeInfo(TGetTypeInfoResp& _return, const TGetTypeInfoReq& req) {}
+  virtual void GetCatalogs(TGetCatalogsResp& _return, const TGetCatalogsReq& req) {}
+  virtual void GetSchemas(TGetSchemasResp& _return, const TGetSchemasReq& req) {}
+  virtual void GetTables(TGetTablesResp& _return, const TGetTablesReq& req) {}
+  virtual void GetTableTypes(TGetTableTypesResp& _return, const TGetTableTypesReq& req) {}
+  virtual void GetColumns(TGetColumnsResp& _return, const TGetColumnsReq& req) {}
+  virtual void GetFunctions(TGetFunctionsResp& _return, const TGetFunctionsReq& req) {}
+  virtual void GetOperationStatus(
+      TGetOperationStatusResp& _return, const TGetOperationStatusReq& req) {}
+  virtual void CancelOperation(
+      TCancelOperationResp& _return, const TCancelOperationReq& req) {}
+  virtual void CloseOperation(
+      TCloseOperationResp& _return, const TCloseOperationReq& req) {}
+  virtual void GetResultSetMetadata(
+      TGetResultSetMetadataResp& _return, const TGetResultSetMetadataReq& req) {}
+  virtual void FetchResults(TFetchResultsResp& _return, const TFetchResultsReq& req) {}
+  virtual void GetDelegationToken(
+      TGetDelegationTokenResp& _return, const TGetDelegationTokenReq& req) {}
+  virtual void CancelDelegationToken(
+      TCancelDelegationTokenResp& _return, const TCancelDelegationTokenReq& req) {}
+  virtual void RenewDelegationToken(
+      TRenewDelegationTokenResp& _return, const TRenewDelegationTokenReq& req) {}
+  virtual void GetLog(TGetLogResp& _return, const TGetLogReq& req) {}
+  virtual void GetExecSummary(
+      TGetExecSummaryResp& _return, const TGetExecSummaryReq& req) {}
+  virtual void GetRuntimeProfile(
+      TGetRuntimeProfileResp& _return, const TGetRuntimeProfileReq& req) {}
+  virtual void PingImpalaHS2Service(
+      TPingImpalaHS2ServiceResp& _return, const TPingImpalaHS2ServiceReq& req) {}
+  virtual void CloseImpalaOperation(
+      TCloseImpalaOperationResp& _return, const TCloseImpalaOperationReq& req) {}
+};
+
+// Test that the HTTP server can be connected to successfully with Kerberos.
+TEST(ThriftKerberosTest, TestSpnego) {
+  // Initialize the mini kdc.
+  kudu::MiniKdc kdc(kudu::MiniKdcOptions{});
+  KUDU_ASSERT_OK(kdc.Start());
+  kdc.SetKrb5Environment();
+  string kt_path;
+  KUDU_ASSERT_OK(kdc.CreateServiceKeytab("HTTP/127.0.0.1", &kt_path));
+  CHECK_ERR(setenv("KRB5_KTNAME", kt_path.c_str(), 1));
+  KUDU_ASSERT_OK(kdc.CreateUserPrincipal("alice"));
+  KUDU_ASSERT_OK(kdc.Kinit("alice"));
+
+  // Set up a fake impala server with Kerberos enabled.
+  gflags::FlagSaver saver;
+  FLAGS_principal = "HTTP/127.0.0.1@KRBTEST.COM";
+  FLAGS_keytab_file = kt_path;
+  AuthManager auth_manager;
+  ASSERT_OK(auth_manager.Init());
+  boost::shared_ptr<TestHS2Service> service(new TestHS2Service());
+  boost::shared_ptr<TProcessor> hs2_http_processor(
+      new ImpalaHiveServer2ServiceProcessor(service));
+  int port = 28005;
+  ThriftServer* http_server;
+  ThriftServerBuilder http_builder("test-http-server", hs2_http_processor, port);
+  ASSERT_OK(http_builder.auth_provider(auth_manager.GetExternalAuthProvider())
+                .transport_type(ThriftServer::TransportType::HTTP)
+                .Build(&http_server));
+  ASSERT_OK(http_server->Start());
+
+  // TODO: enable this when curl is available in the toolchain
+  //system("curl -X POST -v --negotiate -u : 'http://127.0.0.1:28005'");
+}
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  InitCommonRuntime(argc, argv, false, TestInfo::BE_TEST);
+  return RUN_ALL_TESTS();
+}
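
Once curl is available in the toolchain, the commented-out check above could become a real
assertion roughly as sketched below. This is hypothetical: the exact curl flags and the
exit-status handling are not part of the patch, and the sketch is wrapped in its own main() so
it stands alone.

  #include <cstdlib>
  #include <iostream>
  #include <sys/wait.h>

  int main() {
    // Hypothetical completion of the TODO in kerberos-test.cc: issue a
    // SPNEGO-authenticated POST against the test server and require success.
    // '--fail' makes curl exit non-zero on an HTTP 4xx/5xx response.
    int rc = std::system(
        "curl --fail -sS -X POST --negotiate -u : 'http://127.0.0.1:28005'");
    bool ok = WIFEXITED(rc) && WEXITSTATUS(rc) == 0;
    std::cout << (ok ? "SPNEGO request succeeded" : "SPNEGO request failed")
              << std::endl;
    return ok ? 0 : 1;
  }
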
diff --git a/be/src/rpc/thrift-server.cc b/be/src/rpc/thrift-server.cc
index c1ce270..fe475a1 100644
--- a/be/src/rpc/thrift-server.cc
+++ b/be/src/rpc/thrift-server.cc
@@ -205,21 +205,26 @@ void* ThriftServer::ThriftServerEventProcessor::createContext(
   TTransport* transport = input->getTransport().get();
   boost::shared_ptr<ConnectionContext> connection_ptr =
       boost::shared_ptr<ConnectionContext>(new ConnectionContext);
-  TTransport* underlying_transport =
+  TTransport* underlying_input_transport =
       (static_cast<TBufferedTransport*>(transport))->getUnderlyingTransport().get();
-
-  thrift_server_->auth_provider_->SetupConnectionContext(
-      connection_ptr, thrift_server_->transport_type_, underlying_transport);
+  TTransport* underlying_output_transport =
+      (static_cast<TBufferedTransport*>(output->getTransport().get()))
+          ->getUnderlyingTransport()
+          .get();
+
+  thrift_server_->auth_provider_->SetupConnectionContext(connection_ptr,
+      thrift_server_->transport_type_, underlying_input_transport,
+      underlying_output_transport);
   if (thrift_server_->auth_provider_->is_secure()
       && thrift_server_->transport_type_ == ThriftServer::BINARY) {
-    TSaslServerTransport* sasl_transport = static_cast<TSaslServerTransport*>(
-        underlying_transport);
+    TSaslServerTransport* sasl_transport =
+        static_cast<TSaslServerTransport*>(underlying_input_transport);
     socket = static_cast<TSocket*>(sasl_transport->getUnderlyingTransport().get());
   } else if (thrift_server_->transport_type_ == ThriftServer::HTTP) {
-    THttpServer* http_transport = static_cast<THttpServer*>(underlying_transport);
+    THttpServer* http_transport = static_cast<THttpServer*>(underlying_input_transport);
     socket = static_cast<TSocket*>(http_transport->getUnderlyingTransport().get());
   } else {
-    socket = static_cast<TSocket*>(underlying_transport);
+    socket = static_cast<TSocket*>(underlying_input_transport);
   }
 
   {
diff --git a/be/src/rpc/thrift-server.h b/be/src/rpc/thrift-server.h
index bf9a78b..e4bae51 100644
--- a/be/src/rpc/thrift-server.h
+++ b/be/src/rpc/thrift-server.h
@@ -97,6 +97,9 @@ class ThriftServer {
     Username username;
     TNetworkAddress network_address;
     std::string server_name;
+    /// Used to pass HTTP headers generated by the input transport to the output transport
+    /// to be returned.
+    std::vector<std::string> return_headers;
   };
 
   /// Interface class for receiving connection creation / termination events.
diff --git a/be/src/service/impala-server.cc b/be/src/service/impala-server.cc
index bf3e93f..07a332f 100644
--- a/be/src/service/impala-server.cc
+++ b/be/src/service/impala-server.cc
@@ -2588,10 +2588,7 @@ Status ImpalaServer::Start(int32_t thrift_be_port, int32_t beeswax_port, int32_t
       hs2_server_->SetConnectionHandler(this);
     }
 
-    // We can't currently secure the http server with Kerberos, only with LDAP, so if
-    // Kerberos is enabled and LDAP isn't we automatically disable the http server.
-    if ((hs2_http_port > 0 && (!IsKerberosEnabled() || FLAGS_enable_ldap_auth))
-        || (TestInfo::is_test() && hs2_http_port == 0)) {
+    if (hs2_http_port > 0 || (TestInfo::is_test() && hs2_http_port == 0)) {
       boost::shared_ptr<TProcessor> hs2_http_processor(
           new ImpalaHiveServer2ServiceProcessor(handler));
       boost::shared_ptr<TProcessorEventHandler> event_handler(
diff --git a/be/src/transport/THttpServer.cpp b/be/src/transport/THttpServer.cpp
index 29b60a9..182ed1d 100644
--- a/be/src/transport/THttpServer.cpp
+++ b/be/src/transport/THttpServer.cpp
@@ -40,24 +40,40 @@ namespace transport {
 using namespace std;
 using strings::Substitute;
 
-THttpServerTransportFactory::THttpServerTransportFactory(
-    const std::string server_name, impala::MetricGroup* metrics, bool requireBasicAuth)
-  : requireBasicAuth_(requireBasicAuth) {
-  if (requireBasicAuth_) {
-    total_basic_auth_success_ =
-        metrics->AddCounter(Substitute("$0.total-basic-auth-success", server_name), 0);
-    total_basic_auth_failure_ =
-        metrics->AddCounter(Substitute("$0.total-basic-auth-failure", server_name), 0);
+THttpServerTransportFactory::THttpServerTransportFactory(const std::string server_name,
+    impala::MetricGroup* metrics, bool has_ldap, bool has_kerberos)
+  : has_ldap_(has_ldap),
+    has_kerberos_(has_kerberos),
+    metrics_enabled_(metrics != nullptr) {
+  if (metrics_enabled_) {
+    if (has_ldap_) {
+      total_basic_auth_success_ =
+          metrics->AddCounter(Substitute("$0.total-basic-auth-success", server_name), 0);
+      total_basic_auth_failure_ =
+          metrics->AddCounter(Substitute("$0.total-basic-auth-failure", server_name), 0);
+    }
+    if (has_kerberos_) {
+      total_negotiate_auth_success_ = metrics->AddCounter(
+          Substitute("$0.total-negotiate-auth-success", server_name), 0);
+      total_negotiate_auth_failure_ = metrics->AddCounter(
+          Substitute("$0.total-negotiate-auth-failure", server_name), 0);
+    }
   }
 }
 
-THttpServer::THttpServer(boost::shared_ptr<TTransport> transport, bool requireBasicAuth,
-    impala::IntCounter* total_basic_auth_success,
-    impala::IntCounter* total_basic_auth_failure)
+THttpServer::THttpServer(boost::shared_ptr<TTransport> transport, bool has_ldap,
+    bool has_kerberos, bool metrics_enabled, impala::IntCounter* total_basic_auth_success,
+    impala::IntCounter* total_basic_auth_failure,
+    impala::IntCounter* total_negotiate_auth_success,
+    impala::IntCounter* total_negotiate_auth_failure)
   : THttpTransport(transport),
-    requireBasicAuth_(requireBasicAuth),
+    has_ldap_(has_ldap),
+    has_kerberos_(has_kerberos),
+    metrics_enabled_(metrics_enabled),
     total_basic_auth_success_(total_basic_auth_success),
-    total_basic_auth_failure_(total_basic_auth_failure) {}
+    total_basic_auth_failure_(total_basic_auth_failure),
+    total_negotiate_auth_success_(total_negotiate_auth_success),
+    total_negotiate_auth_failure_(total_negotiate_auth_failure) {}
 
 THttpServer::~THttpServer() {
 }
@@ -87,8 +103,9 @@ void THttpServer::parseHeader(char* header) {
     contentLength_ = atoi(value);
   } else if (strncmp(header, "X-Forwarded-For", sz) == 0) {
     origin_ = value;
-  } else if (requireBasicAuth_ && THRIFT_strncasecmp(header, "Authorization", sz) == 0) {
-    authValue_ = string(value);
+  } else if ((has_ldap_ || has_kerberos_)
+      && THRIFT_strncasecmp(header, "Authorization", sz) == 0) {
+    auth_value_ = string(value);
   }
 }
 
@@ -141,24 +158,53 @@ bool THttpServer::parseStatusLine(char* status) {
 }
 
 void THttpServer::headersDone() {
-  if (requireBasicAuth_) {
-    bool authorized = false;
-    if (authValue_ != "") {
-      StripWhiteSpace(&authValue_);
-      string base64;
-      if (TryStripPrefixString(authValue_, "Basic ", &base64)) {
-        if (authFn_(base64.c_str())) {
-          authorized = true;
-          total_basic_auth_success_->Increment(1);
-        }
-      }
+  if (!has_ldap_ && !has_kerberos_) {
+    // We don't need to authenticate.
+    auth_value_ = "";
+    return;
+  }
+
+  // Determine what type of auth header we got.
+  StripWhiteSpace(&auth_value_);
+  string basic_auth_token;
+  bool got_basic_auth = TryStripPrefixString(auth_value_, "Basic ", &basic_auth_token);
+  string negotiate_auth_token;
+  bool got_negotiate_auth =
+      TryStripPrefixString(auth_value_, "Negotiate ", &negotiate_auth_token);
+  // We can only have gotten one type of auth header.
+  DCHECK(!got_basic_auth || !got_negotiate_auth);
+
+  // For each auth type we support, we call the auth callback if we didn't get a header
+  // of the other auth type or if the other auth type isn't supported. This way, if a
+  // client selects a supported auth method, they'll only get return headers for that
+  // method, but if they didn't specify a valid auth method or didn't provide an
+  // 'Authorization' header at all, they'll get back 'WWW-Authenticate' return headers for
+  // all supported auth types.
+  bool authorized = false;
+  if (has_ldap_ && (!got_negotiate_auth || !has_kerberos_)) {
+    if (basic_auth_fn_(basic_auth_token)) {
+      authorized = true;
+      if (metrics_enabled_) total_basic_auth_success_->Increment(1);
+    } else {
+      if (got_basic_auth && metrics_enabled_) total_basic_auth_failure_->Increment(1);
     }
-    if (!authorized) {
-      total_basic_auth_failure_->Increment(1);
-      returnUnauthorized();
-      throw TTransportException("HTTP Basic auth failed.");
+  } else if (has_kerberos_ && (!got_basic_auth || !has_ldap_)) {
+    bool is_complete;
+    if (negotiate_auth_fn_(negotiate_auth_token, &is_complete)) {
+      // If 'is_complete' is false we want to return a 401.
+      authorized = is_complete;
+      if (is_complete && metrics_enabled_) total_negotiate_auth_success_->Increment(1);
+    } else {
+      if (got_negotiate_auth && metrics_enabled_) {
+        total_negotiate_auth_failure_->Increment(1);
+      }
     }
-    authValue_ = "";
+  }
+
+  auth_value_ = "";
+  if (!authorized) {
+    returnUnauthorized();
+    throw TTransportException("HTTP auth failed.");
   }
 }
 
@@ -170,10 +216,15 @@ void THttpServer::flush() {
 
   // Construct the HTTP header
   std::ostringstream h;
-  h << "HTTP/1.1 200 OK" << CRLF << "Date: " << getTimeRFC1123() << CRLF << "Server: Thrift/"
-    << VERSION << CRLF << "Access-Control-Allow-Origin: *" << CRLF
+  h << "HTTP/1.1 200 OK" << CRLF << "Date: " << getTimeRFC1123() << CRLF
+    << "Server: Thrift/" << VERSION << CRLF << "Access-Control-Allow-Origin: *" << CRLF
     << "Content-Type: application/x-thrift" << CRLF << "Content-Length: " << len << CRLF
-    << "Connection: Keep-Alive" << CRLF << CRLF;
+    << "Connection: Keep-Alive" << CRLF;
+  vector<string> return_headers = return_headers_fn_();
+  for (const string& header : return_headers) {
+    h << header << CRLF;
+  }
+  h << CRLF;
   string header = h.str();
 
   // Write the header, then the data, then flush
@@ -209,8 +260,12 @@ std::string THttpServer::getTimeRFC1123() {
 
 void THttpServer::returnUnauthorized() {
   std::ostringstream h;
-  h << "HTTP/1.1 401 Unauthorized" << CRLF << "Date: " << getTimeRFC1123() << CRLF
-    << "WWW-Authenticate: Basic" << CRLF << CRLF;
+  h << "HTTP/1.1 401 Unauthorized" << CRLF << "Date: " << getTimeRFC1123() << CRLF;
+  vector<string> return_headers = return_headers_fn_();
+  for (const string& header : return_headers) {
+    h << header << CRLF;
+  }
+  h << CRLF;
   string header = h.str();
   transport_->write((const uint8_t*)header.c_str(), static_cast<uint32_t>(header.size()));
   transport_->flush();
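
The dispatch rule in headersDone() above is compact but easy to misread. The stand-alone sketch
below restates it with plain std::function callbacks; Authorize(), the lambdas, and the prefix
handling are simplified stand-ins for the Thrift transport and for the callbacks that
SetupConnectionContext() installs, not code from the patch.

  #include <functional>
  #include <iostream>
  #include <string>

  // Simplified mirror of THttpServer's dispatch: which callback runs depends on
  // which schemes the server supports and which scheme the client actually sent.
  bool Authorize(bool has_ldap, bool has_kerberos, const std::string& auth_value,
      const std::function<bool(const std::string&)>& basic_auth_fn,
      const std::function<bool(const std::string&, bool*)>& negotiate_auth_fn) {
    if (!has_ldap && !has_kerberos) return true;  // nothing to check

    std::string basic_token, negotiate_token;
    bool got_basic = auth_value.rfind("Basic ", 0) == 0;
    if (got_basic) basic_token = auth_value.substr(6);
    bool got_negotiate = auth_value.rfind("Negotiate ", 0) == 0;
    if (got_negotiate) negotiate_token = auth_value.substr(10);

    // Prefer the scheme the client used; otherwise fall back to whatever is enabled.
    if (has_ldap && (!got_negotiate || !has_kerberos)) {
      return basic_auth_fn(basic_token);
    } else if (has_kerberos && (!got_basic || !has_ldap)) {
      bool is_complete = false;
      // A successful-but-incomplete step still yields a 401 so the client
      // continues the negotiation.
      return negotiate_auth_fn(negotiate_token, &is_complete) && is_complete;
    }
    return false;
  }

  int main() {
    auto basic = [](const std::string& tok) { return tok == "dXNlcjpwYXNz"; };  // "user:pass"
    auto negotiate = [](const std::string& tok, bool* done) {
      *done = !tok.empty();
      return !tok.empty();
    };
    std::cout << Authorize(true, true, "Basic dXNlcjpwYXNz", basic, negotiate) << "\n";  // 1
    std::cout << Authorize(true, true, "Negotiate abc", basic, negotiate) << "\n";       // 1
    std::cout << Authorize(true, true, "", basic, negotiate) << "\n";                    // 0
    return 0;
  }
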
diff --git a/be/src/transport/THttpServer.h b/be/src/transport/THttpServer.h
index bd3366e..de057b5 100644
--- a/be/src/transport/THttpServer.h
+++ b/be/src/transport/THttpServer.h
@@ -35,17 +35,28 @@ public:
 
   // Function that takes a base64 encoded string of the form 'username:password' and
   // returns true if authentication is successful.
-  typedef std::function<bool(const char*)> BasicAuthFn;
+  typedef std::function<bool(const std::string&)> BasicAuthFn;
 
-  THttpServer(boost::shared_ptr<TTransport> transport, bool requireBasicAuth,
-      impala::IntCounter* total_basic_auth_success,
-      impala::IntCounter* total_basic_auth_failure);
+  // Function that takes the value from an 'Authorization: Negotiate' header. Returns true
+  // if successful and sets 'is_complete' to true if negotiation is done.
+  typedef std::function<bool(const std::string&, bool* is_complete)> NegotiateAuthFn;
+
+  // Function that returns a list of headers to return to the client.
+  typedef std::function<std::vector<std::string>()> ReturnHeadersFn;
+
+  THttpServer(boost::shared_ptr<TTransport> transport, bool has_ldap, bool has_kerberos,
+      bool metrics_enabled, impala::IntCounter* total_basic_auth_success,
+      impala::IntCounter* total_basic_auth_failure,
+      impala::IntCounter* total_negotiate_auth_success,
+      impala::IntCounter* total_negotiate_auth_failure);
 
   virtual ~THttpServer();
 
   virtual void flush();
 
-  void setAuthFn(const BasicAuthFn& fn) { authFn_ = fn; }
+  void setBasicAuthFn(const BasicAuthFn& fn) { basic_auth_fn_ = fn; }
+  void setNegotiateAuthFn(const NegotiateAuthFn& fn) { negotiate_auth_fn_ = fn; }
+  void setReturnHeadersFn(const ReturnHeadersFn& fn) { return_headers_fn_ = fn; }
 
 protected:
   void readHeaders();
@@ -57,21 +68,35 @@ protected:
   void returnUnauthorized();
 
  private:
-  static bool dummyAuthFn(const char*) { return false; }
+  static bool dummyBasicAuthFn(const std::string&) { return false; }
+  static bool dummyNegotiateAuthFn(const std::string&, bool*) { return false; }
+  static std::vector<std::string> dummyReturnHeadersFn() {
+    return std::vector<std::string>();
+  }
 
-  // If true, a '401' will be returned and a TTransportException thrown unless each set
-  // of headers contains a valid 'Authorization: Basic...'.
-  bool requireBasicAuth_ = false;
+  // If either of the following is true, a '401 - Unauthorized' will be returned to the
+  // client on requests that do not contain a valid 'Authorization' header. If 'has_ldap_'
+  // is true, 'Basic' auth headers will be processed, and if 'has_kerberos_' is true
+  // 'Negotiate' auth headers will be processed.
+  bool has_ldap_ = false;
+  bool has_kerberos_ = false;
 
   // Called with the base64 encoded authorization from a 'Authorization: Basic' header.
-  BasicAuthFn authFn_ = &dummyAuthFn;
+  BasicAuthFn basic_auth_fn_ = &dummyBasicAuthFn;
+  // Called with the value from an 'Authorization: Negotiate' header.
+  NegotiateAuthFn negotiate_auth_fn_ = &dummyNegotiateAuthFn;
+  // Called during flush() to get additional headers to return.
+  ReturnHeadersFn return_headers_fn_ = &dummyReturnHeadersFn;
 
   // The value from the 'Authorization' header.
-  std::string authValue_ = "";
+  std::string auth_value_ = "";
 
   // Metrics
-  impala::IntCounter* total_basic_auth_success_;
-  impala::IntCounter* total_basic_auth_failure_;
+  bool metrics_enabled_;
+  impala::IntCounter* total_basic_auth_success_ = nullptr;
+  impala::IntCounter* total_basic_auth_failure_ = nullptr;
+  impala::IntCounter* total_negotiate_auth_success_ = nullptr;
+  impala::IntCounter* total_negotiate_auth_failure_ = nullptr;
 };
 
 /**
@@ -79,10 +104,10 @@ protected:
  */
 class THttpServerTransportFactory : public TTransportFactory {
 public:
- THttpServerTransportFactory() : requireBasicAuth_(false) {}
+ THttpServerTransportFactory() {}
 
- THttpServerTransportFactory(
-     const std::string server_name, impala::MetricGroup* metrics, bool requireBasicAuth);
+ THttpServerTransportFactory(const std::string server_name, impala::MetricGroup* metrics,
+     bool has_ldap, bool has_kerberos);
 
  virtual ~THttpServerTransportFactory() {}
 
@@ -90,17 +115,26 @@ public:
   * Wraps the transport into a buffered one.
   */
  virtual boost::shared_ptr<TTransport> getTransport(boost::shared_ptr<TTransport> trans) {
-   return boost::shared_ptr<TTransport>(new THttpServer(
-       trans, requireBasicAuth_, total_basic_auth_success_, total_basic_auth_failure_));
+   return boost::shared_ptr<TTransport>(new THttpServer(trans, has_ldap_, has_kerberos_,
+       metrics_enabled_, total_basic_auth_success_, total_basic_auth_failure_,
+       total_negotiate_auth_success_, total_negotiate_auth_failure_));
   }
 
  private:
-  bool requireBasicAuth_ = false;
+  bool has_ldap_ = false;
+  bool has_kerberos_ = false;
+
+  bool metrics_enabled_ = false;
+
+  // If 'has_ldap_' is true, metrics for the number of successful and failed Basic
+  // auth attempts for every transport produced by this factory.
+  impala::IntCounter* total_basic_auth_success_ = nullptr;
+  impala::IntCounter* total_basic_auth_failure_ = nullptr;
 
-  // If 'requireBasicAuth_' is true, metrics for the number of successful and failed Basic
+  // If 'has_kerberos_' is true, metrics for the number of successful and failed Negotiate
   // auth ettempts for every transport produced by this factory.
-  impala::IntCounter* total_basic_auth_success_;
-  impala::IntCounter* total_basic_auth_failure_;
+  impala::IntCounter* total_negotiate_auth_success_ = nullptr;
+  impala::IntCounter* total_negotiate_auth_failure_ = nullptr;
 };
 }
 }
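
The ReturnHeadersFn hook declared above is how 'WWW-Authenticate' challenges produced while
parsing a request make it into the response: the auth callbacks on the input transport append
headers to the connection context, and flush()/returnUnauthorized() on the output transport
drain them. A compact sketch of that hand-off follows; the plain ConnectionContext struct and
the lambda are simplified stand-ins for ThriftServer::ConnectionContext and the binding done in
SetupConnectionContext().

  #include <functional>
  #include <iostream>
  #include <string>
  #include <utility>
  #include <vector>

  // Stand-in for ThriftServer::ConnectionContext, reduced to the field the patch adds.
  struct ConnectionContext {
    std::vector<std::string> return_headers;
  };

  using ReturnHeadersFn = std::function<std::vector<std::string>()>;

  int main() {
    ConnectionContext ctx;

    // Mirrors what the output transport gets bound to: drain the headers
    // accumulated so far, so each challenge is returned at most once.
    ReturnHeadersFn return_headers_fn = [&ctx]() {
      return std::move(ctx.return_headers);
    };

    // An auth callback on the input transport queued a challenge...
    ctx.return_headers.push_back("WWW-Authenticate: Negotiate");

    // ...and the output transport emits it when writing the HTTP response.
    for (const std::string& h : return_headers_fn()) {
      std::cout << h << "\r\n";
    }
    return 0;
  }
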
diff --git a/common/thrift/metrics.json b/common/thrift/metrics.json
index 4a93c80..2a0ee30 100644
--- a/common/thrift/metrics.json
+++ b/common/thrift/metrics.json
@@ -1142,6 +1142,26 @@
     "key": "impala.thrift-server.hiveserver2-http-frontend.total-basic-auth-failure"
   },
   {
+    "description": "The number of HiveServer2 HTTP API connection requests to this Impala Daemon that were successfully authenticated with Kerberos",
+    "contexts": [
+      "IMPALAD"
+    ],
+    "label": "HiveServer2 HTTP API Connection Kerberos Auth Success",
+    "units": "NONE",
+    "kind": "COUNTER",
+    "key": "impala.thrift-server.hiveserver2-http-frontend.total-negotiate-auth-success"
+  },
+  {
+    "description": "The number of HiveServer2 HTTP API connection requests to this Impala Daemon that attempted to authenticate with Kerberos but were unsuccessful.",
+    "contexts": [
+      "IMPALAD"
+    ],
+    "label": "HiveServer2 HTTP API Connection Kerberos Auth Failure",
+    "units": "NONE",
+    "kind": "COUNTER",
+    "key": "impala.thrift-server.hiveserver2-http-frontend.total-negotiate-auth-failure"
+  },
+  {
     "description": "The amount of memory freed by the last memory tracker garbage collection.",
     "contexts": [
       "IMPALAD"


[impala] 01/03: IMPALA-8798: Skip TestAutoScaling on EC runs

Posted by ta...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 6dd36d9d1be7437d3d5e18088a5e85a380c11040
Author: Lars Volker <lv...@cloudera.com>
AuthorDate: Fri Jul 26 19:26:06 2019 -0700

    IMPALA-8798: Skip TestAutoScaling on EC runs
    
    TestAutoScaling uses ConcurrentWorkload, which does not set the
    required query options to scan erasure-coded files. This change
    disables the affected test when running with erasure-coding enabled.
    
    Change-Id: I96690914f12679619ba96d79edde8af9cccf65fa
    Reviewed-on: http://gerrit.cloudera.org:8080/13935
    Reviewed-by: Lars Volker <lv...@cloudera.com>
    Tested-by: Lars Volker <lv...@cloudera.com>
---
 tests/custom_cluster/test_auto_scaling.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/custom_cluster/test_auto_scaling.py b/tests/custom_cluster/test_auto_scaling.py
index d0ab577..3c5ecca 100644
--- a/tests/custom_cluster/test_auto_scaling.py
+++ b/tests/custom_cluster/test_auto_scaling.py
@@ -23,6 +23,7 @@ from time import sleep, time
 from tests.util.auto_scaler import AutoScaler
 from tests.util.concurrent_workload import ConcurrentWorkload
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
+from tests.common.skip import SkipIfEC
 
 LOG = logging.getLogger("test_auto_scaling")
 
@@ -47,6 +48,7 @@ class TestAutoScaling(CustomClusterTestSuite):
   def _get_num_running_queries(self):
     return self.impalad_test_service.get_num_running_queries("default-pool")
 
+  @SkipIfEC.fix_later
   def test_single_workload(self):
     """This test exercises the auto-scaling logic in the admission controller. It spins up
     a base cluster (coordinator, catalog, statestore), runs some queries to observe that
@@ -102,6 +104,7 @@ class TestAutoScaling(CustomClusterTestSuite):
       LOG.info("Stopping auto scaler")
       auto_scaler.stop()
 
+  @SkipIfEC.fix_later
   def test_single_group_maxed_out(self):
     """This test starts an auto scaler and limits it to a single executor group. It then
     makes sure that the query throughput does not exceed the expected limit."""
@@ -157,6 +160,7 @@ class TestAutoScaling(CustomClusterTestSuite):
       LOG.info("Stopping auto scaler")
       auto_scaler.stop()
 
+  @SkipIfEC.fix_later
   def test_sequential_startup(self):
     """This test starts an executor group sequentially and observes that no queries are
     admitted until the group has been fully started."""