Posted to commits@impala.apache.org by st...@apache.org on 2020/06/10 17:46:55 UTC

[impala] branch master updated (ad8f468 -> 3088ca8)

This is an automated email from the ASF dual-hosted git repository.

stakiar pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git.


    from ad8f468  IMPALA-8860: Improve /log_level usability on WebUI
     new f9cb0a6  IMPALA-9077: Remove scalable admission control configs
     new 0bc2220  IMPALA-9192: Move Avro-Java and Parquet dependencies to the CDP version
     new e389e85  IMPALA-9840: Fix data race in InternalQueue
     new 3088ca8  IMPALA-9818: Add fetch size as option to impala shell

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 be/src/scheduling/admission-controller-test.cc     | 260 ++++++---------------
 be/src/scheduling/admission-controller.cc          | 240 +++++--------------
 be/src/scheduling/admission-controller.h           |  53 +----
 be/src/util/internal-queue.h                       |  26 ++-
 bin/impala-config.sh                               |   8 +-
 common/thrift/ImpalaInternalService.thrift         |  17 +-
 common/thrift/metrics.json                         |  60 -----
 fe/pom.xml                                         |  12 +
 .../org/apache/impala/util/RequestPoolService.java |  36 +--
 .../apache/impala/util/TestRequestPoolService.java |  14 +-
 fe/src/test/resources/fair-scheduler-test.xml      |   4 -
 fe/src/test/resources/fair-scheduler-test2.xml     |   6 +-
 fe/src/test/resources/llama-site-test.xml          |  12 -
 fe/src/test/resources/llama-site-test2.xml         |  26 +--
 impala-parent/pom.xml                              |   1 +
 shell/impala_client.py                             |  31 +--
 shell/impala_shell.py                              |  25 +-
 shell/option_parser.py                             |  13 ++
 testdata/pom.xml                                   |   6 +
 .../QueryTest/admission-max-min-mem-limits.test    |   1 -
 .../QueryTest/admission-reject-mem-estimate.test   |   9 +-
 .../admission-reject-min-reservation.test          |   2 +-
 tests/custom_cluster/test_admission_controller.py  | 188 +--------------
 tests/custom_cluster/test_hs2_fault_injection.py   |   4 +-
 tests/shell/test_shell_client.py                   |  96 ++++++++
 tests/shell/test_shell_commandline.py              |   9 +
 tests/webserver/test_web_pages.py                  |   5 -
 www/admission_controller.tmpl                      |  20 +-
 28 files changed, 342 insertions(+), 842 deletions(-)
 create mode 100644 tests/shell/test_shell_client.py


[impala] 02/04: IMPALA-9192: Move Avro-Java and Parquet dependencies to the CDP version

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stakiar pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 0bc22205d8647063098562502202bec0f0593b55
Author: Laszlo Gaal <la...@cloudera.com>
AuthorDate: Fri May 15 22:41:57 2020 +0200

    IMPALA-9192: Move Avro-Java and Parquet dependencies to the CDP version
    
    IMPALA-9731 adopted the CDP version of many Hadoop dependencies.
    This patch moves the Avro and Parquet Java components to their CDP
    versions so that they are aligned with the other Hadoop components.
    
    Test: Ran tests successfully in core mode.
    
    Change-Id: I49c7c5832b5ba53a00b098642f6c64616eb944bd
    Reviewed-on: http://gerrit.cloudera.org:8080/15933
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
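
An editorial note for orientation: every CDP component version exported in
bin/impala-config.sh below shares the CDP build suffix "7.2.1.0-57". A minimal
Python sketch of a hypothetical alignment check (not part of this patch; it
assumes each component's own version has three dot-separated parts, as all of
the versions below do) that could be run after sourcing bin/impala-config.sh:

    import os

    # Hypothetical check: every CDP_*_VERSION exported by bin/impala-config.sh
    # should end with the same CDP build suffix, e.g. "7.2.1.0-57".
    cdp_vars = ["CDP_AVRO_JAVA_VERSION", "CDP_HADOOP_VERSION",
                "CDP_PARQUET_VERSION", "CDP_HIVE_VERSION"]
    # "1.8.2.7.2.1.0-57".split(".", 3)[3] -> "7.2.1.0-57"
    suffixes = {os.environ[v].split(".", 3)[3] for v in cdp_vars if v in os.environ}
    assert len(suffixes) <= 1, "CDP versions are not aligned: %s" % suffixes
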
---
 bin/impala-config.sh  |  8 ++++++--
 fe/pom.xml            | 12 ++++++++++++
 impala-parent/pom.xml |  1 +
 testdata/pom.xml      |  6 ++++++
 4 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index c6387b7..728d52a 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -175,16 +175,16 @@ export CDH_MAVEN_REPOSITORY=\
 export CDP_BUILD_NUMBER=3192304
 export CDP_MAVEN_REPOSITORY=\
 "https://${IMPALA_TOOLCHAIN_HOST}/build/cdp_components/${CDP_BUILD_NUMBER}/maven"
+export CDP_AVRO_JAVA_VERSION=1.8.2.7.2.1.0-57
 export CDP_HADOOP_VERSION=3.1.1.7.2.1.0-57
 export CDP_HBASE_VERSION=2.2.3.7.2.1.0-57
 export CDP_HIVE_VERSION=3.1.3000.7.2.1.0-57
 export CDP_KNOX_VERSION=1.3.0.7.2.1.0-57
 export CDP_OZONE_VERSION=0.6.0.7.2.1.0-57
+export CDP_PARQUET_VERSION=1.10.99.7.2.1.0-57
 export CDP_RANGER_VERSION=2.0.0.7.2.1.0-57
 export CDP_TEZ_VERSION=0.9.1.7.2.1.0-57
 
-export IMPALA_PARQUET_VERSION=1.10.99-cdh6.x-SNAPSHOT
-export IMPALA_AVRO_JAVA_VERSION=1.8.2-cdh6.x-SNAPSHOT
 export IMPALA_HUDI_VERSION=0.5.0-incubating
 export IMPALA_KITE_VERSION=1.0.0-cdh6.x-SNAPSHOT
 export IMPALA_ORC_JAVA_VERSION=1.6.2
@@ -226,6 +226,7 @@ export CDP_TEZ_URL=${CDP_TEZ_URL-}
 
 export CDP_COMPONENTS_HOME="$IMPALA_TOOLCHAIN/cdp_components-$CDP_BUILD_NUMBER"
 export CDH_MAJOR_VERSION=7
+export IMPALA_AVRO_JAVA_VERSION=${CDP_AVRO_JAVA_VERSION}
 export IMPALA_HADOOP_VERSION=${CDP_HADOOP_VERSION}
 export IMPALA_HADOOP_URL=${CDP_HADOOP_URL-}
 export HADOOP_HOME="$CDP_COMPONENTS_HOME/hadoop-${IMPALA_HADOOP_VERSION}/"
@@ -236,6 +237,7 @@ export IMPALA_HIVE_URL=${CDP_HIVE_URL-}
 export IMPALA_HIVE_SOURCE_URL=${CDP_HIVE_SOURCE_URL-}
 export IMPALA_KNOX_VERSION=${CDP_KNOX_VERSION}
 export IMPALA_OZONE_VERSION=${CDP_OZONE_VERSION}
+export IMPALA_PARQUET_VERSION=${CDP_PARQUET_VERSION}
 export IMPALA_RANGER_VERSION=${CDP_RANGER_VERSION}
 export IMPALA_RANGER_URL=${CDP_RANGER_URL-}
 export IMPALA_TEZ_VERSION=${CDP_TEZ_VERSION}
@@ -740,6 +742,8 @@ echo "CDH_BUILD_NUMBER        = $CDH_BUILD_NUMBER"
 echo "CDP_BUILD_NUMBER        = $CDP_BUILD_NUMBER"
 echo "CDP_COMPONENTS_HOME     = $CDP_COMPONENTS_HOME"
 echo "IMPALA_HADOOP_VERSION   = $IMPALA_HADOOP_VERSION"
+echo "IMPALA_AVRO_JAVA_VERSION= $IMPALA_AVRO_JAVA_VERSION"
+echo "IMPALA_PARQUET_VERSION  = $IMPALA_PARQUET_VERSION"
 echo "IMPALA_HIVE_VERSION     = $IMPALA_HIVE_VERSION"
 echo "IMPALA_HBASE_VERSION    = $IMPALA_HBASE_VERSION"
 echo "IMPALA_HUDI_VERSION     = $IMPALA_HUDI_VERSION"
diff --git a/fe/pom.xml b/fe/pom.xml
index 5b21cf3..ec40300 100644
--- a/fe/pom.xml
+++ b/fe/pom.xml
@@ -172,6 +172,10 @@ under the License.
             <artifactId>parquet-avro</artifactId>
           </exclusion>
           <exclusion>
+            <groupId>org.apache.avro</groupId>
+            <artifactId>*</artifactId>
+          </exclusion>
+          <exclusion>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>*</artifactId>
           </exclusion>
@@ -244,6 +248,12 @@ under the License.
     </dependency>
 
     <dependency>
+      <groupId>org.apache.avro</groupId>
+      <artifactId>avro</artifactId>
+      <version>${avro.version}</version>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.orc</groupId>
       <artifactId>orc-core</artifactId>
       <version>${orc.version}</version>
@@ -698,6 +708,7 @@ under the License.
                     <exclude>org.apache.hive:*</exclude>
                     <exclude>org.apache.kudu:*</exclude>
                     <exclude>org.apache.parquet:*</exclude>
+                    <exclude>org.apache.avro:*</exclude>
                     <exclude>org.apache.orc:*</exclude>
                   </excludes>
                   <includes>
@@ -710,6 +721,7 @@ under the License.
                     <include>org.apache.hive:*:${hive.version}</include>
                     <include>org.apache.hive:hive-storage-api:${hive.storage.api.version}</include>
                     <include>org.apache.kudu:*:${kudu.version}</include>
+                    <include>org.apache.avro:*:${avro.version}</include>
                     <include>org.apache.parquet:*:${parquet.version}</include>
                     <include>org.apache.orc:*:${orc.version}</include>
                   </includes>
diff --git a/impala-parent/pom.xml b/impala-parent/pom.xml
index 3048f0b..e3a0db1 100644
--- a/impala-parent/pom.xml
+++ b/impala-parent/pom.xml
@@ -38,6 +38,7 @@ under the License.
     <ranger.version>${env.IMPALA_RANGER_VERSION}</ranger.version>
     <postgres.jdbc.version>${env.IMPALA_POSTGRES_JDBC_DRIVER_VERSION}</postgres.jdbc.version>
     <hbase.version>${env.IMPALA_HBASE_VERSION}</hbase.version>
+    <avro.version>${env.IMPALA_AVRO_JAVA_VERSION}</avro.version>
     <orc.version>${env.IMPALA_ORC_JAVA_VERSION}</orc.version>
     <ozone.version>${env.IMPALA_OZONE_VERSION}</ozone.version>
     <parquet.version>${env.IMPALA_PARQUET_VERSION}</parquet.version>
diff --git a/testdata/pom.xml b/testdata/pom.xml
index 74ce872..3d8c12f 100644
--- a/testdata/pom.xml
+++ b/testdata/pom.xml
@@ -157,6 +157,12 @@ under the License.
     </dependency>
 
     <dependency>
+      <groupId>org.apache.avro</groupId>
+      <artifactId>avro</artifactId>
+      <version>${avro.version}</version>
+    </dependency>
+
+    <dependency>
       <groupId>org.kitesdk</groupId>
       <artifactId>kite-data-core</artifactId>
       <version>${kite.version}</version>


[impala] 04/04: IMPALA-9818: Add fetch size as option to impala shell

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stakiar pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 3088ca8580ed2f8c87235cbee3f3b3a90c292218
Author: Sahil Takiar <ta...@gmail.com>
AuthorDate: Fri Jun 5 13:32:54 2020 -0700

    IMPALA-9818: Add fetch size as option to impala shell
    
    Adds the option --fetch_size to the Impala shell. This new option allows
    users to specify the fetch size used when issuing fetch RPCs to the
    Impala Coordinator (e.g. TFetchResultsReq and BeeswaxService.fetch).
    This parameter applies to all client protocols: beeswax, hs2, hs2-http.
    The default --fetch_size is set to 10240 (10x the default batch size).
    
    The new --fetch_size parameter is most effective when result spooling is
    enabled. When result spooling is disabled, Impala can only return a
    single row batch per fetch RPC (so 1024 rows by default). When result
    spooling is enabled, Impala can return up to 100 row batches per fetch
    request.
    
    Removes some logic in the impala_client.py file that attempts to
    simulate a fetch_size. The code would issue multiple fetch requests to
    fulfill the given fetch_size. This logic is no longer needed now that
    result spooling is available.
    
    Testing:
    * Ran core tests
    * Added new tests in test_shell_client.py and test_shell_commandline.py
    
    Change-Id: I8dc7962aada6b38795241d067a99bd94fabca57b
    Reviewed-on: http://gerrit.cloudera.org:8080/16041
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Sahil Takiar <st...@cloudera.com>
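
To make the arithmetic above concrete, here is a rough editorial sketch in
Python (not Impala shell code) of how many fetch RPCs a result set needs,
assuming the defaults stated in this message (fetch_size=10240,
batch_size=1024) and the simplification that a spooled fetch can always fill
fetch_size (in practice it is capped at up to 100 row batches per fetch):

    # Sketch: estimate the fetch RPC count for a result set of num_rows rows.
    def estimated_fetch_rpcs(num_rows, fetch_size=10240, batch_size=1024,
                             spooling=True):
        # Without result spooling, each fetch RPC returns at most one row
        # batch, so the effective per-RPC limit drops to the batch size.
        per_rpc = fetch_size if spooling else min(fetch_size, batch_size)
        return -(-num_rows // per_rpc)  # ceiling division

    print(estimated_fetch_rpcs(100000, spooling=True))   # 10
    print(estimated_fetch_rpcs(100000, spooling=False))  # 98

From the command line the option is passed as, for example,
impala-shell -B --fetch_size 512 -q "...", as exercised in
test_shell_commandline.py below.
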
---
 shell/impala_client.py                           | 31 ++------
 shell/impala_shell.py                            | 25 +++---
 shell/option_parser.py                           | 13 ++++
 tests/custom_cluster/test_hs2_fault_injection.py |  4 +-
 tests/shell/test_shell_client.py                 | 96 ++++++++++++++++++++++++
 tests/shell/test_shell_commandline.py            |  9 +++
 6 files changed, 141 insertions(+), 37 deletions(-)

diff --git a/shell/impala_client.py b/shell/impala_client.py
index 9e5704b..8973d59 100755
--- a/shell/impala_client.py
+++ b/shell/impala_client.py
@@ -114,7 +114,7 @@ class ImpalaClient(object):
   """Base class for shared functionality between HS2 and Beeswax. Includes stub methods
   for methods that are expected to be implemented in the subclasses.
   TODO: when beeswax support is removed, merge this with ImpalaHS2Client."""
-  def __init__(self, impalad, kerberos_host_fqdn, use_kerberos=False,
+  def __init__(self, impalad, fetch_size, kerberos_host_fqdn, use_kerberos=False,
                kerberos_service_name="impala", use_ssl=False, ca_cert=None, user=None,
                ldap_password=None, use_ldap=False, client_connect_timeout_ms=60000,
                verbose=True, use_http_base_transport=False, http_path=None):
@@ -133,7 +133,7 @@ class ImpalaClient(object):
     self.client_connect_timeout_ms = int(client_connect_timeout_ms)
     self.default_query_options = {}
     self.query_option_levels = {}
-    self.fetch_batch_size = 1024
+    self.fetch_size = fetch_size
     self.use_http_base_transport = use_http_base_transport
     self.http_path = http_path
     # This is set from ImpalaShell's signal handler when a query is cancelled
@@ -264,25 +264,10 @@ class ImpalaClient(object):
   def fetch(self, query_handle):
     """Returns an iterable of batches of result rows. Each batch is an iterable of rows.
     Each row is an iterable of strings in the format in which they should be displayed
-    Tries to ensure that the batches have a granularity of self.fetch_batch_size but
+    Tries to ensure that the batches have a granularity of self.fetch_size but
     does not guarantee it.
     """
-    result_rows = None
-    for rows in self._fetch_one_batch(query_handle):
-      if result_rows:
-        result_rows.extend(rows)
-      else:
-        result_rows = rows
-      if len(result_rows) > self.fetch_batch_size:
-        yield result_rows
-        result_rows = None
-
-    # Return the final batch of rows.
-    if result_rows:
-      yield result_rows
-
-  def _fetch_one_batch(self, query_handle):
-    """Returns an iterable of batches of result rows up to self.fetch_batch_size. Does
+    """Returns an iterable of batches of result rows up to self.fetch_size. Does
     not need to consolidate those batches into larger batches."""
     raise NotImplementedError()
 
@@ -761,7 +746,7 @@ class ImpalaHS2Client(ImpalaClient):
 
     return "{low}:{high}".format(low=low_hex, high=high_hex)
 
-  def _fetch_one_batch(self, query_handle):
+  def fetch(self, query_handle):
     assert query_handle.hasResultSet
     prim_types = [column.typeDesc.types[0].primitiveEntry.type
                   for column in query_handle.schema.columns]
@@ -769,7 +754,7 @@ class ImpalaHS2Client(ImpalaClient):
                         for prim_type in prim_types]
     while True:
       req = TFetchResultsReq(query_handle, TFetchOrientation.FETCH_NEXT,
-          self.fetch_batch_size)
+          self.fetch_size)
 
       def FetchResults():
         return self.imp_service.FetchResults(req)
@@ -1083,11 +1068,11 @@ class ImpalaBeeswaxClient(ImpalaClient):
       return self.ERROR_STATE
     return state
 
-  def _fetch_one_batch(self, query_handle):
+  def fetch(self, query_handle):
     while True:
       result, rpc_status = self._do_beeswax_rpc(
          lambda: self.imp_service.fetch(query_handle, False,
-                                        self.fetch_batch_size))
+                                        self.fetch_size))
       if rpc_status != RpcStatus.OK:
         raise RPCException()
       yield [row.split('\t') for row in result.data]
diff --git a/shell/impala_shell.py b/shell/impala_shell.py
index 322d94b..244bef2 100755
--- a/shell/impala_shell.py
+++ b/shell/impala_shell.py
@@ -217,6 +217,7 @@ class ImpalaShell(cmd.Cmd, object):
     self.ignore_query_failure = options.ignore_query_failure
 
     self.http_path = options.http_path
+    self.fetch_size = options.fetch_size
 
     # Due to a readline bug in centos/rhel7, importing it causes control characters to be
     # printed. This breaks any scripting against the shell in non-interactive mode. Since
@@ -543,22 +544,22 @@ class ImpalaShell(cmd.Cmd, object):
   def _new_impala_client(self):
     protocol = options.protocol.lower()
     if protocol == 'hs2':
-      return ImpalaHS2Client(self.impalad, self.kerberos_host_fqdn, self.use_kerberos,
-                          self.kerberos_service_name, self.use_ssl,
-                          self.ca_cert, self.user, self.ldap_password,
-                          self.use_ldap, self.client_connect_timeout_ms, self.verbose,
+      return ImpalaHS2Client(self.impalad, self.fetch_size, self.kerberos_host_fqdn,
+                          self.use_kerberos, self.kerberos_service_name, self.use_ssl,
+                          self.ca_cert, self.user, self.ldap_password, self.use_ldap,
+                          self.client_connect_timeout_ms, self.verbose,
                           use_http_base_transport=False, http_path=self.http_path)
     elif protocol == 'hs2-http':
-      return ImpalaHS2Client(self.impalad, self.kerberos_host_fqdn, self.use_kerberos,
-                          self.kerberos_service_name, self.use_ssl,
-                          self.ca_cert, self.user, self.ldap_password,
-                          self.use_ldap, self.client_connect_timeout_ms, self.verbose,
+      return ImpalaHS2Client(self.impalad, self.fetch_size, self.kerberos_host_fqdn,
+                          self.use_kerberos, self.kerberos_service_name, self.use_ssl,
+                          self.ca_cert, self.user, self.ldap_password, self.use_ldap,
+                          self.client_connect_timeout_ms, self.verbose,
                           use_http_base_transport=True, http_path=self.http_path)
     elif protocol == 'beeswax':
-      return ImpalaBeeswaxClient(self.impalad, self.kerberos_host_fqdn, self.use_kerberos,
-                          self.kerberos_service_name, self.use_ssl,
-                          self.ca_cert, self.user, self.ldap_password,
-                          self.use_ldap, self.client_connect_timeout_ms, self.verbose)
+      return ImpalaBeeswaxClient(self.impalad, self.fetch_size, self.kerberos_host_fqdn,
+                          self.use_kerberos, self.kerberos_service_name, self.use_ssl,
+                          self.ca_cert, self.user, self.ldap_password, self.use_ldap,
+                          self.client_connect_timeout_ms, self.verbose)
     else:
       err_msg = "Invalid --protocol value {0}, must be beeswax, hs2 or hs2-http."
       print(err_msg.format(protocol), file=sys.stderr)
diff --git a/shell/option_parser.py b/shell/option_parser.py
index 3a4f090..a48c053 100755
--- a/shell/option_parser.py
+++ b/shell/option_parser.py
@@ -284,6 +284,19 @@ def get_option_parser(defaults):
                     "enforce any http path for the incoming requests, deployments could "
                     "still put it behind a loadbalancer that can expect the traffic at a "
                     "certain path.")
+  parser.add_option("--fetch_size", type="int", dest="fetch_size", default=10240,
+                    help="The fetch size when fetching rows from the Impala coordinator. "
+                    "The fetch size controls how many rows a single fetch RPC request "
+                    "(RPC from the Impala shell to the Impala coordinator) reads at a "
+                    "time. This option is most effective when result spooling is enabled "
+                    "('spool_query_results'=true). When result spooling is enabled "
+                    "values over the batch_size are honored. When result spooling is "
+                    "disabled, values over the batch_size have no affect. By default, "
+                    "the fetch_size is set to 10240 which is equivalent to 10 row "
+                    "batches (assuming the default batch size). Note that if result "
+                    "spooling is disabled only a single row batch can be fetched at a "
+                    "time regardless of the specified fetch_size.")
+
 
   # add default values to the help text
   for option in parser.option_list:
diff --git a/tests/custom_cluster/test_hs2_fault_injection.py b/tests/custom_cluster/test_hs2_fault_injection.py
index 2393cf4..f450287 100644
--- a/tests/custom_cluster/test_hs2_fault_injection.py
+++ b/tests/custom_cluster/test_hs2_fault_injection.py
@@ -106,7 +106,7 @@ class TestHS2FaultInjection(CustomClusterTestSuite):
   impala-shell client"""
   def setup(self):
     impalad = IMPALAD_HS2_HTTP_HOST_PORT.split(":")
-    self.custom_hs2_http_client = FaultInjectingImpalaHS2Client(impalad,
+    self.custom_hs2_http_client = FaultInjectingImpalaHS2Client(impalad, 1024,
         kerberos_host_fqdn=None, use_http_base_transport=True, http_path='cliservice')
     self.transport = self.custom_hs2_http_client.transport
 
@@ -317,7 +317,7 @@ class TestHS2FaultInjection(CustomClusterTestSuite):
     self.transport.close()
     rows_fetched = self.custom_hs2_http_client.fetch(query_handle)
     for rows in rows_fetched:
-      num_rows += 1
+      num_rows += len(rows)
     assert num_rows == 1
     self.transport.close()
     self.close_query(query_handle)
diff --git a/tests/shell/test_shell_client.py b/tests/shell/test_shell_client.py
new file mode 100644
index 0000000..c5321eb
--- /dev/null
+++ b/tests/shell/test_shell_client.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env impala-python
+# -*- coding: utf-8 -*-
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from shell.impala_client import ImpalaBeeswaxClient, ImpalaHS2Client
+from tests.common.impala_test_suite import ImpalaTestSuite
+from tests.common.test_dimensions import create_client_protocol_dimension
+from util import get_impalad_host_port
+
+
+class TestShellClient(ImpalaTestSuite):
+  """Tests for the Impala Shell clients: ImpalaBeeswaxClient and ImpalaHS2Client."""
+
+  @classmethod
+  def get_workload(self):
+    return 'functional-query'
+
+  @classmethod
+  def add_test_dimensions(cls):
+    cls.ImpalaTestMatrix.add_dimension(create_client_protocol_dimension())
+
+  def test_fetch_size(self, vector):
+    """Tests that when result spooling is disabled, setting a small batch_size causes
+    the shell to fetch a single batch at a time, even when the configured fetch size is
+    larger than the batch_size."""
+    handle = None
+    num_rows = 100
+    batch_size = 10
+    query_options = {'batch_size': str(batch_size)}
+    client = self.__get_shell_client(vector)
+
+    try:
+      client.connect()
+      handle = client.execute_query(
+          "select * from functional.alltypes limit {0}".format(num_rows), query_options)
+      self.__fetch_rows(client.fetch(handle), batch_size, num_rows)
+    finally:
+      if handle is not None: client.close_query(handle)
+      client.close_connection()
+
+  def test_fetch_size_result_spooling(self, vector):
+    """Tests that when result spooling is enabled, that the exact fetch_size is honored
+    even if a small batch_size is configured."""
+    handle = None
+    fetch_size = 20
+    num_rows = 100
+    query_options = {'batch_size': '10', 'spool_query_results': 'true'}
+    client = self.__get_shell_client(vector, fetch_size)
+
+    try:
+      client.connect()
+      handle = client.execute_query(
+          "select * from functional.alltypes limit {0}".format(num_rows), query_options)
+      self.__fetch_rows(client.fetch(handle), num_rows / fetch_size, num_rows)
+    finally:
+      if handle is not None: client.close_query(handle)
+      client.close_connection()
+
+  def __fetch_rows(self, fetch_batches, num_batches, num_rows):
+    """Fetches all rows using the given fetch_batches generator. Asserts that num_batches
+    batches are produced by the generator and that num_rows are returned."""
+    num_batches_count = 0
+    rows_per_batch = num_rows / num_batches
+    for fetch_batch in fetch_batches:
+      assert len(fetch_batch) == rows_per_batch
+      num_batches_count += 1
+      if num_batches_count == num_batches: break
+    assert num_batches_count == num_batches
+
+  def __get_shell_client(self, vector, fetch_size=1024):
+    """Returns the client specified by the protocol in the given vector."""
+    impalad = get_impalad_host_port(vector).split(":")
+    protocol = vector.get_value("protocol")
+    if protocol == 'hs2':
+      return ImpalaHS2Client(impalad, fetch_size, None)
+    elif protocol == 'hs2-http':
+      return ImpalaHS2Client(impalad, fetch_size, None,
+              use_http_base_transport=True, http_path='cliservice')
+    elif protocol == 'beeswax':
+      return ImpalaBeeswaxClient(impalad, fetch_size, None)
diff --git a/tests/shell/test_shell_commandline.py b/tests/shell/test_shell_commandline.py
index b2fcb59..34d9dd9 100644
--- a/tests/shell/test_shell_commandline.py
+++ b/tests/shell/test_shell_commandline.py
@@ -1017,3 +1017,12 @@ class TestImpalaShell(ImpalaTestSuite):
       assert "3\t3\t30.3" in result.stdout, result.stdout
 
     assert "4\t4\t40.4" in result.stdout, result.stdout
+
+  def test_fetch_size(self, vector):
+    """Test the --fetch_size option with and without result spooling enabled."""
+    query = "select * from functional.alltypes limit 1024"
+    query_with_result_spooling = "set spool_query_results=true; " + query
+    for query in [query, query_with_result_spooling]:
+      result = run_impala_shell_cmd(vector, ['-q', query, '-B', '--fetch_size', '512'])
+      result_rows = result.stdout.strip().split('\n')
+      assert len(result_rows) == 1024


[impala] 01/04: IMPALA-9077: Remove scalable admission control configs

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stakiar pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit f9cb0a65fe68411e03a57681611cbcc87dfaaf50
Author: Bikramjeet Vig <bi...@gmail.com>
AuthorDate: Fri Jun 5 15:33:08 2020 -0700

    IMPALA-9077: Remove scalable admission control configs
    
    Removed the 3 scalable configs added in IMPALA-8536:
    - Max Memory Multiple
    - Max Running Queries Multiple
    - Max Queued Queries Multiple
    
    This patch removes the functionality related to those configs but
    retains the additional test coverage and cleanup added in
    IMPALA-8536. The removal makes it easier to enhance Admission
    Control using Executor Groups, which have turned out to be a
    useful building block.
    
    Testing:
    Ran core tests.
    
    Change-Id: Ib9bd63f03758a6c4eebb99c64ee67e60cb56b5ac
    Reviewed-on: http://gerrit.cloudera.org:8080/16039
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
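
With the three multiples removed, whether a pool is disabled reduces to its
two static limits. A minimal Python restatement (an editorial sketch, not the
C++ implementation) of the behavior exercised by the updated
checkPoolDisabled() cases in admission-controller-test.cc below:

    def pool_disabled(max_requests, max_mem_resources):
        # A pool is disabled if either static limit is explicitly set to 0.
        return max_requests == 0 or max_mem_resources == 0

    # Mirrors the four PoolDisabled test cases in this patch.
    assert pool_disabled(0, 0) is True
    assert pool_disabled(1, 1) is False
    assert pool_disabled(0, 1) is True
    assert pool_disabled(1, 0) is True
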
---
 be/src/scheduling/admission-controller-test.cc     | 260 ++++++---------------
 be/src/scheduling/admission-controller.cc          | 240 +++++--------------
 be/src/scheduling/admission-controller.h           |  53 +----
 common/thrift/ImpalaInternalService.thrift         |  17 +-
 common/thrift/metrics.json                         |  60 -----
 .../org/apache/impala/util/RequestPoolService.java |  36 +--
 .../apache/impala/util/TestRequestPoolService.java |  14 +-
 fe/src/test/resources/fair-scheduler-test.xml      |   4 -
 fe/src/test/resources/fair-scheduler-test2.xml     |   6 +-
 fe/src/test/resources/llama-site-test.xml          |  12 -
 fe/src/test/resources/llama-site-test2.xml         |  26 +--
 .../QueryTest/admission-max-min-mem-limits.test    |   1 -
 .../QueryTest/admission-reject-mem-estimate.test   |   9 +-
 .../admission-reject-min-reservation.test          |   2 +-
 tests/custom_cluster/test_admission_controller.py  | 188 +--------------
 tests/webserver/test_web_pages.py                  |   5 -
 www/admission_controller.tmpl                      |  20 +-
 17 files changed, 158 insertions(+), 795 deletions(-)

diff --git a/be/src/scheduling/admission-controller-test.cc b/be/src/scheduling/admission-controller-test.cc
index 3c54061..4308de0 100644
--- a/be/src/scheduling/admission-controller-test.cc
+++ b/be/src/scheduling/admission-controller-test.cc
@@ -199,10 +199,7 @@ class AdmissionControllerTest : public testing::Test {
   static void CheckPoolConfig(RequestPoolService& request_pool_service,
       const string pool_name, const int64_t max_requests, const int64_t max_mem_resources,
       const int64_t queue_timeout_ms, const bool clamp_mem_limit_query_option,
-      const int64_t min_query_mem_limit = 0, const int64_t max_query_mem_limit = 0,
-      const double max_running_queries_multiple = 0.0,
-      const double max_queued_queries_multiple = 0.0,
-      const int64_t max_memory_multiple = 0) {
+      const int64_t min_query_mem_limit = 0, const int64_t max_query_mem_limit = 0) {
     TPoolConfig config;
     ASSERT_OK(request_pool_service.GetPoolConfig(pool_name, &config));
 
@@ -212,9 +209,6 @@ class AdmissionControllerTest : public testing::Test {
     ASSERT_EQ(clamp_mem_limit_query_option, config.clamp_mem_limit_query_option);
     ASSERT_EQ(min_query_mem_limit, config.min_query_mem_limit);
     ASSERT_EQ(max_query_mem_limit, config.max_query_mem_limit);
-    ASSERT_EQ(max_running_queries_multiple, config.max_running_queries_multiple);
-    ASSERT_EQ(max_queued_queries_multiple, config.max_queued_queries_multiple);
-    ASSERT_EQ(max_memory_multiple, config.max_memory_multiple);
   }
 
   /// Check that a PoolStats object has all zero values.
@@ -230,29 +224,6 @@ class AdmissionControllerTest : public testing::Test {
     ASSERT_EQ(0, pool_stats->metrics()->agg_num_running->GetValue());
   }
 
-  /// Check the calculations made by GetMaxQueuedForPool and GetMaxRequestsForPool are
-  /// rounded correctly.
-  static void CheckRoundingForPool(AdmissionController* admission_controller,
-      const int expected_result, const double multiple, const int host_count) {
-    TPoolConfig config_round;
-    config_round.max_queued_queries_multiple = multiple;
-    config_round.max_queued = 0;
-    config_round.max_running_queries_multiple = multiple;
-    config_round.max_requests = 0;
-
-    int64_t num_queued_rounded =
-        admission_controller->GetMaxQueuedForPool(config_round, host_count);
-    ASSERT_EQ(expected_result, num_queued_rounded)
-        << "with max_queued_queries_multiple=" << config_round.max_queued_queries_multiple
-        << " host_count=" << host_count;
-
-    int64_t num_requests_rounded =
-        admission_controller->GetMaxRequestsForPool(config_round, host_count);
-    ASSERT_EQ(expected_result, num_requests_rounded)
-        << "with max_running_queries_multiple="
-        << config_round.max_running_queries_multiple << " host_count=" << host_count;
-  }
-
   /// Return the path of the configuration file in the test resources directory
   /// that has name 'file_name'.
   static string GetResourceFile(const string& file_name) {
@@ -274,14 +245,11 @@ class AdmissionControllerTest : public testing::Test {
         cmm, nullptr, request_pool_service, metric_group, *addr));
   }
 
-  static void checkPoolDisabled(bool expected_result, int64_t max_requests,
-      double max_running_queries_multiple, int64_t max_mem_resources,
-      int64_t max_memory_multiple) {
+  static void checkPoolDisabled(
+      bool expected_result, int64_t max_requests, int64_t max_mem_resources) {
     TPoolConfig pool_config;
     pool_config.max_requests = max_requests;
-    pool_config.max_running_queries_multiple = max_running_queries_multiple;
     pool_config.max_mem_resources = max_mem_resources;
-    pool_config.max_memory_multiple = max_memory_multiple;
     ASSERT_EQ(expected_result, AdmissionController::PoolDisabled(pool_config));
   }
 };
@@ -310,21 +278,18 @@ TEST_F(AdmissionControllerTest, Simple) {
 
   // Check that the query can be admitted.
   string not_admitted_reason;
-  int64_t host_count = 1;
   ASSERT_TRUE(admission_controller->CanAdmitRequest(
-      *query_schedule, config_c, host_count, true, &not_admitted_reason));
+      *query_schedule, config_c, true, &not_admitted_reason));
 
-  // Create a QuerySchedule just like 'query_schedule' but to run on 3 hosts.
+  // Create a QuerySchedule just like 'query_schedule' to run on 3 hosts which can't be
+  // admitted.
   QuerySchedule* query_schedule_3_hosts =
       MakeQuerySchedule(QUEUE_C, config_c, 3, 64L * MEGABYTE);
-  host_count = 3;
-  // This won't run as configuration using 'max_mem_resources' is not scalable.
   ASSERT_FALSE(admission_controller->CanAdmitRequest(
-      *query_schedule_3_hosts, config_c, host_count, true, &not_admitted_reason));
+      *query_schedule_3_hosts, config_c, true, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "Not enough aggregate memory available in pool root.queueC with max mem "
-      "resources 128.00 MB (configured statically). Needed 192.00 MB but only 128.00 "
-      "MB was available.");
+      "resources 128.00 MB. Needed 192.00 MB but only 128.00 MB was available.");
 
   // Make a TopicDeltaMap describing some activity on host1 and host2.
   TTopicDelta membership = MakeTopicDelta(false);
@@ -352,33 +317,12 @@ TEST_F(AdmissionControllerTest, Simple) {
 
   // Test that the query cannot be admitted now.
   ASSERT_FALSE(admission_controller->CanAdmitRequest(
-      *query_schedule, config_c, host_count, true, &not_admitted_reason));
+      *query_schedule, config_c, true, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
-      "number of running queries 11 is at or over limit 10 (configured statically).");
-}
-
-/// Test rounding of scalable configuration parameters.
-TEST_F(AdmissionControllerTest, CheckRounding) {
-  // Pass the paths of the configuration files as command line flags.
-  FLAGS_fair_scheduler_allocation_path = GetResourceFile("fair-scheduler-test2.xml");
-  FLAGS_llama_site_path = GetResourceFile("llama-site-test2.xml");
-
-  AdmissionController* ac = MakeAdmissionController(); // Short name to make code neat.
-
-  // The scalable configuration parameters 'max_running_queries_multiple' and
-  // 'max_queued_queries_multiple' are scaled by multiplying by the number of hosts.
-  // If the result is non-zero then this is rounded up.
-  CheckRoundingForPool(ac, /*expected*/ 0, /*parameter*/ 0, /*num hosts*/ 100);
-  CheckRoundingForPool(ac, /*expected*/ 3, /*parameter*/ 0.3, /*num hosts*/ 10);
-  CheckRoundingForPool(ac, /*expected*/ 4, /*parameter*/ 0.31, /*num hosts*/ 10);
-  CheckRoundingForPool(ac, /*expected*/ 1, /*parameter*/ 0.3, /*num hosts*/ 3);
-  CheckRoundingForPool(ac, /*expected*/ 1, /*parameter*/ 0.1, /*num hosts*/ 3);
-  CheckRoundingForPool(ac, /*expected*/ 3, /*parameter*/ 0.3, /*num hosts*/ 9);
-  CheckRoundingForPool(ac, /*expected*/ 2, /*parameter*/ 0.5, /*num hosts*/ 3);
-  CheckRoundingForPool(ac, /*expected*/ 10000, /*parameter*/ 100, /*num hosts*/ 100);
+      "number of running queries 11 is at or over limit 10.");
 }
 
-/// Test CanAdmitRequest using scalable memory parameter 'max_memory_multiple'.
+/// Test CanAdmitRequest in the context of aggregated memory required to admit a query.
 TEST_F(AdmissionControllerTest, CanAdmitRequestMemory) {
   // Pass the paths of the configuration files as command line flags.
   FLAGS_fair_scheduler_allocation_path = GetResourceFile("fair-scheduler-test2.xml");
@@ -390,8 +334,6 @@ TEST_F(AdmissionControllerTest, CanAdmitRequestMemory) {
   // Get the PoolConfig for QUEUE_D ("root.queueD").
   TPoolConfig config_d;
   ASSERT_OK(request_pool_service->GetPoolConfig(QUEUE_D, &config_d));
-  // This queue is using a scalable amount of memory.
-  ASSERT_EQ(40L * MEGABYTE, config_d.max_memory_multiple);
 
   // Check the PoolStats for QUEUE_D.
   AdmissionController::PoolStats* pool_stats =
@@ -406,14 +348,19 @@ TEST_F(AdmissionControllerTest, CanAdmitRequestMemory) {
   // Check that the query can be admitted.
   string not_admitted_reason;
   ASSERT_TRUE(admission_controller->CanAdmitRequest(
-      *query_schedule, config_d, host_count, true, &not_admitted_reason));
+      *query_schedule, config_d, true, &not_admitted_reason));
 
-  // The query scales with cluster size of 1000.
-  host_count = 1000;
-  QuerySchedule* query_schedule1000 =
+  // Tests that this query cannot be admitted.
+  // Increasing the number of hosts pushes the aggregate memory required to admit this
+  // query over the allowed limit.
+  host_count = 15;
+  QuerySchedule* query_schedule15 =
       MakeQuerySchedule(QUEUE_D, config_d, host_count, 30L * MEGABYTE);
-  ASSERT_TRUE(admission_controller->CanAdmitRequest(
-      *query_schedule1000, config_d, host_count, true, &not_admitted_reason));
+  ASSERT_FALSE(admission_controller->CanAdmitRequest(
+      *query_schedule15, config_d, true, &not_admitted_reason));
+  EXPECT_STR_CONTAINS(not_admitted_reason,
+      "Not enough aggregate memory available in pool root.queueD with max mem resources "
+      "400.00 MB. Needed 480.00 MB but only 400.00 MB was available.");
 
   // Create a QuerySchedule to run on QUEUE_D with per_host_mem_estimate of 50MB.
   // which is going to be too much memory.
@@ -421,16 +368,14 @@ TEST_F(AdmissionControllerTest, CanAdmitRequestMemory) {
   QuerySchedule* query_schedule_10_fail =
       MakeQuerySchedule(QUEUE_D, config_d, host_count, 50L * MEGABYTE);
 
-  // Test that this query cannot be admitted.
   ASSERT_FALSE(admission_controller->CanAdmitRequest(
-      *query_schedule_10_fail, config_d, host_count, true, &not_admitted_reason));
+      *query_schedule_10_fail, config_d, true, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "Not enough aggregate memory available in pool root.queueD with max mem resources "
-      "400.00 MB (calculated as 10 backends each with 40.00 MB). Needed 500.00 MB but "
-      "only 400.00 MB was available.");
+      "400.00 MB. Needed 500.00 MB but only 400.00 MB was available.");
 }
 
-/// Test CanAdmitRequest using scalable parameter 'max_running_queries_multiple'.
+/// Test CanAdmitRequest in the context of max running queries allowed.
 TEST_F(AdmissionControllerTest, CanAdmitRequestCount) {
   // Pass the paths of the configuration files as command line flags.
   FLAGS_fair_scheduler_allocation_path = GetResourceFile("fair-scheduler-test2.xml");
@@ -443,10 +388,6 @@ TEST_F(AdmissionControllerTest, CanAdmitRequestCount) {
   TPoolConfig config_d;
   ASSERT_OK(request_pool_service->GetPoolConfig(QUEUE_D, &config_d));
 
-  // This queue can run a scalable number of queries.
-  ASSERT_EQ(0.5, config_d.max_running_queries_multiple);
-  ASSERT_EQ(2.5, config_d.max_queued_queries_multiple);
-
   // Check the PoolStats for QUEUE_D.
   AdmissionController::PoolStats* pool_stats =
       admission_controller->GetPoolStats(QUEUE_D);
@@ -463,20 +404,19 @@ TEST_F(AdmissionControllerTest, CanAdmitRequestCount) {
 
   // Query can be admitted from queue...
   ASSERT_TRUE(admission_controller->CanAdmitRequest(
-      *query_schedule, config_d, host_count, true, &not_admitted_reason));
+      *query_schedule, config_d, true, &not_admitted_reason));
   // ... but same Query cannot be admitted directly.
   ASSERT_FALSE(admission_controller->CanAdmitRequest(
-      *query_schedule, config_d, host_count, false, &not_admitted_reason));
+      *query_schedule, config_d, false, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "queue is not empty (size 2); queued queries are executed first");
 
   // Simulate that there are 7 queries already running.
   pool_stats->agg_num_running_ = 7;
   ASSERT_FALSE(admission_controller->CanAdmitRequest(
-      *query_schedule, config_d, host_count, true, &not_admitted_reason));
-  EXPECT_STR_CONTAINS(not_admitted_reason,
-      "number of running queries 7 is at or over limit 6 (calculated as 12 backends each "
-      "with 0.5 queries)");
+      *query_schedule, config_d, true, &not_admitted_reason));
+  EXPECT_STR_CONTAINS(
+      not_admitted_reason, "number of running queries 7 is at or over limit 6");
 }
 
 /// Test CanAdmitRequest() using the slots mechanism that is enabled with non-default
@@ -503,8 +443,8 @@ TEST_F(AdmissionControllerTest, CanAdmitRequestSlots) {
   QuerySchedule* other_group_schedule =
       MakeQuerySchedule(QUEUE_D, config_d, host_count, 30L * MEGABYTE, "other_group");
   for (QuerySchedule* schedule : {default_group_schedule, other_group_schedule}) {
-    SetHostsInQuerySchedule(*schedule, 2, false,
-        MEGABYTE, 200L * MEGABYTE, slots_per_query, slots_per_host);
+    SetHostsInQuerySchedule(
+        *schedule, 2, false, MEGABYTE, 200L * MEGABYTE, slots_per_query, slots_per_host);
   }
   vector<TNetworkAddress> host_addrs = GetHostAddrs(*default_group_schedule);
   string not_admitted_reason;
@@ -514,10 +454,10 @@ TEST_F(AdmissionControllerTest, CanAdmitRequestSlots) {
 
   // Enough slots are available so it can be admitted in both cases.
   ASSERT_TRUE(admission_controller->CanAdmitRequest(
-      *default_group_schedule, config_d, host_count, true, &not_admitted_reason))
+      *default_group_schedule, config_d, true, &not_admitted_reason))
       << not_admitted_reason;
   ASSERT_TRUE(admission_controller->CanAdmitRequest(
-      *other_group_schedule, config_d, host_count, true, &not_admitted_reason))
+      *other_group_schedule, config_d, true, &not_admitted_reason))
       << not_admitted_reason;
 
   // Simulate that almost all the slots are in use, which prevents admission in the
@@ -525,10 +465,10 @@ TEST_F(AdmissionControllerTest, CanAdmitRequestSlots) {
   SetSlotsInUse(admission_controller, host_addrs, slots_per_host - 1);
 
   ASSERT_TRUE(admission_controller->CanAdmitRequest(
-      *default_group_schedule, config_d, host_count, true, &not_admitted_reason))
+      *default_group_schedule, config_d, true, &not_admitted_reason))
       << not_admitted_reason;
   ASSERT_FALSE(admission_controller->CanAdmitRequest(
-      *other_group_schedule, config_d, host_count, true, &not_admitted_reason));
+      *other_group_schedule, config_d, true, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "Not enough admission control slots available on host host1:25000. Needed 4 "
       "slots but 15/16 are already in use.");
@@ -562,22 +502,20 @@ TEST_F(AdmissionControllerTest, QueryRejection) {
   // Check messages from RejectForSchedule().
   string rejected_reason;
   ASSERT_TRUE(admission_controller->RejectForSchedule(
-      *query_schedule, config_d, host_count, host_count, &rejected_reason));
+      *query_schedule, config_d, &rejected_reason));
   EXPECT_STR_CONTAINS(rejected_reason,
-      "request memory needed 500.00 MB is greater than pool max mem resources 400.00 MB "
-      "(calculated as 10 backends each with 40.00 MB)");
+      "request memory needed 500.00 MB is greater than pool max mem resources 400.00 MB");
 
   // Adjust the QuerySchedule to have minimum memory reservation of 45MB.
   // This will be rejected immediately as minimum memory reservation is too high.
   SetHostsInQuerySchedule(*query_schedule, host_count, false, 45L * MEGABYTE);
   string rejected_reserved_reason;
   ASSERT_TRUE(admission_controller->RejectForSchedule(
-      *query_schedule, config_d, host_count, host_count, &rejected_reserved_reason));
+      *query_schedule, config_d, &rejected_reserved_reason));
   EXPECT_STR_CONTAINS(rejected_reserved_reason,
       "minimum memory reservation needed is greater than pool max mem resources. Pool "
-      "max mem resources: 400.00 MB (calculated as 10 backends each with 40.00 MB). "
-      "Cluster-wide memory reservation needed: 450.00 MB. Increase the pool max mem "
-      "resources.");
+      "max mem resources: 400.00 MB. Cluster-wide memory reservation needed: 450.00 MB. "
+      "Increase the pool max mem resources.");
 
   // Adjust the QuerySchedule to require many slots per node.
   // This will be rejected immediately in non-default executor groups
@@ -587,7 +525,7 @@ TEST_F(AdmissionControllerTest, QueryRejection) {
   string rejected_slots_reason;
   // Don't reject for default executor group.
   EXPECT_FALSE(admission_controller->RejectForSchedule(
-      *query_schedule, config_d, host_count, host_count, &rejected_slots_reason))
+      *query_schedule, config_d, &rejected_slots_reason))
       << rejected_slots_reason;
   // Reject for non-default executor group.
   QuerySchedule* other_group_schedule = MakeQuerySchedule(
@@ -595,8 +533,9 @@ TEST_F(AdmissionControllerTest, QueryRejection) {
   SetHostsInQuerySchedule(
       *other_group_schedule, 2, false, MEGABYTE, 200L * MEGABYTE, 16, 4);
   EXPECT_TRUE(admission_controller->RejectForSchedule(
-      *other_group_schedule, config_d, host_count, host_count, &rejected_slots_reason));
-  EXPECT_STR_CONTAINS(rejected_slots_reason, "number of admission control slots needed "
+      *other_group_schedule, config_d, &rejected_slots_reason));
+  EXPECT_STR_CONTAINS(rejected_slots_reason,
+      "number of admission control slots needed "
       "(16) on backend 'host1:25000' is greater than total slots available 4. Reduce "
       "mt_dop to less than 4 to ensure that the query can execute.");
   rejected_slots_reason = "";
@@ -604,7 +543,7 @@ TEST_F(AdmissionControllerTest, QueryRejection) {
   SetHostsInQuerySchedule(
       *other_group_schedule, 2, false, MEGABYTE, 200L * MEGABYTE, 4, 4);
   EXPECT_FALSE(admission_controller->RejectForSchedule(
-      *other_group_schedule, config_d, host_count, host_count, &rejected_slots_reason))
+      *other_group_schedule, config_d, &rejected_slots_reason))
       << rejected_slots_reason;
 
   // Overwrite min_query_mem_limit and max_query_mem_limit in config_d to test a message.
@@ -613,26 +552,24 @@ TEST_F(AdmissionControllerTest, QueryRejection) {
   config_d.max_query_mem_limit = 700L * MEGABYTE;
   string rejected_invalid_config_reason;
   ASSERT_TRUE(admission_controller->RejectForCluster(QUEUE_D, config_d,
-      /* admit_from_queue=*/false, host_count, &rejected_invalid_config_reason));
+      /* admit_from_queue=*/false, &rejected_invalid_config_reason));
   EXPECT_STR_CONTAINS(rejected_invalid_config_reason,
-      "The min_query_mem_limit 629145600 is greater than the current max_mem_resources "
-      "419430400 (calculated as 10 backends each with 40.00 MB); queries will not be "
-      "admitted until more executors are available.");
+      "Invalid pool config: the min_query_mem_limit 629145600 is greater than the "
+      "max_mem_resources 419430400");
 
   TPoolConfig config_disabled_queries;
   config_disabled_queries.max_requests = 0;
   string rejected_queries_reason;
   ASSERT_TRUE(admission_controller->RejectForCluster(QUEUE_D, config_disabled_queries,
-      /* admit_from_queue=*/false, host_count, &rejected_queries_reason));
+      /* admit_from_queue=*/false, &rejected_queries_reason));
   EXPECT_STR_CONTAINS(rejected_queries_reason, "disabled by requests limit set to 0");
 
   TPoolConfig config_disabled_memory;
   config_disabled_memory.max_requests = 1;
   config_disabled_memory.max_mem_resources = 0;
-  config_disabled_memory.max_memory_multiple = 0;
   string rejected_mem_reason;
   ASSERT_TRUE(admission_controller->RejectForCluster(QUEUE_D, config_disabled_memory,
-      /* admit_from_queue=*/false, host_count, &rejected_mem_reason));
+      /* admit_from_queue=*/false, &rejected_mem_reason));
   EXPECT_STR_CONTAINS(rejected_mem_reason, "disabled by pool max mem resources set to 0");
 
   TPoolConfig config_queue_small;
@@ -642,19 +579,8 @@ TEST_F(AdmissionControllerTest, QueryRejection) {
   pool_stats->agg_num_queued_ = 3;
   string rejected_queue_length_reason;
   ASSERT_TRUE(admission_controller->RejectForCluster(QUEUE_D, config_queue_small,
-      /* admit_from_queue=*/false, host_count, &rejected_queue_length_reason));
-  EXPECT_STR_CONTAINS(rejected_queue_length_reason,
-      "queue full, limit=3 (configured statically), num_queued=3.");
-
-  // Make max_queued_queries_multiple small so that rejection is because of the
-  // number of queries that can be queued.
-  config_queue_small.max_queued_queries_multiple = 0.3;
-  string rejected_queue_multiple_reason;
-  ASSERT_TRUE(admission_controller->RejectForCluster(QUEUE_D, config_queue_small,
-      /* admit_from_queue=*/false, host_count, &rejected_queue_multiple_reason));
-  EXPECT_STR_CONTAINS(rejected_queue_multiple_reason,
-      "queue full, limit=3 (calculated as 10 backends each with 0.3 queries), "
-      "num_queued=3.");
+      /* admit_from_queue=*/false, &rejected_queue_length_reason));
+  EXPECT_STR_CONTAINS(rejected_queue_length_reason, "queue full, limit=3, num_queued=3.");
 }
 
 /// Test GetMaxToDequeue() method.
@@ -674,21 +600,13 @@ TEST_F(AdmissionControllerTest, GetMaxToDequeue) {
   AdmissionController::RequestQueue& queue_c =
       admission_controller->request_queue_map_[QUEUE_C];
   ASSERT_OK(request_pool_service->GetPoolConfig(QUEUE_C, &config_c));
-  AdmissionController::RequestQueue& queue_d =
-      admission_controller->request_queue_map_[QUEUE_D];
 
   AdmissionController::PoolStats* stats_c = admission_controller->GetPoolStats(QUEUE_C);
-  AdmissionController::PoolStats* stats_d = admission_controller->GetPoolStats(QUEUE_D);
 
   int64_t max_to_dequeue;
-  int64_t host_count = 1;
-
   // Queue is empty, so nothing to dequeue
   max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, stats_c, config_c, host_count);
-  ASSERT_EQ(0, max_to_dequeue);
-  max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_d, stats_d, config_d, host_count);
+      admission_controller->GetMaxToDequeue(queue_c, stats_c, config_c);
   ASSERT_EQ(0, max_to_dequeue);
 
   AdmissionController::PoolStats stats(admission_controller, "test");
@@ -701,54 +619,20 @@ TEST_F(AdmissionControllerTest, GetMaxToDequeue) {
   stats.agg_num_queued_ = 20;
   stats.agg_num_running_ = 10;
   max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, &stats, config, host_count);
+      admission_controller->GetMaxToDequeue(queue_c, &stats, config);
   ASSERT_EQ(0, max_to_dequeue);
 
   // Can only dequeue 1.
   stats.agg_num_running_ = 9;
   max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, &stats, config, host_count);
+      admission_controller->GetMaxToDequeue(queue_c, &stats, config);
   ASSERT_EQ(1, max_to_dequeue);
 
   // There is space for 10 but it looks like there are 2 coordinators.
   stats.agg_num_running_ = 0;
   max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, &stats, config, host_count);
+      admission_controller->GetMaxToDequeue(queue_c, &stats, config);
   ASSERT_EQ(5, max_to_dequeue);
-
-  // Now test scalable configuration.
-
-  config.max_running_queries_multiple = 0.5;
-  max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, &stats, config, host_count);
-  ASSERT_EQ(1, max_to_dequeue);
-
-  config.max_running_queries_multiple = 5;
-  // At this point the host_count is one, so the estimate of the pool
-  // size will be 1. This coordinator will take its share (1/2) of the 5 that can run
-  max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, &stats, config, host_count);
-  ASSERT_EQ(2, max_to_dequeue);
-
-  // Add a lot of hosts, limitation will now be number queued
-  host_count = 100;
-  max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, &stats, config, host_count);
-  ASSERT_EQ(stats.local_stats_.num_queued, max_to_dequeue);
-
-  // Increase number queued.
-  host_count = 200;
-  stats.local_stats_.num_queued = host_count;
-  stats.agg_num_queued_ = host_count;
-  max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, &stats, config, host_count);
-  ASSERT_EQ(stats.local_stats_.num_queued, max_to_dequeue);
-
-  // Test max_running_queries_multiple less than 1.
-  config.max_running_queries_multiple = 0.5;
-  max_to_dequeue =
-      admission_controller->GetMaxToDequeue(queue_c, &stats, config, host_count);
-  ASSERT_EQ(100, max_to_dequeue);
 }
 
 /// Test that RequestPoolService correctly reads configuration files.
@@ -766,8 +650,6 @@ TEST_F(AdmissionControllerTest, Config) {
   CheckPoolConfig(request_pool_service, QUEUE_A, 1, 100000L * MEGABYTE, 50, true);
   CheckPoolConfig(request_pool_service, QUEUE_B, 5, -1, 600000, true);
   CheckPoolConfig(request_pool_service, QUEUE_C, 10, 128L * MEGABYTE, 30000, true);
-  CheckPoolConfig(request_pool_service, QUEUE_D, 5, MEGABYTE * 1024L, 30000, true, 10,
-      60L * MEGABYTE, 0.5, 2.5, 40L * MEGABYTE);
 }
 
 /// Unit test for PoolStats
@@ -810,16 +692,10 @@ TEST_F(AdmissionControllerTest, PoolStats) {
 
 /// Test that PoolDisabled works
 TEST_F(AdmissionControllerTest, PoolDisabled) {
-  checkPoolDisabled(true, /* max_requests */ 0, /* max_running_queries_multiple */ 0,
-      /* max_mem_resources */ 0, /* max_memory_multiple */ 0);
-  checkPoolDisabled(false, /* max_requests */ 1, /* max_running_queries_multiple */ 0,
-      /* max_mem_resources */ 1, /* max_memory_multiple */ 0);
-  checkPoolDisabled(false, /* max_requests */ 0, /* max_running_queries_multiple */ 1.0,
-      /* max_mem_resources */ 0, /* max_memory_multiple */ 1);
-  checkPoolDisabled(true, /* max_requests */ 0, /* max_running_queries_multiple */ 0,
-      /* max_mem_resources */ 0, /* max_memory_multiple */ 1);
-  checkPoolDisabled(true, /* max_requests */ 0, /* max_running_queries_multiple */ 1.0,
-      /* max_mem_resources */ 0, /* max_memory_multiple */ 0);
+  checkPoolDisabled(true, /* max_requests */ 0, /* max_mem_resources */ 0);
+  checkPoolDisabled(false, /* max_requests */ 1, /* max_mem_resources */ 1);
+  checkPoolDisabled(true, /* max_requests */ 0, /* max_mem_resources */ 1);
+  checkPoolDisabled(true, /* max_requests */ 1, /* max_mem_resources */ 0);
 }
 
 // Basic tests of the QuerySchedule object to confirm that a query with different
@@ -947,13 +823,13 @@ TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionChecks) {
   // queued if there is not enough capacity.
   query_schedule->UpdateMemoryRequirements(pool_config);
   ASSERT_FALSE(admission_controller->RejectForSchedule(
-      *query_schedule, pool_config, 2, 2, &not_admitted_reason));
+      *query_schedule, pool_config, &not_admitted_reason));
   ASSERT_TRUE(admission_controller->HasAvailableMemResources(
-      *query_schedule, pool_config, 2, &not_admitted_reason));
+      *query_schedule, pool_config, &not_admitted_reason));
   // Coord does not have enough available memory.
   admission_controller->host_stats_[coord_host].mem_reserved = 500 * MEGABYTE;
   ASSERT_FALSE(admission_controller->HasAvailableMemResources(
-      *query_schedule, pool_config, 2, &not_admitted_reason));
+      *query_schedule, pool_config, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "Not enough memory available on host host1:25000. Needed 150.00 MB but only "
       "12.00 MB out of 512.00 MB was available.");
@@ -961,7 +837,7 @@ TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionChecks) {
   // Neither coordinator or executor has enough available memory.
   admission_controller->host_stats_[exec_host].mem_reserved = 500 * MEGABYTE;
   ASSERT_FALSE(admission_controller->HasAvailableMemResources(
-      *query_schedule, pool_config, 2, &not_admitted_reason));
+      *query_schedule, pool_config, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "Not enough memory available on host host2:25000. Needed 1.00 GB but only "
       "524.00 MB out of 1.00 GB was available.");
@@ -969,7 +845,7 @@ TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionChecks) {
   // Executor does not have enough available memory.
   admission_controller->host_stats_[coord_host].mem_reserved = 0;
   ASSERT_FALSE(admission_controller->HasAvailableMemResources(
-      *query_schedule, pool_config, 2, &not_admitted_reason));
+      *query_schedule, pool_config, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "Not enough memory available on host host2:25000. Needed 1.00 GB but only "
       "524.00 MB out of 1.00 GB was available.");
@@ -983,12 +859,12 @@ TEST_F(AdmissionControllerTest, DedicatedCoordAdmissionChecks) {
   (*per_backend_exec_params)[coord_addr] = *coord_exec_params;
   query_schedule->set_per_backend_exec_params(*per_backend_exec_params);
   ASSERT_TRUE(admission_controller->RejectForSchedule(
-      *query_schedule, pool_config, 2, 2, &not_admitted_reason));
+      *query_schedule, pool_config, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "request memory needed 150.00 MB is greater than memory available for "
       "admission 100.00 MB of host1:25000");
   ASSERT_FALSE(admission_controller->HasAvailableMemResources(
-      *query_schedule, pool_config, 2, &not_admitted_reason));
+      *query_schedule, pool_config, &not_admitted_reason));
   EXPECT_STR_CONTAINS(not_admitted_reason,
       "Not enough memory available on host host1:25000. Needed 150.00 MB but only "
       "100.00 MB out of 100.00 MB was available.");
diff --git a/be/src/scheduling/admission-controller.cc b/be/src/scheduling/admission-controller.cc
index 4844531..6337288 100644
--- a/be/src/scheduling/admission-controller.cc
+++ b/be/src/scheduling/admission-controller.cc
@@ -129,18 +129,6 @@ const string POOL_MIN_QUERY_MEM_LIMIT_METRIC_KEY_FORMAT =
   "admission-controller.pool-min-query-mem-limit.$0";
 const string POOL_CLAMP_MEM_LIMIT_QUERY_OPTION_METRIC_KEY_FORMAT =
   "admission-controller.pool-clamp-mem-limit-query-option.$0";
-const string POOL_MAX_RUNNING_QUERIES_MULTIPLE_METRIC_KEY_FORMAT =
-  "admission-controller.pool-max-running-queries-multiple.$0";
-const string POOL_MAX_QUEUED_QUERIES_MULTIPLE_METRIC_KEY_FORMAT =
-  "admission-controller.pool-max-queued-queries-multiple.$0";
-const string POOL_MAX_MEMORY_MULTIPLE_METRIC_KEY_FORMAT =
-  "admission-controller.pool-max-memory-multiple.$0";
-const string POOL_MAX_RUNNING_QUERIES_DERIVED_METRIC_KEY_FORMAT =
-  "admission-controller.pool-max-running-queries-derived.$0";
-const string POOL_MAX_QUEUED_QUERIES_DERIVED_METRIC_KEY_FORMAT =
-  "admission-controller.max-queued-queries-derived.$0";
-const string POOL_MAX_MEMORY_DERIVED_METRIC_KEY_FORMAT =
-  "admission-controller.max-memory-derived.$0";
 
 // Profile query events
 const string QUERY_EVENT_SUBMIT_FOR_ADMISSION = "Submit for admission";
@@ -172,12 +160,9 @@ const string AdmissionController::PROFILE_TIME_SINCE_LAST_UPDATE_COUNTER_NAME =
     "AdmissionControlTimeSinceLastUpdate";
 
 // Error status string details
-const string REASON_INVALID_POOL_CONFIG_MIN_LIMIT_MAX_MEM_FIXED =
+const string REASON_INVALID_POOL_CONFIG_MIN_LIMIT_MAX_MEM =
     "Invalid pool config: the min_query_mem_limit $0 is greater than the "
-    "max_mem_resources $1 (configured statically)";
-const string REASON_INVALID_POOL_CONFIG_MIN_LIMIT_MAX_MEM_MULTIPLE =
-    "The min_query_mem_limit $0 is greater than the current max_mem_resources $1 ($2); "
-    "queries will not be admitted until more executors are available.";
+    "max_mem_resources $1";
 const string REASON_INVALID_POOL_CONFIG_MIN_LIMIT_MAX_LIMIT =
     "Invalid pool config: the min_query_mem_limit is greater than the "
     "max_query_mem_limit ($0 > $1)";
@@ -198,17 +183,17 @@ const string REASON_NOT_ENOUGH_SLOTS_ON_BACKEND =
     "execute.";
 const string REASON_MIN_RESERVATION_OVER_POOL_MEM =
     "minimum memory reservation needed is greater than pool max mem resources. Pool "
-    "max mem resources: $0 ($1). Cluster-wide memory reservation needed: $2. Increase "
-    "the pool max mem resources. See the query profile for more information about the "
+    "max mem resources: $0. Cluster-wide memory reservation needed: $1. Increase the "
+    "pool max mem resources. See the query profile for more information about the "
     "per-node memory requirements.";
 const string REASON_DISABLED_MAX_MEM_RESOURCES =
     "disabled by pool max mem resources set to 0";
 const string REASON_DISABLED_REQUESTS_LIMIT = "disabled by requests limit set to 0";
-// $2 is the description of how the queue limit was calculated, $3 is the staleness
-// detail.
+// $0 = queue limit, $1 = number of queued queries, $2 = staleness detail.
-const string REASON_QUEUE_FULL = "queue full, limit=$0 ($1), num_queued=$2.$3";
+const string REASON_QUEUE_FULL = "queue full, limit=$0, num_queued=$1.$2";
 const string REASON_REQ_OVER_POOL_MEM =
-    "request memory needed $0 is greater than pool max mem resources $1 ($2).\n\n"
+    "request memory needed $0 is greater than pool max mem resources $1.\n\n"
     "Use the MEM_LIMIT query option to indicate how much memory is required per node. "
     "The total memory needed is the per-node MEM_LIMIT times the number of nodes "
     "executing the query. See the Admission Control documentation for more information.";
@@ -229,18 +214,16 @@ const string REASON_NO_EXECUTOR_GROUPS = "Waiting for executors to start. Only D
     "queries can currently run.";
 
 // Queue decision details
-// $0 = num running queries, $1 = num queries limit, $2 = num queries limit explanation,
-// $3 = staleness detail
+// $0 = num running queries, $1 = num queries limit, $2 = staleness detail
 const string QUEUED_NUM_RUNNING =
-    "number of running queries $0 is at or over limit $1 ($2)$3.";
+    "number of running queries $0 is at or over limit $1.$2";
 // $0 = queue size, $1 = staleness detail
 const string QUEUED_QUEUE_NOT_EMPTY = "queue is not empty (size $0); queued queries are "
     "executed first.$1";
-// $0 = pool name, $1 = pool max memory, $2 = pool max memory explanation,
-// $3 = pool mem needed, $4 = pool mem available, $5 = staleness detail
-const string POOL_MEM_NOT_AVAILABLE =
-    "Not enough aggregate memory available in pool $0 "
-    "with max mem resources $1 ($2). Needed $3 but only $4 was available.$5";
+// $0 = pool name, $1 = pool max memory, $2 = pool mem needed, $3 = pool mem available,
+// $4 = staleness detail
+const string POOL_MEM_NOT_AVAILABLE = "Not enough aggregate memory available in pool $0 "
+    "with max mem resources $1. Needed $2 but only $3 was available.$4";
-// $0 = host name, $1 = host mem needed, $3 = host mem available, $4 = staleness detail
+// $0 = host name, $1 = host mem needed, $2 = host mem available,
+// $3 = host total mem, $4 = staleness detail
 const string HOST_MEM_NOT_AVAILABLE = "Not enough memory available on host $0. "
     "Needed $1 but only $2 out of $3 was available.$4";
@@ -500,9 +483,9 @@ bool AdmissionController::CanAccommodateMaxInitialReservation(
 }
 
 bool AdmissionController::HasAvailableMemResources(const QuerySchedule& schedule,
-    const TPoolConfig& pool_cfg, int64_t cluster_size, string* mem_unavailable_reason) {
+    const TPoolConfig& pool_cfg, string* mem_unavailable_reason) {
   const string& pool_name = schedule.request_pool();
-  const int64_t pool_max_mem = GetMaxMemForPool(pool_cfg, cluster_size);
+  const int64_t pool_max_mem = GetMaxMemForPool(pool_cfg);
   // If the pool doesn't have memory resources configured, always true.
   if (pool_max_mem < 0) return true;
 
@@ -519,12 +502,10 @@ bool AdmissionController::HasAvailableMemResources(const QuerySchedule& schedule
   VLOG_RPC << "Checking agg mem in pool=" << pool_name << " : " << stats->DebugString()
            << " executor_group=" << schedule.executor_group()
            << " cluster_mem_needed=" << PrintBytes(cluster_mem_to_admit)
-           << " pool_max_mem=" << PrintBytes(pool_max_mem) << " ("
-           << GetMaxMemForPoolDescription(pool_cfg, cluster_size) << ")";
+           << " pool_max_mem=" << PrintBytes(pool_max_mem);
   if (stats->EffectiveMemReserved() + cluster_mem_to_admit > pool_max_mem) {
     *mem_unavailable_reason = Substitute(POOL_MEM_NOT_AVAILABLE, pool_name,
-        PrintBytes(pool_max_mem), GetMaxMemForPoolDescription(pool_cfg, cluster_size),
-        PrintBytes(cluster_mem_to_admit),
+        PrintBytes(pool_max_mem), PrintBytes(cluster_mem_to_admit),
         PrintBytes(max(pool_max_mem - stats->EffectiveMemReserved(), 0L)),
         GetStalenessDetailLocked(" "));
     return false;
@@ -585,8 +566,7 @@ bool AdmissionController::HasAvailableSlots(const QuerySchedule& schedule,
 }
 
 bool AdmissionController::CanAdmitRequest(const QuerySchedule& schedule,
-    const TPoolConfig& pool_cfg, int64_t cluster_size, bool admit_from_queue,
-    string* not_admitted_reason) {
+    const TPoolConfig& pool_cfg, bool admit_from_queue, string* not_admitted_reason) {
   // Can't admit if:
   //  (a) There are already queued requests (and this is not admitting from the queue).
   //  (b) The resource pool is already at the maximum number of requests.
@@ -594,7 +574,7 @@ bool AdmissionController::CanAdmitRequest(const QuerySchedule& schedule,
   //      (when not using the default executor group).
   //  (d) There are not enough memory resources available for the query.
 
-  const int64_t max_requests = GetMaxRequestsForPool(pool_cfg, cluster_size);
+  const int64_t max_requests = GetMaxRequestsForPool(pool_cfg);
   PoolStats* pool_stats = GetPoolStats(schedule);
   bool default_group =
       schedule.executor_group() == ImpalaServer::DEFAULT_EXECUTOR_GROUP_NAME;
@@ -607,8 +587,7 @@ bool AdmissionController::CanAdmitRequest(const QuerySchedule& schedule,
     // All executor groups are limited by the aggregate number of queries running in the
     // pool.
     *not_admitted_reason = Substitute(QUEUED_NUM_RUNNING, pool_stats->agg_num_running(),
-        max_requests, GetMaxRequestsForPoolDescription(pool_cfg, cluster_size),
-        GetStalenessDetailLocked(" "));
+        max_requests, GetStalenessDetailLocked(" "));
     return false;
   }
   if (!default_group && !HasAvailableSlots(schedule, pool_cfg, not_admitted_reason)) {
@@ -617,40 +596,32 @@ bool AdmissionController::CanAdmitRequest(const QuerySchedule& schedule,
     // TODO(IMPALA-8757): Extend slot based admission to default executor group
     return false;
   }
-  if (!HasAvailableMemResources(schedule, pool_cfg, cluster_size, not_admitted_reason)) {
+  if (!HasAvailableMemResources(schedule, pool_cfg, not_admitted_reason)) {
     return false;
   }
   return true;
 }
 
 bool AdmissionController::RejectForCluster(const string& pool_name,
-    const TPoolConfig& pool_cfg, bool admit_from_queue, int64_t cluster_size,
-    string* rejection_reason) {
+    const TPoolConfig& pool_cfg, bool admit_from_queue, string* rejection_reason) {
   DCHECK(rejection_reason != nullptr && rejection_reason->empty());
 
   // Checks related to pool max_requests:
-  if (GetMaxRequestsForPool(pool_cfg, cluster_size) == 0) {
+  if (GetMaxRequestsForPool(pool_cfg) == 0) {
     *rejection_reason = REASON_DISABLED_REQUESTS_LIMIT;
     return true;
   }
 
   // Checks related to pool max_mem_resources:
-  int64_t max_mem = GetMaxMemForPool(pool_cfg, cluster_size);
+  int64_t max_mem = GetMaxMemForPool(pool_cfg);
   if (max_mem == 0) {
     *rejection_reason = REASON_DISABLED_MAX_MEM_RESOURCES;
     return true;
   }
 
   if (max_mem > 0 && pool_cfg.min_query_mem_limit > max_mem) {
-    if (PoolHasFixedMemoryLimit(pool_cfg)) {
-      *rejection_reason = Substitute(REASON_INVALID_POOL_CONFIG_MIN_LIMIT_MAX_MEM_FIXED,
-          pool_cfg.min_query_mem_limit, max_mem);
-    } else {
-      *rejection_reason =
-          Substitute(REASON_INVALID_POOL_CONFIG_MIN_LIMIT_MAX_MEM_MULTIPLE,
-              pool_cfg.min_query_mem_limit, max_mem,
-              GetMaxMemForPoolDescription(pool_cfg, cluster_size));
-    }
+    *rejection_reason = Substitute(REASON_INVALID_POOL_CONFIG_MIN_LIMIT_MAX_MEM,
+        pool_cfg.min_query_mem_limit, max_mem);
     return true;
   }
 
@@ -662,10 +633,9 @@ bool AdmissionController::RejectForCluster(const string& pool_name,
   }
 
   PoolStats* stats = GetPoolStats(pool_name);
-  int64_t max_queued = GetMaxQueuedForPool(pool_cfg, cluster_size);
+  int64_t max_queued = GetMaxQueuedForPool(pool_cfg);
   if (!admit_from_queue && stats->agg_num_queued() >= max_queued) {
-    *rejection_reason = Substitute(REASON_QUEUE_FULL, max_queued,
-        GetMaxQueuedForPoolDescription(pool_cfg, cluster_size), stats->agg_num_queued(),
+    *rejection_reason = Substitute(REASON_QUEUE_FULL, max_queued, stats->agg_num_queued(),
         GetStalenessDetailLocked(" "));
     return true;
   }
@@ -674,8 +644,7 @@ bool AdmissionController::RejectForCluster(const string& pool_name,
 }
 
 bool AdmissionController::RejectForSchedule(const QuerySchedule& schedule,
-    const TPoolConfig& pool_cfg, int64_t cluster_size, int64_t group_size,
-    string* rejection_reason) {
+    const TPoolConfig& pool_cfg, string* rejection_reason) {
   DCHECK(rejection_reason != nullptr && rejection_reason->empty());
   bool default_group =
       schedule.executor_group() == ImpalaServer::DEFAULT_EXECUTOR_GROUP_NAME;
@@ -752,7 +721,7 @@ bool AdmissionController::RejectForSchedule(const QuerySchedule& schedule,
   // Checks related to pool max_mem_resources:
-  // We perform these checks here against the group_size to prevent queuing up queries
-  // that would never be able to reserve the required memory on an executor group.
+  // We perform these checks here to prevent queuing up queries that would never be
+  // able to reserve the required memory on an executor group.
-  int64_t max_mem = GetMaxMemForPool(pool_cfg, group_size);
+  int64_t max_mem = GetMaxMemForPool(pool_cfg);
   if (max_mem == 0) {
     *rejection_reason = REASON_DISABLED_MAX_MEM_RESOURCES;
     return true;
@@ -760,15 +729,13 @@ bool AdmissionController::RejectForSchedule(const QuerySchedule& schedule,
   if (max_mem > 0) {
     if (cluster_min_mem_reservation_bytes > max_mem) {
       *rejection_reason = Substitute(REASON_MIN_RESERVATION_OVER_POOL_MEM,
-          PrintBytes(max_mem), GetMaxMemForPoolDescription(pool_cfg, group_size),
-          PrintBytes(cluster_min_mem_reservation_bytes));
+          PrintBytes(max_mem), PrintBytes(cluster_min_mem_reservation_bytes));
       return true;
     }
     int64_t cluster_mem_to_admit = schedule.GetClusterMemoryToAdmit();
     if (cluster_mem_to_admit > max_mem) {
-      *rejection_reason =
-          Substitute(REASON_REQ_OVER_POOL_MEM, PrintBytes(cluster_mem_to_admit),
-              PrintBytes(max_mem), GetMaxMemForPoolDescription(pool_cfg, group_size));
+      *rejection_reason = Substitute(REASON_REQ_OVER_POOL_MEM,
+          PrintBytes(cluster_mem_to_admit), PrintBytes(max_mem));
       return true;
     }
     int64_t executor_mem_to_admit = schedule.per_backend_mem_to_admit();
@@ -797,8 +764,7 @@ bool AdmissionController::RejectForSchedule(const QuerySchedule& schedule,
   return false;
 }
 
-void AdmissionController::PoolStats::UpdateConfigMetrics(
-    const TPoolConfig& pool_cfg, int64_t cluster_size) {
+void AdmissionController::PoolStats::UpdateConfigMetrics(const TPoolConfig& pool_cfg) {
   metrics_.pool_max_mem_resources->SetValue(pool_cfg.max_mem_resources);
   metrics_.pool_max_requests->SetValue(pool_cfg.max_requests);
   metrics_.pool_max_queued->SetValue(pool_cfg.max_queued);
@@ -806,18 +772,6 @@ void AdmissionController::PoolStats::UpdateConfigMetrics(
   metrics_.max_query_mem_limit->SetValue(pool_cfg.max_query_mem_limit);
   metrics_.min_query_mem_limit->SetValue(pool_cfg.min_query_mem_limit);
   metrics_.clamp_mem_limit_query_option->SetValue(pool_cfg.clamp_mem_limit_query_option);
-  metrics_.max_running_queries_multiple->SetValue(pool_cfg.max_running_queries_multiple);
-  metrics_.max_queued_queries_multiple->SetValue(pool_cfg.max_queued_queries_multiple);
-  metrics_.max_memory_multiple->SetValue(pool_cfg.max_memory_multiple);
-}
-
-void AdmissionController::PoolStats::UpdateDerivedMetrics(
-    const TPoolConfig& pool_cfg, int64_t cluster_size) {
-  metrics_.max_running_queries_derived->SetValue(
-      GetMaxRequestsForPool(pool_cfg, cluster_size));
-  metrics_.max_queued_queries_derived->SetValue(
-      GetMaxQueuedForPool(pool_cfg, cluster_size));
-  metrics_.max_memory_derived->SetValue(GetMaxMemForPool(pool_cfg, cluster_size));
 }
 
 Status AdmissionController::SubmitForAdmission(const AdmissionRequest& request,
@@ -845,7 +799,6 @@ Status AdmissionController::SubmitForAdmission(const AdmissionRequest& request,
       ResolvePoolAndGetConfig(request.request.query_ctx, &pool_name, &pool_cfg));
   request.summary_profile->AddInfoString("Request Pool", pool_name);
 
-  const int64_t cluster_size = GetClusterSize(*membership_snapshot);
   // We track this outside of the queue node so that it is still available after the query
   // has been dequeued.
   string initial_queue_reason;
@@ -857,11 +810,10 @@ Status AdmissionController::SubmitForAdmission(const AdmissionRequest& request,
 
     pool_config_map_[pool_name] = pool_cfg;
     PoolStats* stats = GetPoolStats(pool_name);
-    stats->UpdateConfigMetrics(pool_cfg, cluster_size);
-    stats->UpdateDerivedMetrics(pool_cfg, cluster_size);
+    stats->UpdateConfigMetrics(pool_cfg);
 
-    bool must_reject = !FindGroupToAdmitOrReject(cluster_size, membership_snapshot,
-        pool_cfg, /* admit_from_queue=*/false, stats, &queue_node);
+    bool must_reject = !FindGroupToAdmitOrReject(
+        membership_snapshot, pool_cfg, /* admit_from_queue=*/false, stats, &queue_node);
     if (must_reject) {
       AdmissionOutcome outcome = admit_outcome->Set(AdmissionOutcome::REJECTED);
       if (outcome != AdmissionOutcome::REJECTED) {
@@ -1254,14 +1206,13 @@ Status AdmissionController::ComputeGroupSchedules(
   return Status::OK();
 }
 
-bool AdmissionController::FindGroupToAdmitOrReject(int64_t cluster_size,
+bool AdmissionController::FindGroupToAdmitOrReject(
     ClusterMembershipMgr::SnapshotPtr membership_snapshot, const TPoolConfig& pool_config,
     bool admit_from_queue, PoolStats* pool_stats, QueueNode* queue_node) {
-  // Check for rejection based on current cluster size
+  // Check for rejection based on the pool configuration.
   const string& pool_name = pool_stats->name();
   string rejection_reason;
-  if (RejectForCluster(
-          pool_name, pool_config, admit_from_queue, cluster_size, &rejection_reason)) {
+  if (RejectForCluster(pool_name, pool_config, admit_from_queue, &rejection_reason)) {
     DCHECK(!rejection_reason.empty());
     queue_node->not_admitted_reason = rejection_reason;
     return false;
@@ -1290,33 +1241,29 @@ bool AdmissionController::FindGroupToAdmitOrReject(int64_t cluster_size,
     VLOG(3) << "Trying to admit query to pool " << pool_name << " in executor group "
             << group_name << " (" << group_size << " executors)";
 
-    const int64_t max_queued = GetMaxQueuedForPool(pool_config, cluster_size);
-    const int64_t max_mem = GetMaxMemForPool(pool_config, cluster_size);
-    const int64_t max_requests = GetMaxRequestsForPool(pool_config, cluster_size);
+    const int64_t max_queued = GetMaxQueuedForPool(pool_config);
+    const int64_t max_mem = GetMaxMemForPool(pool_config);
+    const int64_t max_requests = GetMaxRequestsForPool(pool_config);
     VLOG_QUERY << "Trying to admit id=" << PrintId(schedule->query_id())
                << " in pool_name=" << pool_name << " executor_group_name=" << group_name
                << " per_host_mem_estimate="
                << PrintBytes(schedule->GetPerExecutorMemoryEstimate())
                << " dedicated_coord_mem_estimate="
                << PrintBytes(schedule->GetDedicatedCoordMemoryEstimate())
-               << " max_requests=" << max_requests << " ("
-               << GetMaxRequestsForPoolDescription(pool_config, cluster_size) << ")"
-               << " max_queued=" << max_queued << " ("
-               << GetMaxQueuedForPoolDescription(pool_config, cluster_size) << ")"
-               << " max_mem=" << PrintBytes(max_mem) << " ("
-               << GetMaxMemForPoolDescription(pool_config, cluster_size) << ")";
+               << " max_requests=" << max_requests
+               << " max_queued=" << max_queued
+               << " max_mem=" << PrintBytes(max_mem);
     VLOG_QUERY << "Stats: " << pool_stats->DebugString();
 
     // Query is rejected if the rejection check fails on *any* group.
-    if (RejectForSchedule(
-            *schedule, pool_config, cluster_size, group_size, &rejection_reason)) {
+    if (RejectForSchedule(*schedule, pool_config, &rejection_reason)) {
       DCHECK(!rejection_reason.empty());
       queue_node->not_admitted_reason = rejection_reason;
       return false;
     }
 
-    if (CanAdmitRequest(*schedule, pool_config, cluster_size, admit_from_queue,
-            &queue_node->not_admitted_reason)) {
+    if (CanAdmitRequest(
+            *schedule, pool_config, admit_from_queue, &queue_node->not_admitted_reason)) {
       queue_node->admitted_schedule = std::move(group_schedule.schedule);
       return true;
     } else {
@@ -1388,23 +1335,21 @@ void AdmissionController::DequeueLoop() {
     // services have already started to accept connections, the whole membership can still
     // be empty.
     if (membership_snapshot->executor_groups.empty()) continue;
-    const int64_t cluster_size = GetClusterSize(*membership_snapshot);
 
     for (const PoolConfigMap::value_type& entry: pool_config_map_) {
       const string& pool_name = entry.first;
       const TPoolConfig& pool_config = entry.second;
       PoolStats* stats = GetPoolStats(pool_name, /* dcheck_exists=*/true);
-      stats->UpdateDerivedMetrics(pool_config, cluster_size);
 
       if (stats->local_stats().num_queued == 0) continue; // Nothing to dequeue
       DCHECK_GE(stats->agg_num_queued(), stats->local_stats().num_queued);
 
       RequestQueue& queue = request_queue_map_[pool_name];
-      int64_t max_to_dequeue = GetMaxToDequeue(queue, stats, pool_config, cluster_size);
+      int64_t max_to_dequeue = GetMaxToDequeue(queue, stats, pool_config);
       VLOG_RPC << "Dequeue thread will try to admit " << max_to_dequeue << " requests"
                << ", pool=" << pool_name
                << ", num_queued=" << stats->local_stats().num_queued
-               << " cluster_size=" << cluster_size;
+               << " cluster_size=" << GetClusterSize(*membership_snapshot);
       if (max_to_dequeue == 0) continue; // to next pool.
 
       while (max_to_dequeue > 0 && !queue.empty()) {
@@ -1415,7 +1360,7 @@ void AdmissionController::DequeueLoop() {
             && queue_node->admit_outcome->Get() == AdmissionOutcome::CANCELLED;
 
         bool is_rejected = !is_cancelled
-            && !FindGroupToAdmitOrReject(cluster_size, membership_snapshot, pool_config,
+            && !FindGroupToAdmitOrReject(membership_snapshot, pool_config,
                    /* admit_from_queue=*/true, stats, queue_node);
 
         if (!is_cancelled && !is_rejected
@@ -1481,10 +1426,10 @@ int64_t AdmissionController::GetQueueTimeoutForPoolMs(const TPoolConfig& pool_co
   return max<int64_t>(0, queue_wait_timeout_ms);
 }
 
-int64_t AdmissionController::GetMaxToDequeue(RequestQueue& queue, PoolStats* stats,
-    const TPoolConfig& pool_config, int64_t cluster_size) {
+int64_t AdmissionController::GetMaxToDequeue(
+    RequestQueue& queue, PoolStats* stats, const TPoolConfig& pool_config) {
   if (PoolLimitsRunningQueriesCount(pool_config)) {
-    const int64_t max_requests = GetMaxRequestsForPool(pool_config, cluster_size);
+    const int64_t max_requests = GetMaxRequestsForPool(pool_config);
     const int64_t total_available = max_requests - stats->agg_num_running();
     if (total_available <= 0) {
       // There is a limit for the number of running queries, so we can
@@ -1493,7 +1438,6 @@ int64_t AdmissionController::GetMaxToDequeue(RequestQueue& queue, PoolStats* sta
       if (!queue.empty()) {
         LogDequeueFailed(queue.head(),
             Substitute(QUEUED_NUM_RUNNING, stats->agg_num_running(), max_requests,
-                GetMaxRequestsForPoolDescription(pool_config, cluster_size),
                 GetStalenessDetailLocked(" ")));
       }
       return 0;
@@ -1715,18 +1659,6 @@ void AdmissionController::PoolStats::ToJson(
       document->GetAllocator());
   pool->AddMember("clamp_mem_limit_query_option",
       metrics_.clamp_mem_limit_query_option->GetValue(), document->GetAllocator());
-  pool->AddMember("max_running_queries_multiple",
-      metrics_.max_running_queries_multiple->GetValue(), document->GetAllocator());
-  pool->AddMember("max_queued_queries_multiple",
-      metrics_.max_queued_queries_multiple->GetValue(), document->GetAllocator());
-  pool->AddMember("max_memory_multiple", metrics_.max_memory_multiple->GetValue(),
-      document->GetAllocator());
-  pool->AddMember("max_running_queries_derived",
-      metrics_.max_running_queries_derived->GetValue(), document->GetAllocator());
-  pool->AddMember("max_queued_queries_derived",
-      metrics_.max_queued_queries_derived->GetValue(), document->GetAllocator());
-  pool->AddMember("max_memory_derived", metrics_.max_memory_derived->GetValue(),
-      document->GetAllocator());
   pool->AddMember("wait_time_ms_ema", wait_time_ms_ema_, document->GetAllocator());
   Value histogram(kArrayType);
   for (int bucket = 0; bucket < peak_mem_histogram_.size(); bucket++) {
@@ -1811,18 +1743,6 @@ void AdmissionController::PoolStats::InitMetrics() {
       POOL_MIN_QUERY_MEM_LIMIT_METRIC_KEY_FORMAT, 0, name_);
   metrics_.clamp_mem_limit_query_option = parent_->metrics_group_->AddProperty<bool>(
       POOL_CLAMP_MEM_LIMIT_QUERY_OPTION_METRIC_KEY_FORMAT, false, name_);
-  metrics_.max_running_queries_multiple = parent_->metrics_group_->AddDoubleGauge(
-      POOL_MAX_RUNNING_QUERIES_MULTIPLE_METRIC_KEY_FORMAT, 0, name_);
-  metrics_.max_queued_queries_multiple = parent_->metrics_group_->AddDoubleGauge(
-      POOL_MAX_QUEUED_QUERIES_MULTIPLE_METRIC_KEY_FORMAT, 0, name_);
-  metrics_.max_memory_multiple = parent_->metrics_group_->AddGauge(
-      POOL_MAX_MEMORY_MULTIPLE_METRIC_KEY_FORMAT, 0, name_);
-  metrics_.max_running_queries_derived = parent_->metrics_group_->AddGauge(
-      POOL_MAX_RUNNING_QUERIES_DERIVED_METRIC_KEY_FORMAT, 0, name_);
-  metrics_.max_queued_queries_derived = parent_->metrics_group_->AddGauge(
-      POOL_MAX_QUEUED_QUERIES_DERIVED_METRIC_KEY_FORMAT, 0, name_);
-  metrics_.max_memory_derived = parent_->metrics_group_->AddGauge(
-      POOL_MAX_MEMORY_DERIVED_METRIC_KEY_FORMAT, 0, name_);
 }
 
 void AdmissionController::PopulatePerHostMemReservedAndAdmitted(
@@ -1885,68 +1805,24 @@ int64_t AdmissionController::GetExecutorGroupSize(
   return it->second.NumExecutors();
 }
 
-int64_t AdmissionController::GetMaxMemForPool(
-    const TPoolConfig& pool_config, int64_t cluster_size) {
-  if (pool_config.max_memory_multiple > 0) {
-    return pool_config.max_memory_multiple * cluster_size;
-  }
+int64_t AdmissionController::GetMaxMemForPool(const TPoolConfig& pool_config) {
   return pool_config.max_mem_resources;
 }
 
-string AdmissionController::GetMaxMemForPoolDescription(
-    const TPoolConfig& pool_config, int64_t cluster_size) {
-  if (pool_config.max_memory_multiple > 0) {
-    return Substitute("calculated as $0 backends each with $1", cluster_size,
-        PrintBytes(pool_config.max_memory_multiple));
-  }
-  return "configured statically";
-}
-
-int64_t AdmissionController::GetMaxRequestsForPool(
-    const TPoolConfig& pool_config, int64_t cluster_size) {
-  if (pool_config.max_running_queries_multiple > 0) {
-    return ceil(pool_config.max_running_queries_multiple * cluster_size);
-  }
+int64_t AdmissionController::GetMaxRequestsForPool(const TPoolConfig& pool_config) {
   return pool_config.max_requests;
 }
 
-string AdmissionController::GetMaxRequestsForPoolDescription(
-    const TPoolConfig& pool_config, int64_t cluster_size) {
-  if (pool_config.max_running_queries_multiple > 0) {
-    return Substitute("calculated as $0 backends each with $1 queries", cluster_size,
-        pool_config.max_running_queries_multiple);
-  }
-  return "configured statically";
-}
-
-int64_t AdmissionController::GetMaxQueuedForPool(
-    const TPoolConfig& pool_config, int64_t cluster_size) {
-  if (pool_config.max_queued_queries_multiple > 0) {
-    return ceil(pool_config.max_queued_queries_multiple * cluster_size);
-  }
+int64_t AdmissionController::GetMaxQueuedForPool(const TPoolConfig& pool_config) {
   return pool_config.max_queued;
 }
 
-string AdmissionController::GetMaxQueuedForPoolDescription(
-    const TPoolConfig& pool_config, int64_t cluster_size) {
-  if (pool_config.max_queued_queries_multiple > 0) {
-    return Substitute("calculated as $0 backends each with $1 queries", cluster_size,
-        pool_config.max_queued_queries_multiple);
-  }
-  return "configured statically";
-}
-
 bool AdmissionController::PoolDisabled(const TPoolConfig& pool_config) {
-  return ((pool_config.max_requests == 0 && pool_config.max_running_queries_multiple == 0)
-      || (pool_config.max_mem_resources == 0 && pool_config.max_memory_multiple == 0));
+  return (pool_config.max_requests == 0 || pool_config.max_mem_resources == 0);
 }
 
 bool AdmissionController::PoolLimitsRunningQueriesCount(const TPoolConfig& pool_config) {
-  return pool_config.max_requests > 0 || pool_config.max_running_queries_multiple > 0;
-}
-
-bool AdmissionController::PoolHasFixedMemoryLimit(const TPoolConfig& pool_config) {
-  return pool_config.max_mem_resources > 0 && pool_config.max_memory_multiple <= 0;
+  return pool_config.max_requests > 0;
 }
 
 int64_t AdmissionController::GetMemToAdmit(
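
Taken together, the admission-controller.cc changes reduce every limit lookup
to a direct read of the statically configured pool values; no code path needs
the cluster size any more. A self-contained sketch of the resulting decision
logic (simplified stand-in types and names, assumed for illustration; not
Impala's real headers):

    #include <cstdint>
    #include <string>

    // Stand-in for TPoolConfig after this patch: static limits only.
    struct PoolConfig {
      int64_t max_requests = -1;       // -1: unlimited, 0: pool disabled
      int64_t max_mem_resources = -1;  // bytes; -1: unlimited, 0: pool disabled
    };

    // Mirrors the simplified AdmissionController::PoolDisabled() above.
    bool PoolDisabled(const PoolConfig& cfg) {
      return cfg.max_requests == 0 || cfg.max_mem_resources == 0;
    }

    // Sketch of the admission decision: no cluster_size parameter, because
    // the limits no longer scale with the number of executors.
    bool CanAdmit(const PoolConfig& cfg, int64_t agg_num_running,
                  int64_t pool_mem_reserved, int64_t cluster_mem_to_admit,
                  std::string* not_admitted_reason) {
      if (cfg.max_requests >= 0 && agg_num_running >= cfg.max_requests) {
        *not_admitted_reason = "number of running queries is at or over limit";
        return false;
      }
      if (cfg.max_mem_resources >= 0 &&
          pool_mem_reserved + cluster_mem_to_admit > cfg.max_mem_resources) {
        *not_admitted_reason = "not enough aggregate memory available in pool";
        return false;
      }
      return true;
    }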
diff --git a/be/src/scheduling/admission-controller.h b/be/src/scheduling/admission-controller.h
index 07c73cc..abeaa13 100644
--- a/be/src/scheduling/admission-controller.h
+++ b/be/src/scheduling/admission-controller.h
@@ -484,13 +484,6 @@ class AdmissionController {
       IntGauge* max_query_mem_limit;
       IntGauge* min_query_mem_limit;
       BooleanProperty* clamp_mem_limit_query_option;
-      DoubleGauge* max_running_queries_multiple;
-      DoubleGauge* max_queued_queries_multiple;
-      IntGauge* max_memory_multiple;
-      /// Metrics exposing the pool's derived runtime configuration.
-      IntGauge* max_running_queries_derived;
-      IntGauge* max_queued_queries_derived;
-      IntGauge* max_memory_derived;
     };
 
     PoolStats(AdmissionController* parent, const std::string& name)
@@ -547,10 +540,7 @@ class AdmissionController {
     const TPoolStats& local_stats() { return local_stats_; }
 
     /// Updates the metrics exposing the pool configuration to those in pool_cfg.
-    void UpdateConfigMetrics(const TPoolConfig& pool_cfg, int64_t cluster_size);
-
-    /// Updates the metrics exposing the scalable pool configuration values.
-    void UpdateDerivedMetrics(const TPoolConfig& pool_cfg, int64_t cluster_size);
+    void UpdateConfigMetrics(const TPoolConfig& pool_cfg);
 
     PoolMetrics* metrics() { return &metrics_; }
     std::string DebugString() const;
@@ -794,8 +784,7 @@ class AdmissionController {
   /// true and keeps queue_node->admitted_schedule unset if the query cannot be admitted
   /// now, but also does not need to be rejected. If the query must be rejected, this
   /// method returns false and sets queue_node->not_admitted_reason.
-  bool FindGroupToAdmitOrReject(
-      int64_t cluster_size, ClusterMembershipMgr::SnapshotPtr membership_snapshot,
+  bool FindGroupToAdmitOrReject(ClusterMembershipMgr::SnapshotPtr membership_snapshot,
       const TPoolConfig& pool_config, bool admit_from_queue, PoolStats* pool_stats,
       QueueNode* queue_node);
 
@@ -808,7 +797,7 @@ class AdmissionController {
   /// false and not_admitted_reason specifies why the request can not be admitted
   /// immediately. Caller owns not_admitted_reason. Must hold admission_ctrl_lock_.
   bool CanAdmitRequest(const QuerySchedule& schedule, const TPoolConfig& pool_cfg,
-      int64_t cluster_size, bool admit_from_queue, std::string* not_admitted_reason);
+      bool admit_from_queue, std::string* not_admitted_reason);
 
   /// Returns true if all executors can accommodate the largest initial reservation of
   /// any executor and the backend running the coordinator fragment can accommodate its
@@ -832,8 +821,7 @@ class AdmissionController {
   /// 'mem_unavailable_reason'.
   /// Must hold admission_ctrl_lock_.
   bool HasAvailableMemResources(const QuerySchedule& schedule,
-      const TPoolConfig& pool_cfg, int64_t cluster_size,
-      std::string* mem_unavailable_reason);
+      const TPoolConfig& pool_cfg, std::string* mem_unavailable_reason);
 
   /// Returns true if there are enough available slots on all executors in the schedule to
   /// fit the query schedule. The number of slots per executors does not change with the
@@ -891,7 +879,7 @@ class AdmissionController {
   /// disabled, or the queue is already full.
   /// Must hold admission_ctrl_lock_.
   bool RejectForCluster(const std::string& pool_name, const TPoolConfig& pool_cfg,
-      bool admit_from_queue, int64_t cluster_size, std::string* rejection_reason);
+      bool admit_from_queue, std::string* rejection_reason);
 
   /// Returns true if a request must be rejected immediately based on the pool
   /// configuration and a particular schedule, e.g. because the memory requirements of the
@@ -900,7 +888,7 @@ class AdmissionController {
   /// other groups, either.
   /// Must hold admission_ctrl_lock_.
   bool RejectForSchedule(const QuerySchedule& schedule, const TPoolConfig& pool_cfg,
-      int64_t cluster_size, int64_t group_size, std::string* rejection_reason);
+      std::string* rejection_reason);
 
   /// Gets or creates the PoolStats for pool_name. Must hold admission_ctrl_lock_.
   PoolStats* GetPoolStats(const std::string& pool_name, bool dcheck_exists = false);
@@ -933,20 +921,10 @@ class AdmissionController {
       const std::string& pool_name, const std::string& backend_id);
 
   /// Returns the maximum memory for the pool.
-  static int64_t GetMaxMemForPool(const TPoolConfig& pool_config, int64_t cluster_size);
-
-  /// Returns a description of how the maximum memory for the pool is configured.
-  static std::string GetMaxMemForPoolDescription(
-      const TPoolConfig& pool_config, int64_t cluster_size);
+  static int64_t GetMaxMemForPool(const TPoolConfig& pool_config);
 
   /// Returns the maximum number of requests that can run in the pool.
-  static int64_t GetMaxRequestsForPool(
-      const TPoolConfig& pool_config, int64_t cluster_size);
-
-  /// Returns a description of how the maximum number of requests that can run in the pool
-  /// is configured.
-  static std::string GetMaxRequestsForPoolDescription(
-      const TPoolConfig& pool_config, int64_t cluster_size);
+  static int64_t GetMaxRequestsForPool(const TPoolConfig& pool_config);
 
   /// Returns the effective queue timeout for the pool in milliseconds.
   static int64_t GetQueueTimeoutForPoolMs(const TPoolConfig& pool_config);
@@ -957,8 +935,8 @@ class AdmissionController {
   /// is returned.
   /// Uses a heuristic to limit the number of requests we dequeue locally to avoid all
   /// impalads dequeuing too many requests at the same time.
-  int64_t GetMaxToDequeue(RequestQueue& queue, PoolStats* stats,
-      const TPoolConfig& pool_config, int64_t cluster_size);
+  int64_t GetMaxToDequeue(
+      RequestQueue& queue, PoolStats* stats, const TPoolConfig& pool_config);
 
   /// Returns true if the pool has been disabled through configuration.
   static bool PoolDisabled(const TPoolConfig& pool_config);
@@ -966,17 +944,8 @@ class AdmissionController {
   /// Returns true if the pool is configured to limit the number of running queries.
   static bool PoolLimitsRunningQueriesCount(const TPoolConfig& pool_config);
 
-  /// Returns true if the pool has a fixed (i.e. not scalable) maximum memory limit.
-  static bool PoolHasFixedMemoryLimit(const TPoolConfig& pool_config);
-
   /// Returns the maximum number of requests that can be queued in the pool.
-  static int64_t GetMaxQueuedForPool(
-      const TPoolConfig& pool_config, int64_t cluster_size);
-
-  /// Returns a description of how the maximum number of requests that can run be queued
-  /// in the pool is configured.
-  static std::string GetMaxQueuedForPoolDescription(
-      const TPoolConfig& pool_config, int64_t cluster_size);
+  static int64_t GetMaxQueuedForPool(const TPoolConfig& pool_config);
 
   /// Return all executor groups from 'all_groups' that can be used to run queries in
   /// 'pool_name'.
diff --git a/common/thrift/ImpalaInternalService.thrift b/common/thrift/ImpalaInternalService.thrift
index 4b055b8..9cbf167 100644
--- a/common/thrift/ImpalaInternalService.thrift
+++ b/common/thrift/ImpalaInternalService.thrift
@@ -733,25 +733,10 @@ struct TPoolConfig {
   // limits specified for the pool. Default is true.
   8: required bool clamp_mem_limit_query_option = true;
 
-  // This floating point number is multiplied by the current total number of executors at
-  // runtime to give the maximum number of concurrently running queries allowed in the
-  // pool. If this value is zero then it is ignored.
-  9: required double max_running_queries_multiple = 0;
-
-  // This floating point number is multiplied by the current total number of executors at
-  // runtime to give the maximum number of queries that can be queued in the pool.  If
-  // this value is zero then it is ignored.
-  10: required double max_queued_queries_multiple = 0;
-
-  // This number of bytes is multiplied by the current total number of executors at
-  // runtime to give the maximum memory available across the cluster for the pool.  If
-  // this value is zero then it is ignored.
-  11: required i64 max_memory_multiple = 0;
-
   // Maximum value for the mt_dop query option. If the mt_dop is set and exceeds this
   // maximum, the mt_dop setting is reduced to the maximum. If the max_mt_dop is
   // negative, no limit is enforced.
-  12: required i64 max_mt_dop = -1;
+  9: required i64 max_mt_dop = -1;
 }
 
 struct TParseDateStringResult {
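
Note that dropping fields 9-11 and renumbering max_mt_dop from id 12 to id 9
changes the wire encoding of TPoolConfig. That is presumably acceptable here
because the struct belongs to the internal service and is only exchanged
between daemons of the same build, not persisted or sent across versions. For
illustration (values assumed), a pool config now carries only static limits:

    // Thrift-generated C++ struct; required fields are plain public members.
    TPoolConfig cfg;
    cfg.max_requests = 10;                       // hard cap on running queries
    cfg.max_queued = 20;                         // hard cap on queued queries
    cfg.max_mem_resources = 40LL * 1024 * 1024;  // 40 MB aggregate pool memory
    cfg.max_mt_dop = -1;                         // field 9 (was 12): no cap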
diff --git a/common/thrift/metrics.json b/common/thrift/metrics.json
index e4be01a..932b2c3 100644
--- a/common/thrift/metrics.json
+++ b/common/thrift/metrics.json
@@ -90,66 +90,6 @@
     "key": "admission-controller.pool-clamp-mem-limit-query-option.$0"
   },
   {
-    "description": "This floating point number is multiplied by the current total number of executors at runtime to give the maximum number of concurrently running queries allowed in the pool.",
-    "contexts": [
-      "RESOURCE_POOL"
-    ],
-    "label": "Resource Pool $0 Max Running Queries Multiple",
-    "units": "UNIT",
-    "kind": "GAUGE",
-    "key": "admission-controller.pool-max-running-queries-multiple.$0"
-  },
-  {
-    "description": "This floating point number is multiplied by the current total number of executors at runtime to give the maximum number of queries that can be queued in the pool.",
-    "contexts": [
-      "RESOURCE_POOL"
-    ],
-    "label": "Resource Pool $0 Max Queued Queries Multiple",
-    "units": "UNIT",
-    "kind": "GAUGE",
-    "key": "admission-controller.pool-max-queued-queries-multiple.$0"
-  },
-  {
-    "description": "This number of bytes is multiplied by the current total number of executors at runtime to give the maximum memory available across the cluster for the pool.",
-    "contexts": [
-      "RESOURCE_POOL"
-    ],
-    "label": "Resource Pool $0 Max Memory Multiple",
-    "units": "BYTES",
-    "kind": "GAUGE",
-    "key": "admission-controller.pool-max-memory-multiple.$0"
-  },
-  {
-    "description": "The maximum number of concurrently running queries allowed in the pool. If pool-max-running-queries-multiple is set then the metric is derived by multiplying that value by the current total number of executors.",
-    "contexts": [
-      "RESOURCE_POOL"
-    ],
-    "label": "Resource Pool $0 Max Running Queries Derived",
-    "units": "UNIT",
-    "kind": "GAUGE",
-    "key": "admission-controller.pool-max-running-queries-derived.$0"
-  },
-  {
-    "description": "The maximum number of queries that can be queued in the pool. If pool-max-queued-queries-multiple is set then the metric is derived by multiplying that value by the current total number of executors.",
-    "contexts": [
-      "RESOURCE_POOL"
-    ],
-    "label": "Resource Pool $0 Max Queued Queries Derived",
-    "units": "UNIT",
-    "kind": "GAUGE",
-    "key": "admission-controller.max-queued-queries-derived.$0"
-  },
-  {
-    "description": "The maximum memory available across the cluster for the pool. If pool-max-memory-multiple is set then the metric is derived by multiplying that value by the current total number of executors.",
-    "contexts": [
-      "RESOURCE_POOL"
-    ],
-    "label": "Resource Pool $0 Max Memory Derived",
-    "units": "BYTES",
-    "kind": "GAUGE",
-    "key": "admission-controller.max-memory-derived.$0"
-  },
-  {
     "description": "Resource Pool $0 Aggregate Queue Size",
     "contexts": [
       "RESOURCE_POOL"
diff --git a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
index 8850a5c..58fe6d5 100644
--- a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
+++ b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
@@ -125,21 +125,6 @@ public class RequestPoolService {
   private final static String CLAMP_MEM_LIMIT_QUERY_OPTION =
       "impala.admission-control.clamp-mem-limit-query-option";
 
-  // Key for specifying the "Max Running Queries Multiple" configuration
-  // of the pool.
-  private final static String MAX_RUNNING_QUERIES_MULTIPLE =
-      "impala.admission-control.max-running-queries-multiple";
-
-  // Key for specifying the "Max Queued Queries Multiple" configuration
-  // of the pool.
-  private final static String MAX_QUEUED_QUERIES_MULTIPLE =
-      "impala.admission-control.max-queued-queries-multiple";
-
-  // Key for specifying the "Max Memory Multiple" configuration
-  // of the pool.
-  private final static String MAX_MEMORY_MULTIPLE =
-      "impala.admission-control.max-memory-multiple";
-
   // Key for specifying the "Max mt_dop" configuration of the pool
   private final static String MAX_MT_DOP = "impala.admission-control.max-mt-dop";
 
@@ -402,12 +387,6 @@ public class RequestPoolService {
           getPoolConfigValue(currentConf, pool, MIN_QUERY_MEM_LIMIT_BYTES, 0L));
       result.setClamp_mem_limit_query_option(
           getPoolConfigValue(currentConf, pool, CLAMP_MEM_LIMIT_QUERY_OPTION, true));
-      result.setMax_running_queries_multiple(
-          getPoolConfigDoubleValue(currentConf, pool, MAX_RUNNING_QUERIES_MULTIPLE, 0.0));
-      result.setMax_queued_queries_multiple(
-          getPoolConfigDoubleValue(currentConf, pool, MAX_QUEUED_QUERIES_MULTIPLE, 0.0));
-      result.setMax_memory_multiple(
-          getPoolConfigValue(currentConf, pool, MAX_MEMORY_MULTIPLE, 0));
       result.setMax_mt_dop(
           getPoolConfigValue(currentConf, pool, MAX_MT_DOP, -1));
     }
@@ -415,13 +394,11 @@ public class RequestPoolService {
       LOG.debug("getPoolConfig(pool={}): max_mem_resources={}, max_requests={},"
               + " max_queued={},  queue_timeout_ms={}, default_query_options={},"
               + " max_query_mem_limit={}, min_query_mem_limit={},"
-              + " clamp_mem_limit_query_option={}, max_running_queries_multiple={},"
-              + " max_queued_queries_multiple={}, max_memory_multiple={}",
+              + " clamp_mem_limit_query_option={}",
           pool, result.max_mem_resources, result.max_requests, result.max_queued,
           result.queue_timeout_ms, result.default_query_options,
           result.max_query_mem_limit, result.min_query_mem_limit,
-          result.clamp_mem_limit_query_option, result.max_running_queries_multiple,
-          result.max_queued_queries_multiple, result.max_memory_multiple);
+          result.clamp_mem_limit_query_option);
     }
     return result;
   }
@@ -459,15 +436,6 @@ public class RequestPoolService {
   }
 
   /**
-   * Looks up the per-pool Double config from the Configuration. See above.
-   */
-  private double getPoolConfigDoubleValue(
-      Configuration conf, String pool, String key, double defaultValue) {
-    return conf.getDouble(String.format(PER_POOL_CONFIG_KEY_FORMAT, key, pool),
-        conf.getDouble(key, defaultValue));
-  }
-
-  /**
    * Resolves the actual pool to use via the allocation placement policy. The policy may
    * change the requested pool.
    *
diff --git a/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java b/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java
index 25d6d3d..436984c 100644
--- a/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java
+++ b/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java
@@ -205,9 +205,7 @@ public class TestRequestPoolService {
         10000L, "mem_limit=1024m,query_timeout_s=10");
     checkPoolConfigResult("root.queueB", 5, 10, -1, 30000L, "mem_limit=1024m");
     checkPoolConfigResult("root.queueC", 5, 10, 1024 * ByteUnits.MEGABYTE, 30000L,
-        "mem_limit=1024m", 1000, 10, false, 0.0, 0.0, 0);
-    checkPoolConfigResult("root.queueD", 5, 10, 1024 * ByteUnits.MEGABYTE, 30000L,
-        "mem_limit=1024m", 0, 0, true, 0.5, 2.5, 25);
+            "mem_limit=1024m", 1000, 10, false);
   }
 
   @Test
@@ -215,7 +213,7 @@ public class TestRequestPoolService {
     createPoolService(ALLOCATION_FILE_EMPTY, LLAMA_CONFIG_FILE_EMPTY);
     Assert.assertEquals("root.userA", poolService_.assignToPool("", "userA"));
     Assert.assertTrue(poolService_.hasAccess("root.userA", "userA"));
-    checkPoolConfigResult("root", -1, 200, -1, null, "", 0, 0, true, 0.0, 0.0, 0);
+    checkPoolConfigResult("root", -1, 200, -1, null, "", 0, 0, true);
   }
 
   @Ignore("IMPALA-4868") @Test
@@ -311,8 +309,7 @@ public class TestRequestPoolService {
   private void checkPoolConfigResult(String pool, long expectedMaxRequests,
       long expectedMaxQueued, long expectedMaxMem, Long expectedQueueTimeoutMs,
       String expectedQueryOptions, long max_query_mem_limit, long min_query_mem_limit,
-      boolean clamp_mem_limit_query_option, double max_running_queries_multiple,
-      double max_queued_queries_multiple, long max_memory_multiple) {
+      boolean clamp_mem_limit_query_option) {
     TPoolConfig expectedResult = new TPoolConfig();
     expectedResult.setMax_requests(expectedMaxRequests);
     expectedResult.setMax_queued(expectedMaxQueued);
@@ -320,9 +317,6 @@ public class TestRequestPoolService {
     expectedResult.setMax_query_mem_limit(max_query_mem_limit);
     expectedResult.setMin_query_mem_limit(min_query_mem_limit);
     expectedResult.setClamp_mem_limit_query_option(clamp_mem_limit_query_option);
-    expectedResult.setMax_running_queries_multiple(max_running_queries_multiple);
-    expectedResult.setMax_queued_queries_multiple(max_queued_queries_multiple);
-    expectedResult.setMax_memory_multiple(max_memory_multiple);
     if (expectedQueueTimeoutMs != null) {
       expectedResult.setQueue_timeout_ms(expectedQueueTimeoutMs);
     }
@@ -337,7 +331,7 @@ public class TestRequestPoolService {
       long expectedMaxQueued, long expectedMaxMem, Long expectedQueueTimeoutMs,
       String expectedQueryOptions) {
     checkPoolConfigResult(pool, expectedMaxRequests, expectedMaxQueued, expectedMaxMem,
-        expectedQueueTimeoutMs, expectedQueryOptions, 0, 0, true, 0.0, 0.0, 0);
+        expectedQueueTimeoutMs, expectedQueryOptions, 0, 0, true);
   }
 
   private void checkPoolConfigResult(String pool, long expectedMaxRequests,
diff --git a/fe/src/test/resources/fair-scheduler-test.xml b/fe/src/test/resources/fair-scheduler-test.xml
index b9a6673..9d3dafd 100644
--- a/fe/src/test/resources/fair-scheduler-test.xml
+++ b/fe/src/test/resources/fair-scheduler-test.xml
@@ -12,10 +12,6 @@
       <aclSubmitApps>* </aclSubmitApps>
       <maxResources>1024 mb, 0 vcores</maxResources>
     </queue>
-    <queue name="queueD">
-      <aclSubmitApps>* </aclSubmitApps>
-      <maxResources>1024 mb, 0 vcores</maxResources>
-    </queue>
     <aclSubmitApps> </aclSubmitApps>
   </queue>
   <queuePlacementPolicy>
diff --git a/fe/src/test/resources/fair-scheduler-test2.xml b/fe/src/test/resources/fair-scheduler-test2.xml
index 743d153..6e2b18d 100644
--- a/fe/src/test/resources/fair-scheduler-test2.xml
+++ b/fe/src/test/resources/fair-scheduler-test2.xml
@@ -14,11 +14,7 @@
     </queue>
     <queue name="queueD">
       <aclSubmitApps>* </aclSubmitApps>
-      <maxResources>1024 mb, 0 vcores</maxResources>
-    </queue>
-    <queue name="queueE">
-      <aclSubmitApps>* </aclSubmitApps>
-      <maxResources>1024 mb, 0 vcores</maxResources>
+      <maxResources>400 mb, 0 vcores</maxResources>
     </queue>
     <aclSubmitApps> </aclSubmitApps>
   </queue>
diff --git a/fe/src/test/resources/llama-site-test.xml b/fe/src/test/resources/llama-site-test.xml
index 0b22064..3738705 100644
--- a/fe/src/test/resources/llama-site-test.xml
+++ b/fe/src/test/resources/llama-site-test.xml
@@ -55,16 +55,4 @@
     <name>impala.admission-control.clamp-mem-limit-query-option.root.queueC</name>
     <value>false</value>
   </property>
-  <property>
-    <name>impala.admission-control.max-running-queries-multiple.root.queueD</name>
-    <value>0.5</value>
-  </property>
-  <property>
-    <name>impala.admission-control.max-queued-queries-multiple.root.queueD</name>
-    <value>2.5</value>
-  </property>
-  <property>
-    <name>impala.admission-control.max-memory-multiple.root.queueD</name>
-    <value>25</value>
-  </property>
 </configuration>
diff --git a/fe/src/test/resources/llama-site-test2.xml b/fe/src/test/resources/llama-site-test2.xml
index d71dba8..8890d99 100644
--- a/fe/src/test/resources/llama-site-test2.xml
+++ b/fe/src/test/resources/llama-site-test2.xml
@@ -66,29 +66,7 @@
     <value>10</value>
   </property>
   <property>
-    <name>impala.admission-control.max-running-queries-multiple.root.queueD</name>
-    <value>0.5</value>
-  </property>
-  <property>
-    <name>impala.admission-control.max-queued-queries-multiple.root.queueD</name>
-    <value>2.5</value>
-  </property>
-  <property>
-    <name>impala.admission-control.max-memory-multiple.root.queueD</name>
-    <value>41943040</value> <!-- 40 * 1024 * 1024 -->
-  </property>
-
-  <!-- queue E is used by test_admission_controller.py to run tests in the minicluster-->
-  <property>
-    <name>impala.admission-control.max-running-queries-multiple.root.queueE</name>
-    <value>1</value>
-  </property>
-  <property>
-    <name>impala.admission-control.max-queued-queries-multiple.root.queueE</name>
-    <value>0.6</value>
-  </property>
-  <property>
-    <name>impala.admission-control.max-memory-multiple.root.queueE</name>
-    <value>104857600</value> <!-- 100 * 1024 * 1024 -->
+    <name>llama.am.throttling.maximum.placed.reservations.root.queueD</name>
+    <value>6</value>
   </property>
 </configuration>
diff --git a/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test b/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
index 4da1ac1..c9e48ec 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
@@ -164,5 +164,4 @@ Select 1;
 ---- CATCH
 Rejected query from pool root.maxMemLessThanMinLimit: Invalid pool config: the
  min_query_mem_limit 2621440001 is greater than the max_mem_resources 2621440000
- (configured statically)
 ====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/admission-reject-mem-estimate.test b/testdata/workloads/functional-query/queries/QueryTest/admission-reject-mem-estimate.test
index 9984f2b..7f8feb2 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/admission-reject-mem-estimate.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/admission-reject-mem-estimate.test
@@ -6,8 +6,7 @@ select min(c_name), min(n_name)
 from tpch_parquet.customer
   join tpch_parquet.nation on c_nationkey = n_nationkey
 ---- CATCH
-request memory needed 82.94 MB is greater than pool
- max mem resources 40.00 MB (configured statically)
+request memory needed 82.94 MB is greater than pool max mem resources 40.00 MB
 ====
 ---- QUERY
 # Overriding the memory estimate should allow the query to execute.
@@ -33,8 +32,7 @@ select min(c_name), min(n_name)
 from tpch_parquet.customer
   join tpch_parquet.nation on c_nationkey = n_nationkey
 ---- CATCH
-request memory needed 45.00 MB is greater than pool
- max mem resources 40.00 MB (configured statically)
+request memory needed 45.00 MB is greater than pool max mem resources 40.00 MB
 ====
 ---- QUERY
 # MEM_LIMIT takes precedence over MAX_MEM_ESTIMATE_FOR_ADMISSION, so the query is
@@ -45,8 +43,7 @@ select min(c_name), min(n_name)
 from tpch_parquet.customer
   join tpch_parquet.nation on c_nationkey = n_nationkey
 ---- CATCH
-request memory needed 45.00 MB is greater than pool
- max mem resources 40.00 MB (configured statically)
+request memory needed 45.00 MB is greater than pool max mem resources 40.00 MB
 ====
 ---- QUERY
 # MEM_LIMIT takes precedence over MAX_MEM_ESTIMATE_FOR_ADMISSION, so the query is
diff --git a/testdata/workloads/functional-query/queries/QueryTest/admission-reject-min-reservation.test b/testdata/workloads/functional-query/queries/QueryTest/admission-reject-min-reservation.test
index 10bd714..66f9047 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/admission-reject-min-reservation.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/admission-reject-min-reservation.test
@@ -14,7 +14,7 @@ set mem_limit=150mb;
 select distinct * from functional_parquet.alltypesagg
 ---- CATCH
 minimum memory reservation needed is greater than pool max mem resources.
- Pool max mem resources: 40.00 MB (configured statically).
+ Pool max mem resources: 40.00 MB.
  Cluster-wide memory reservation needed: 204.26 MB
 ====
 ---- QUERY
diff --git a/tests/custom_cluster/test_admission_controller.py b/tests/custom_cluster/test_admission_controller.py
index 5cc3063..d463913 100644
--- a/tests/custom_cluster/test_admission_controller.py
+++ b/tests/custom_cluster/test_admission_controller.py
@@ -19,7 +19,6 @@
 
 import itertools
 import logging
-import math
 import os
 import pytest
 import re
@@ -420,8 +419,7 @@ class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
     for query in non_trivial_queries:
       ex = self.execute_query_expect_failure(self.client, query)
       assert re.search("Rejected query from pool default-pool: request memory needed "
-                       ".* is greater than pool max mem resources 10.00 MB \(configured "
-                       "statically\)", str(ex))
+                       ".* is greater than pool max mem resources 10.00 MB", str(ex))
 
   @SkipIfS3.hdfs_block_size
   @SkipIfABFS.hdfs_block_size
@@ -846,8 +844,8 @@ class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
     STMT = "select sleep(100)"
     TIMEOUT_S = 60
     EXPECTED_REASON = "Latest admission queue reason: Not enough aggregate memory " +\
-        "available in pool default-pool with max mem resources 10.00 MB (configured " \
-        "statically). Needed 9.00 MB but only 1.00 MB was available."
+        "available in pool default-pool with max mem resources 10.00 MB. Needed 9.00 MB" \
+        " but only 1.00 MB was available."
     NUM_QUERIES = 5
     profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
         TIMEOUT_S, {'mem_limit': '9mb'})
@@ -1138,186 +1136,6 @@ class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
     assert num_rejected == NUM_QUERIES - num_admitted_immediately - num_queued
 
   @pytest.mark.execute_serially
-  @CustomClusterTestSuite.with_args(impalad_args=impalad_admission_ctrl_config_args(
-    fs_allocation_file="fair-scheduler-test2.xml",
-    llama_site_file="llama-site-test2.xml"),
-    statestored_args=_STATESTORED_ARGS)
-  def test_scalable_config(self, vector):
-    """
-    Test that the scalable configuration parameters scale as the cluster size changes.
-    """
-    # Start with 3 Impalads
-    coordinator = self.cluster.impalads[0]
-    impalad_1 = self.cluster.impalads[1]
-    impalad_2 = self.cluster.impalads[2]
-    self.__check_admission_by_counts(expected_num_impalads=3)
-    # The mem_limit values that are passed to __check_admission_by_memory are based on
-    # the memory used by the query when run on clusters of varying sizes, but mem_limit
-    # is used in the test to make the test deterministic in the presence of changing
-    # memory estimates.
-    self.__check_admission_by_memory(True, '85M')
-
-    # Kill an Impalad, now there are 2.
-    impalad_1.kill_and_wait_for_exit()
-    coordinator.service.wait_for_num_known_live_backends(2)
-    self.__check_admission_by_counts(expected_num_impalads=2)
-    self.__check_admission_by_memory(False, '125M',
-                                   'is greater than pool max mem resources 200.00 MB'
-                                   ' (calculated as 2 backends each with 100.00 MB)')
-
-    # Restart an Impalad, now there are 3 again.
-    impalad_1.start(wait_until_ready=True)
-    coordinator.service.wait_for_num_known_live_backends(3)
-    self.__check_admission_by_counts(expected_num_impalads=3)
-    self.__check_admission_by_memory(True, '85M')
-
-    # Kill 2 Impalads, now there are 1.
-    impalad_1.kill_and_wait_for_exit()
-    impalad_2.kill_and_wait_for_exit()
-    coordinator.service.wait_for_num_known_live_backends(1)
-    self.__check_admission_by_counts(expected_num_impalads=1)
-    self.__check_admission_by_memory(False, '135M',
-                                   'is greater than pool max mem resources 100.00 MB'
-                                   ' (calculated as 1 backends each with 100.00 MB)')
-
-    # Restart 2 Impalads, now there are 3 again.
-    impalad_1.start(wait_until_ready=True)
-    impalad_2.start(wait_until_ready=True)
-    coordinator.service.wait_for_num_known_live_backends(3)
-    self.__check_admission_by_counts(expected_num_impalads=3)
-    self.__check_admission_by_memory(True, '85M')
-
-  def __check_admission_by_memory(self, expected_admission, mem_limit,
-                                  expected_rejection_reason=None):
-    """
-    Test if a query can run against the current cluster.
-    :param mem_limit set in the client configuration to limit query memory.
-    :param expected_admission: True if admission is expected.
-    :param expected_rejection_reason: a string expected to be in the reason for rejection.
-    """
-    query = "select * from functional.alltypesagg  order by int_col limit 1"
-    profiles = self._execute_and_collect_profiles([query], timeout_s=60,
-        allow_query_failure=True, config_options={'request_pool': 'root.queueE',
-                                                  'mem_limit': mem_limit})
-    assert len(profiles) == 1
-    profile = profiles[0]
-    if "Admission result: Admitted immediately" in profile:
-      did_admit = True
-    elif "Admission result: Rejected" in profile:
-      did_admit = False
-      num_rejected, rejected_reasons = self.parse_profiles_rejected(profiles)
-      assert num_rejected == 1
-      assert expected_rejection_reason is not None, rejected_reasons[0]
-      assert expected_rejection_reason in rejected_reasons[0], profile
-    else:
-      assert "Admission result: Admitted (queued)" in profile, profile
-      assert 0, "should not queue based on memory"
-    assert did_admit == expected_admission, profile
-
-  def __check_admission_by_counts(self, expected_num_impalads):
-    """
-    Run some queries, find how many were admitted, queued or rejected, and check that
-    AdmissionController correctly enforces query count limits based in the configuration
-    in llama-site-test2.xml.
-    """
-    NUM_QUERIES = 6
-    # Set expected values based on expected_num_impalads.
-    # We can run 1 query per backend for queueE from llama-site-test2.xml.
-    expected_num_admitted = expected_num_impalads
-    # The value of max-queued-queries-multiple for queueE from llama-site-test2.xml.
-    QUERIES_MULTIPLE = 0.6
-    expected_num_queued = int(math.ceil(expected_num_impalads * QUERIES_MULTIPLE))
-    expected_num_rejected = NUM_QUERIES - (expected_num_admitted + expected_num_queued)
-
-    impalad = self.cluster.impalads[0]
-    client = impalad.service.create_beeswax_client()
-    client.set_configuration({'request_pool': 'root.queueE'})
-    result = client.execute("select 1")
-    # Query should execute in queueE.
-    self.__check_query_options(result.runtime_profile, ['REQUEST_POOL=root.queueE'])
-    STMT = "select sleep(1000)"
-    TIMEOUT_S = 60
-    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
-            TIMEOUT_S, allow_query_failure=True,
-            config_options={'request_pool': 'root.queueE'})
-    # Check admitted queries
-    num_admitted_immediately = self.parse_profiles_admitted(profiles)
-    assert num_admitted_immediately == expected_num_admitted
-
-    # Check queued queries.
-    num_queued, queued_reasons = self.parse_profiles_queued(profiles)
-    assert num_queued == expected_num_queued
-    assert len(queued_reasons) == num_queued
-    expected_queue_reason = (
-      "number of running queries {0} is at or over limit {1}".format(
-        expected_num_admitted, expected_num_admitted)
-    )
-    # The first query to get queued sees expected_queue_reason.
-    assert len([s for s in queued_reasons if
-                expected_queue_reason in s]) == 1
-    # Subsequent queries that are queued see that the queue is not empty.
-    expected_see_non_empty_queue = max(expected_num_queued - 1, 0)
-    assert len([s for s in queued_reasons if
-                "queue is not empty" in s]) == expected_see_non_empty_queue
-
-    # Check rejected queries
-    num_rejected, rejected_reasons = self.parse_profiles_rejected(profiles)
-    assert num_rejected == expected_num_rejected
-    expected_rejection_reason = (
-      "Rejected query from pool root.queueE: queue full, "
-      "limit={0} (calculated as {1} backends each with 0.6 queries)".format(
-        expected_num_queued, expected_num_impalads)
-    )
-    assert len([s for s in rejected_reasons if
-                expected_rejection_reason in s]) == expected_num_rejected
-
-  def parse_profiles_queued(self, profiles):
-    """
-    Parse a list of Profile strings and sum the counts of queries queued.
-    :param profiles: a list of query profiles to parse.
-    :return: The number queued.
-    """
-    num_queued = 0
-    queued_reasons_return = []
-    for profile in profiles:
-      if "Admission result: Admitted (queued)" in profile:
-        queued_reasons = [line for line in profile.split("\n")
-                          if "Initial admission queue reason:" in line]
-        assert len(queued_reasons) == 1, profile
-        num_queued += 1
-        queued_reasons_return.append(queued_reasons[0])
-    return num_queued, queued_reasons_return
-
-  def parse_profiles_admitted(self, profiles):
-    """
-    Parse a list of Profile strings and sum the counts of queries admitted immediately.
-    :param profiles: a list of query profiles to parse.
-    :return: The number admitted immediately.
-    """
-    num_admitted_immediately = 0
-    for profile in profiles:
-      if "Admission result: Admitted immediately" in profile:
-        num_admitted_immediately += 1
-    return num_admitted_immediately
-
-  def parse_profiles_rejected(self, profiles):
-    """
-    Parse a list of Profile strings and sum the counts of queries rejected.
-    :param profiles: a list of query profiles to parse.
-    :return: The number rejected.
-    """
-    num_rejected = 0
-    rejected_reasons_return = []
-    for profile in profiles:
-      if "Admission result: Rejected" in profile:
-        num_rejected += 1
-        query_statuses = [line for line in profile.split("\n")
-                          if "Query Status:" in line]
-        assert len(query_statuses) == 1, profile
-        rejected_reasons_return.append(query_statuses[0])
-    return num_rejected, rejected_reasons_return
-
-  @pytest.mark.execute_serially
   def test_impala_server_startup_delay(self):
     """This test verifies that queries get queued when the coordinator has already started
     accepting client connections during startup, but the local backend descriptor is not
diff --git a/tests/webserver/test_web_pages.py b/tests/webserver/test_web_pages.py
index b93dfa1..cab6108 100644
--- a/tests/webserver/test_web_pages.py
+++ b/tests/webserver/test_web_pages.py
@@ -621,12 +621,7 @@ class TestWebPage(ImpalaTestSuite):
     # check that metrics exist
     assert 'max_query_mem_limit' in pool_config
     assert 'min_query_mem_limit' in pool_config
-    assert 'max_running_queries_multiple' in pool_config
-    assert 'max_memory_multiple' in pool_config
     assert 'clamp_mem_limit_query_option' in pool_config
-    assert 'max_running_queries_derived' in pool_config
-    assert 'max_queued_queries_derived' in pool_config
-    assert 'max_memory_derived' in pool_config
 
   def __fetch_resource_pools_json(self, pool_name=None):
     """Helper method used to fetch the resource pool json from the admission debug page.
diff --git a/www/admission_controller.tmpl b/www/admission_controller.tmpl
index 6b88401..46c73d5 100644
--- a/www/admission_controller.tmpl
+++ b/www/admission_controller.tmpl
@@ -261,18 +261,6 @@ Time since last statestore update containing admission control topic state (ms):
       <td>Clamp MEM_LIMIT query option</td>
       <td>{{clamp_mem_limit_query_option}}</td>
     </tr>
-    <tr>
-      <td>Max Running Queries Multiple</td>
-      <td>{{max_running_queries_multiple}}</td>
-    </tr>
-    <tr>
-      <td>Max Queued Queries Multiple</td>
-      <td>{{max_queued_queries_multiple}}</td>
-    </tr>
-    <tr>
-      <td>Max Memory Multiple</td>
-      <td class='memory'>{{max_memory_multiple}}</td>
-    </tr>
   </table>
 
   <h4>Queued queries in order of being queued (submitted to this coordinator)</h4>
@@ -349,22 +337,22 @@ Time since last statestore update containing admission control topic state (ms):
     <tr>
       <td>Queries currently running</td>
       <td>{{agg_num_running}}</td>
-      <td>{{max_running_queries_derived}}</td>
+      <td>{{pool_max_requests}}</td>
     </tr>
     <tr>
       <td>Queries currently queued</td>
       <td>{{agg_num_queued}}</td>
-      <td>{{max_queued_queries_derived}}</td>
+      <td>{{pool_max_queued}}</td>
     </tr>
     <tr>
       <td>Total memory reserved across cluster</td>
       <td class='memory'>{{agg_mem_reserved}}</td>
-      <td class='memory'>{{max_memory_derived}}</td>
+      <td class='memory'>{{pool_max_mem_resources}}</td>
     </tr>
     <tr>
       <td>Memory admitted on this coordinator</td>
       <td class='memory'>{{local_mem_admitted}}</td>
-      <td class='memory'>{{max_memory_derived}}</td>
+      <td class='memory'>{{pool_max_mem_resources}}</td>
     </tr>
     <tr>
       <td>Queued reason of query at the head of the queue</td>


[impala] 03/04: IMPALA-9840: Fix data race in InternalQueue

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stakiar pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit e389e857479efa56aab59476e07a23221d66a4c7
Author: Bikramjeet Vig <bi...@gmail.com>
AuthorDate: Mon Jun 8 14:54:46 2020 -0700

    IMPALA-9840: Fix data race in InternalQueue
    
    This patch converts the remaining public methods in InternalQueue to
    be thread-safe. These methods were being used during multi-threaded
    execution and were flagged by ThreadSanitizer.
    
    Performance:
    Ran a single-node perf run with TPCH (scale factor 30) on my local
    machine to make sure there was no perf impact.
    
    +----------+-----------------------+---------+------------+------------+----------------+
    | Workload | File Format           | Avg (s) | Delta(Avg) | GeoMean(s) | Delta(GeoMean) |
    +----------+-----------------------+---------+------------+------------+----------------+
    | TPCH(30) | parquet / none / none | 6.17    | -1.59%     | 4.33       | -0.30%         |
    +----------+-----------------------+---------+------------+------------+----------------+
    
    Change-Id: Ied72c4573e5d23ba744964c3e8a90851d9c6b31c
    Reviewed-on: http://gerrit.cloudera.org:8080/16051
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 be/src/util/internal-queue.h | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/be/src/util/internal-queue.h b/be/src/util/internal-queue.h
index ed367d7..85a46e1 100644
--- a/be/src/util/internal-queue.h
+++ b/be/src/util/internal-queue.h
@@ -81,7 +81,7 @@ class InternalQueueBase {
   /// if the queue is empty. This is O(1).
   T* head() const {
     std::lock_guard<LockType> lock(lock_);
-    if (empty()) return nullptr;
+    if (IsEmptyLocked()) return nullptr;
     return reinterpret_cast<T*>(head_);
   }
 
@@ -89,7 +89,7 @@ class InternalQueueBase {
   /// if the queue is empty. This is O(1).
   T* tail() {
     std::lock_guard<LockType> lock(lock_);
-    if (empty()) return nullptr;
+    if (IsEmptyLocked()) return nullptr;
     return reinterpret_cast<T*>(tail_);
   }
 
@@ -133,7 +133,7 @@ class InternalQueueBase {
     Node* result = nullptr;
     {
       std::lock_guard<LockType> lock(lock_);
-      if (empty()) return nullptr;
+      if (IsEmptyLocked()) return nullptr;
       --size_;
       result = head_;
       head_ = head_->next;
@@ -155,7 +155,7 @@ class InternalQueueBase {
     Node* result = nullptr;
     {
       std::lock_guard<LockType> lock(lock_);
-      if (empty()) return nullptr;
+      if (IsEmptyLocked()) return nullptr;
       --size_;
       result = tail_;
       tail_ = tail_->prev;
@@ -223,8 +223,14 @@ class InternalQueueBase {
     head_ = tail_ = nullptr;
   }
 
-  int size() const { return size_; }
-  bool empty() const { return head_ == nullptr; }
+  int size() const {
+    std::lock_guard<LockType> lock(lock_);
+    return SizeLocked();
+  }
+  bool empty() const {
+    std::lock_guard<LockType> lock(lock_);
+    return IsEmptyLocked();
+  }
 
   /// Returns if the target is on the queue. This is O(1) and does not acquire any locks.
   bool Contains(const T* target) const {
@@ -237,7 +243,7 @@ class InternalQueueBase {
     std::lock_guard<LockType> lock(lock_);
     if (head_ == nullptr) {
       if (tail_ != nullptr) return false;
-      if (size() != 0) return false;
+      if (SizeLocked() != 0) return false;
       return true;
     }
 
@@ -254,7 +260,7 @@ class InternalQueueBase {
       }
       current = next;
     }
-    if (num_elements_found != size()) return false;
+    if (num_elements_found != SizeLocked()) return false;
     return true;
   }
 
@@ -286,6 +292,10 @@ class InternalQueueBase {
 
  private:
   friend struct Node;
+
+  inline int SizeLocked() const { return size_; }
+  inline bool IsEmptyLocked() const { return head_ == nullptr; }
+
   mutable LockType lock_;
   Node *head_, *tail_;
   int size_;
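
The pattern in the hunks above is worth spelling out: each public accessor
now takes lock_ and delegates to a private *Locked() helper, so code paths
that already hold the mutex (such as Dequeue() or Validate()) can reuse the
same logic without re-acquiring a non-recursive lock. Below is a minimal,
self-contained sketch of that pattern; the LockedQueue and IntNode names and
the simplified singly linked layout are illustrative assumptions only and do
not reproduce Impala's actual InternalQueueBase.

#include <iostream>
#include <mutex>

struct IntNode {
  int value = 0;
  IntNode* next = nullptr;
};

// Sketch of a queue whose public accessors lock, then delegate to
// private *Locked() helpers that assume lock_ is already held.
class LockedQueue {
 public:
  int size() const {
    std::lock_guard<std::mutex> lock(lock_);
    return SizeLocked();
  }

  bool empty() const {
    std::lock_guard<std::mutex> lock(lock_);
    return IsEmptyLocked();
  }

  void Enqueue(IntNode* node) {
    std::lock_guard<std::mutex> lock(lock_);
    node->next = nullptr;
    if (IsEmptyLocked()) {
      head_ = node;
    } else {
      tail_->next = node;
    }
    tail_ = node;
    ++size_;
  }

  // Already holds lock_, so it must call IsEmptyLocked() rather than
  // empty(); re-locking a non-recursive std::mutex is undefined behavior.
  IntNode* Dequeue() {
    std::lock_guard<std::mutex> lock(lock_);
    if (IsEmptyLocked()) return nullptr;
    IntNode* result = head_;
    head_ = head_->next;
    if (head_ == nullptr) tail_ = nullptr;
    --size_;
    return result;
  }

 private:
  // Reading head_/size_ here is safe only because every caller holds lock_.
  int SizeLocked() const { return size_; }
  bool IsEmptyLocked() const { return head_ == nullptr; }

  mutable std::mutex lock_;
  IntNode* head_ = nullptr;
  IntNode* tail_ = nullptr;
  int size_ = 0;
};

int main() {
  LockedQueue q;
  IntNode a{1}, b{2};
  q.Enqueue(&a);
  q.Enqueue(&b);
  std::cout << q.size() << "\n";            // 2
  std::cout << q.Dequeue()->value << "\n";  // 1
  std::cout << q.empty() << "\n";           // 0 (false)
  return 0;
}

Keeping the helpers private means the unlocked reads cannot be reached from
outside the class, which is exactly the hole ThreadSanitizer flagged in the
old public size()/empty(): they read size_ and head_ without the lock while
other threads mutated them under it.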