Posted to commits@impala.apache.org by jo...@apache.org on 2023/03/09 17:22:53 UTC

[impala] 04/06: IMPALA-11975: Fix Dictionary methods to work with Python 3

This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit c233634d747e4106128627f258d1bee23855fd1e
Author: Joe McDonnell <jo...@cloudera.com>
AuthorDate: Sat Mar 4 09:52:49 2023 -0800

    IMPALA-11975: Fix Dictionary methods to work with Python 3
    
    In Python 3, the main dictionary methods (items(), keys(),
    values()) return lazy view objects rather than lists, so
    code that needs an actual list must wrap the call in
    list(). Python 3 also removed the old iter* lazy variants
    (iteritems(), iterkeys(), itervalues()).
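    
    A minimal sketch (not part of the patch) of the behavior
    difference:
    
      d = {"a": 1, "b": 2}
      keys = d.keys()          # Python 2: list; Python 3: view
      # keys[0]                # OK in Python 2; TypeError in 3
      first = list(keys)[0]    # works in both
      # d.iteritems()          # Python 2 only; AttributeError in 3
      for k, v in d.items():   # works in both
          print(k, v)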
    
    This changes all locations to use the Python 3 dictionary
    methods and wraps the calls with list() where appropriate.
    It also changes all iteritems(), itervalues(), and
    iterkeys() call sites to items(), values(), and keys().
    Python 2 builds full lists for these methods rather than
    lazy iterators, so there is a theoretical performance
    impact, but our Python code is mostly for tests and the
    impact is minimal. Python 2 support will be deprecated
    once Python 3 support is functional.
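    
    A sketch of the conversion pattern (illustrative names,
    not taken from the diff):
    
      from random import choice
    
      options = {"mem_limit": "1g", "num_nodes": "1"}
    
      # Plain iteration: replacing iteritems() with items() is
      # enough and works on both Python 2 and 3.
      pairs = ["%s=%s" % (k, v) for k, v in options.items()]
    
      # Where a real list is needed (e.g. random.choice requires
      # indexing), wrap the view in list().
      key = choice(list(options.keys()))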
    
    This addresses these pylint warnings:
    dict-iter-method
    dict-keys-not-iterating
    dict-values-not-iterating
    
    Testing:
     - Ran core tests
    
    Change-Id: Ie873ece54a633a8a95ed4600b1df4be7542348da
    Reviewed-on: http://gerrit.cloudera.org:8080/19590
    Reviewed-by: Joe McDonnell <jo...@cloudera.com>
    Tested-by: Joe McDonnell <jo...@cloudera.com>
---
 bin/banned_py3k_warnings.txt                      |  3 +++
 bin/load-data.py                                  |  2 +-
 bin/start-impala-cluster.py                       |  4 ++--
 testdata/bin/generate-schema-statements.py        |  2 +-
 tests/beeswax/impala_beeswax.py                   |  6 +++---
 tests/common/impala_cluster.py                    |  4 ++--
 tests/common/impala_connection.py                 |  2 +-
 tests/common/impala_test_suite.py                 |  6 +++---
 tests/comparison/common.py                        |  6 +++---
 tests/comparison/leopard/impala_docker_env.py     |  2 +-
 tests/comparison/query_generator.py               | 10 +++++-----
 tests/comparison/query_profile.py                 | 16 ++++++++--------
 tests/custom_cluster/test_admission_controller.py |  5 +++--
 tests/custom_cluster/test_breakpad.py             |  4 ++--
 tests/performance/workload.py                     |  2 +-
 tests/query_test/test_cancellation.py             |  2 +-
 tests/query_test/test_decimal_fuzz.py             |  4 ++--
 tests/query_test/test_insert_parquet.py           |  2 +-
 tests/query_test/test_kudu.py                     |  4 ++--
 tests/run-tests.py                                |  2 +-
 tests/stress/concurrent_select.py                 |  7 ++++---
 tests/stress/extract_min_mem.py                   |  2 +-
 tests/stress/queries.py                           |  2 +-
 tests/stress/query_runner.py                      |  6 +++---
 tests/stress/runtime_info.py                      | 10 +++++-----
 25 files changed, 60 insertions(+), 55 deletions(-)

diff --git a/bin/banned_py3k_warnings.txt b/bin/banned_py3k_warnings.txt
index 01d54fe73..a970906a0 100644
--- a/bin/banned_py3k_warnings.txt
+++ b/bin/banned_py3k_warnings.txt
@@ -7,3 +7,6 @@ zip-builtin-not-iterating
 filter-builtin-not-iterating
 reduce-builtin
 deprecated-itertools-function
+dict-iter-method
+dict-keys-not-iterating
+dict-values-not-iterating
diff --git a/bin/load-data.py b/bin/load-data.py
index 535a4f8bf..a4cfd5a97 100755
--- a/bin/load-data.py
+++ b/bin/load-data.py
@@ -483,7 +483,7 @@ def main():
   total_time = 0.0
   thread_pool.close()
   thread_pool.join()
-  for workload, load_time in loading_time_map.iteritems():
+  for workload, load_time in loading_time_map.items():
     total_time += load_time
     LOG.info('Data loading for workload \'%s\' completed in: %.2fs'\
         % (workload, load_time))
diff --git a/bin/start-impala-cluster.py b/bin/start-impala-cluster.py
index 5fac2c2d6..40ccd3d36 100755
--- a/bin/start-impala-cluster.py
+++ b/bin/start-impala-cluster.py
@@ -593,7 +593,7 @@ class DockerMiniClusterOperations(object):
     # List all running containers on the network and kill those with the impalad name
     # prefix to make sure that no running container are left over from previous clusters.
     container_name_prefix = self.__gen_container_name__("impalad")
-    for container_id, info in self.__get_network_info__()["Containers"].iteritems():
+    for container_id, info in self.__get_network_info__()["Containers"].items():
       container_name = info["Name"]
       if container_name.startswith(container_name_prefix):
         LOG.info("Stopping container {0}".format(container_name))
@@ -665,7 +665,7 @@ class DockerMiniClusterOperations(object):
       port_args = ["-P"]
     else:
       port_args = ["-p{dst}:{src}".format(src=src, dst=dst)
-                   for src, dst in port_map.iteritems()]
+                   for src, dst in port_map.items()]
     # Impersonate the current user for operations against the minicluster. This is
     # necessary because the user name inside the container is "root".
     # TODO: pass in the actual options
diff --git a/testdata/bin/generate-schema-statements.py b/testdata/bin/generate-schema-statements.py
index 69683a2f4..232db516b 100755
--- a/testdata/bin/generate-schema-statements.py
+++ b/testdata/bin/generate-schema-statements.py
@@ -336,7 +336,7 @@ def build_table_template(file_format, columns, partition_columns, row_format, tb
     external = ""
 
   all_tblproperties = []
-  for key, value in tblproperties.iteritems():
+  for key, value in tblproperties.items():
     all_tblproperties.append("'{0}' = '{1}'".format(key, value))
 
   # If there are no properties to set avoid the TBLPROPERTIES clause altogether.
diff --git a/tests/beeswax/impala_beeswax.py b/tests/beeswax/impala_beeswax.py
index c58bb7109..dbe2e81fc 100644
--- a/tests/beeswax/impala_beeswax.py
+++ b/tests/beeswax/impala_beeswax.py
@@ -128,7 +128,7 @@ class ImpalaBeeswaxClient(object):
     self.query_states = QueryState._NAMES_TO_VALUES
 
   def __options_to_string_list(self):
-    return ["%s=%s" % (k,v) for (k,v) in self.__query_options.iteritems()]
+    return ["%s=%s" % (k, v) for (k, v) in self.__query_options.items()]
 
   def get_query_options(self):
     return self.__query_options
@@ -140,7 +140,7 @@ class ImpalaBeeswaxClient(object):
     if query_option_dict is None:
       raise ValueError('Cannot pass None value for query options')
     self.clear_query_options()
-    for name, value in query_option_dict.iteritems():
+    for name, value in query_option_dict.items():
       self.set_query_option(name, value)
 
   def get_query_option(self, name):
@@ -483,7 +483,7 @@ class ImpalaBeeswaxClient(object):
     result = self.close_dml(handle)
     # The insert was successful
     num_rows = sum(map(int, result.rows_modified.values()))
-    data = ["%s: %s" % row for row in result.rows_modified.iteritems()]
+    data = ["%s: %s" % row for row in result.rows_modified.items()]
     exec_result = ImpalaBeeswaxResult(query_id=handle.id, success=True, data=data)
     exec_result.summary = "Inserted %d rows" % (num_rows,)
     return exec_result
diff --git a/tests/common/impala_cluster.py b/tests/common/impala_cluster.py
index 8a02e952f..e8a9aff7f 100644
--- a/tests/common/impala_cluster.py
+++ b/tests/common/impala_cluster.py
@@ -282,7 +282,7 @@ class ImpalaCluster(object):
       args = container_info["Args"]
       executable = os.path.basename(args[0])
       port_map = {}
-      for k, v in container_info["NetworkSettings"]["Ports"].iteritems():
+      for k, v in container_info["NetworkSettings"]["Ports"].items():
         # Key looks like "25000/tcp"..
         port = int(k.split("/")[0])
         # Value looks like { "HostPort": "25002", "HostIp": "" }.
@@ -626,7 +626,7 @@ def run_daemon(daemon_binary, args, build_type="latest", env_vars={}, output_fil
   # achieve the same thing but it doesn't work on some platforms for some reasons.
   sys_cmd = ("{set_cmds} {cmd} {redirect} &".format(
       set_cmds=''.join(["export {0}={1};".format(k, pipes.quote(v))
-                         for k, v in env_vars.iteritems()]),
+                         for k, v in env_vars.items()]),
       cmd=' '.join([pipes.quote(tok) for tok in cmd]),
       redirect=redirect))
   os.system(sys_cmd)
diff --git a/tests/common/impala_connection.py b/tests/common/impala_connection.py
index 21128db5d..6c949ea23 100644
--- a/tests/common/impala_connection.py
+++ b/tests/common/impala_connection.py
@@ -94,7 +94,7 @@ class ImpalaConnection(object):
     """Replaces existing configuration with the given dictionary"""
     assert config_option_dict is not None, "config_option_dict cannot be None"
     self.clear_configuration()
-    for name, value in config_option_dict.iteritems():
+    for name, value in config_option_dict.items():
       self.set_configuration_option(name, value)
 
   @abc.abstractmethod
diff --git a/tests/common/impala_test_suite.py b/tests/common/impala_test_suite.py
index 92099170e..c8c780a3b 100644
--- a/tests/common/impala_test_suite.py
+++ b/tests/common/impala_test_suite.py
@@ -408,7 +408,7 @@ class ImpalaTestSuite(BaseTestSuite):
     # Populate the default query option if it's empty.
     if not self.default_query_options:
       query_options = impalad_client.get_default_configuration()
-      for key, value in query_options.iteritems():
+      for key, value in query_options.items():
         self.default_query_options[key.upper()] = value
     # Restore all the changed query options.
     for query_option in query_options_changed:
@@ -512,12 +512,12 @@ class ImpalaTestSuite(BaseTestSuite):
       raise AssertionError("Query contains $DATABASE but no use_db specified")
 
     if extra:
-      for k, v in extra.iteritems():
+      for k, v in extra.items():
         if k in repl:
           raise RuntimeError("Key {0} is reserved".format(k))
         repl[k] = v
 
-    for k, v in repl.iteritems():
+    for k, v in repl.items():
       s = s.replace(k, v)
     return s
 
diff --git a/tests/comparison/common.py b/tests/comparison/common.py
index 1025e7c79..5b2e71dcd 100644
--- a/tests/comparison/common.py
+++ b/tests/comparison/common.py
@@ -162,7 +162,7 @@ class ValExpr(object):
     if self.is_func:
       for arg in self.args:
         if isinstance(arg, ValExpr):
-          for col, count in arg.count_col_refs().iteritems():
+          for col, count in arg.count_col_refs().items():
             col_ref_counts[col] += count
     elif self.is_col:
       col_ref_counts[self] += 1
@@ -631,7 +631,7 @@ class TableExprList(list):
   def joinable_cols_by_type(self):
     cols_by_type = defaultdict(ValExprList)
     for table_expr in self:
-      for type_, cols in table_expr.joinable_cols_by_type.iteritems():
+      for type_, cols in table_expr.joinable_cols_by_type.items():
         cols_by_type[type_].extend(cols)
     return cols_by_type
 
@@ -639,7 +639,7 @@ class TableExprList(list):
   def cols_by_type(self):
     cols_by_type = defaultdict(ValExprList)
     for table_expr in self:
-      for type_, cols in table_expr.cols_by_type.iteritems():
+      for type_, cols in table_expr.cols_by_type.items():
         cols_by_type[type_].extend(cols)
     return cols_by_type
 
diff --git a/tests/comparison/leopard/impala_docker_env.py b/tests/comparison/leopard/impala_docker_env.py
index af3a09d48..1814764b6 100755
--- a/tests/comparison/leopard/impala_docker_env.py
+++ b/tests/comparison/leopard/impala_docker_env.py
@@ -141,7 +141,7 @@ class ImpalaDockerEnv(object):
           volume_ops = ' '.join(
               ['-v {host_path}:{container_path}'.format(host_path=host_path,
                                                         container_path=container_path)
-               for host_path, container_path in volume_map.iteritems()])
+               for host_path, container_path in volume_map.items()])
         start_command += (
             'docker run -d -t {volume_ops} -p {postgres_port}:5432 -p {ssh_port}:22 '
             '-p {impala_port}:21050 {docker_image_name} /bin/docker-boot-daemon').format(
diff --git a/tests/comparison/query_generator.py b/tests/comparison/query_generator.py
index 3da76a1ab..00df0c4f2 100644
--- a/tests/comparison/query_generator.py
+++ b/tests/comparison/query_generator.py
@@ -752,7 +752,7 @@ class QueryGenerator(object):
         # A root_func was chosen and it's children are in one or more of the
         # null_args_by_func_allowed pools. A pool will be chosen, then a child function.
         null_arg_counts_by_pool = dict((pool_category, len(pool)) for pool_category, pool
-                                       in null_args_by_func_allowed.iteritems())
+                                       in null_args_by_func_allowed.items())
         # There is a special case that would lead to a dead end. If there is only one
         # distinct place holder across all the pools and an analytic is still needed,
         # then that place holder cannot be replaced by an aggregate since aggregates
@@ -788,7 +788,7 @@ class QueryGenerator(object):
 
       if parent_func:
         # Remove the place holder from all of the other pools.
-        for pool_category, pool in null_args_by_func_allowed.iteritems():
+        for pool_category, pool in null_args_by_func_allowed.items():
           for null_arg_idx, (func, arg_idx) in enumerate(pool):
             if func is parent_func and arg_idx == parent_arg_idx:
               del pool[null_arg_idx]
@@ -849,7 +849,7 @@ class QueryGenerator(object):
             continue
           null_args.append((chosen_func, idx))
 
-      if not any(null_args_by_func_allowed.itervalues()):
+      if not any(null_args_by_func_allowed.values()):
         # Some analytic functions take no arguments. Ex: ROW_NUM()
         break
 
@@ -1253,7 +1253,7 @@ class QueryGenerator(object):
 
     root_predicate, relational_predicates = self._create_boolean_func_tree(
       require_relational_func=True,
-      relational_col_types=table_exprs_by_col_types.keys(),
+      relational_col_types=list(table_exprs_by_col_types.keys()),
       allowed_signatures=join_signatures)
 
     for predicate in relational_predicates:
@@ -1440,7 +1440,7 @@ class QueryGenerator(object):
           # Prefer to replace Boolean leaves to get a more realistic expression.
           return_type = Boolean
         else:
-          return_type = choice(null_args_by_type.keys())
+          return_type = choice(list(null_args_by_type.keys()))
         # Rather than track if this is a child of a relational function, in which case
         # the arg type needs to be preserved, just always assume that this is a child of
         # a relational function.
diff --git a/tests/comparison/query_profile.py b/tests/comparison/query_profile.py
index e47f7a6c6..fae66d71e 100644
--- a/tests/comparison/query_profile.py
+++ b/tests/comparison/query_profile.py
@@ -296,9 +296,9 @@ class DefaultProfile(object):
       weights = self.weights(*weight_args)
     else:
       weights = weight_args[0]
-    total_weight = sum(weights.itervalues())
+    total_weight = sum(weights.values())
     numeric_choice = randint(1, total_weight)
-    for choice_, weight in weights.iteritems():
+    for choice_, weight in weights.items():
       if weight <= 0:
         continue
       if numeric_choice <= weight:
@@ -312,7 +312,7 @@ class DefaultProfile(object):
     else:
       weights = weights[0]
     return self._choose_from_weights(dict((choice_, weight) for choice_, weight
-                                     in weights.iteritems() if filter_fn(choice_)))
+                                     in weights.items() if filter_fn(choice_)))
 
   def _decide_from_probability(self, *keys):
     return random() < self.probability(*keys)
@@ -341,7 +341,7 @@ class DefaultProfile(object):
     return self._choose_from_bounds('MAX_NESTED_EXPR_COUNT')
 
   def allowed_analytic_designs(self):
-    return [design for design, is_enabled in self._flags['ANALYTIC_DESIGNS'].iteritems()
+    return [design for design, is_enabled in self._flags['ANALYTIC_DESIGNS'].items()
             if is_enabled]
 
   def use_partition_by_clause_in_analytic(self):
@@ -386,14 +386,14 @@ class DefaultProfile(object):
 
   def choose_subquery_predicate_category(self, func_name, allow_correlated):
     weights = self.weights('SUBQUERY_PREDICATE')
-    func_names = set(name for name, _, _ in weights.iterkeys())
+    func_names = set(name for name, _, _ in weights.keys())
     if func_name not in func_names:
       func_name = 'Scalar'
     allow_agg = self.weights('SELECT_ITEM_CATEGORY').get('AGG', 0)
     if allow_correlated and self.bounds('TABLE_COUNT')[1] == 0:
       allow_correlated = False
     weights = dict(((name, use_agg, use_correlated), weight)
-                   for (name, use_agg, use_correlated), weight in weights.iteritems()
+                   for (name, use_agg, use_correlated), weight in weights.items()
                    if name == func_name and
                    (allow_agg or use_agg == 'NON_AGG') and
                    weight)
@@ -563,7 +563,7 @@ class DefaultProfile(object):
       if not func_weights:
         raise Exception('All functions disallowed based on signature types')
       distinct_signature_lengths = set(signature_length_by_func.values())
-      for func, weight in func_weights.iteritems():
+      for func, weight in func_weights.items():
         signature_length = signature_length_by_func[func]
         func_weights[func] = reduce(
             lambda x, y: x * y,
@@ -591,7 +591,7 @@ class DefaultProfile(object):
         signature_weights[idx] = signature_weight
         signature_lengths[idx] = signature_length
     distinct_signature_lengths = set(signature_lengths.values())
-    for idx, weight in signature_weights.iteritems():
+    for idx, weight in signature_weights.items():
       signature_length = signature_lengths[idx]
       signature_weights[idx] = reduce(
           lambda x, y: x * y,
diff --git a/tests/custom_cluster/test_admission_controller.py b/tests/custom_cluster/test_admission_controller.py
index 79e4cc3a7..1fc7a14af 100644
--- a/tests/custom_cluster/test_admission_controller.py
+++ b/tests/custom_cluster/test_admission_controller.py
@@ -1852,8 +1852,9 @@ class TestAdmissionControllerStress(TestAdmissionControllerBase):
       curr[impalad] = init[impalad]
 
     while True:
-      LOG.debug("wait_for_statestore_updates: curr=%s, init=%s, d=%s", curr.values(),
-          init.values(), [curr[i] - init[i] for i in self.impalads])
+      LOG.debug("wait_for_statestore_updates: curr=%s, init=%s, d=%s",
+          list(curr.values()), list(init.values()),
+          [curr[i] - init[i] for i in self.impalads])
       if all([curr[i] - init[i] >= heartbeats for i in self.impalads]): break
       for impalad in self.impalads:
         curr[impalad] = impalad.service.get_metric_value(
diff --git a/tests/custom_cluster/test_breakpad.py b/tests/custom_cluster/test_breakpad.py
index 6e43c42b5..38c827588 100644
--- a/tests/custom_cluster/test_breakpad.py
+++ b/tests/custom_cluster/test_breakpad.py
@@ -66,7 +66,7 @@ class TestBreakpadBase(CustomClusterTestSuite):
   def start_cluster_with_args(self, **kwargs):
     cluster_options = []
     for daemon_arg in DAEMON_ARGS:
-      daemon_options = " ".join("-{0}={1}".format(k, v) for k, v in kwargs.iteritems())
+      daemon_options = " ".join("-{0}={1}".format(k, v) for k, v in kwargs.items())
       cluster_options.append("--{0}={1}".format(daemon_arg, daemon_options))
     self._start_impala_cluster(cluster_options)
 
@@ -401,7 +401,7 @@ class TestLogging(TestBreakpadBase):
   def start_cluster_with_args(self, cluster_size, log_dir, **kwargs):
     cluster_options = []
     for daemon_arg in DAEMON_ARGS:
-      daemon_options = " ".join("-{0}={1}".format(k, v) for k, v in kwargs.iteritems())
+      daemon_options = " ".join("-{0}={1}".format(k, v) for k, v in kwargs.items())
       cluster_options.append("--{0}={1}".format(daemon_arg, daemon_options))
     self._start_impala_cluster(cluster_options, cluster_size=cluster_size,
                                expected_num_impalads=cluster_size, impala_log_dir=log_dir)
diff --git a/tests/performance/workload.py b/tests/performance/workload.py
index 1fc47b2e1..507c18ef4 100644
--- a/tests/performance/workload.py
+++ b/tests/performance/workload.py
@@ -74,7 +74,7 @@ class Workload(object):
     """
 
     queries = list()
-    for query_name, query_str in self._query_map.iteritems():
+    for query_name, query_str in self._query_map.items():
       queries.append(Query(name=query_name,
                            query_str=query_str,
                            workload=self._name,
diff --git a/tests/query_test/test_cancellation.py b/tests/query_test/test_cancellation.py
index c6cfb50ad..63c43e383 100644
--- a/tests/query_test/test_cancellation.py
+++ b/tests/query_test/test_cancellation.py
@@ -198,7 +198,7 @@ class TestCancellation(ImpalaTestSuite):
                for _ in range(5)), 'Query failed to cancel'
     # Get profile and check for formatting errors
     profile = client.get_runtime_profile(handle, TRuntimeProfileFormat.THRIFT)
-    for (k, v) in profile.nodes[1].info_strings.iteritems():
+    for (k, v) in profile.nodes[1].info_strings.items():
       # Ensure that whitespace gets removed from values.
       assert v == v.rstrip(), \
         "Profile value contains surrounding whitespace: %s %s" % (k, v)
diff --git a/tests/query_test/test_decimal_fuzz.py b/tests/query_test/test_decimal_fuzz.py
index d55251f00..cdb28d9a9 100644
--- a/tests/query_test/test_decimal_fuzz.py
+++ b/tests/query_test/test_decimal_fuzz.py
@@ -46,10 +46,10 @@ class TestDecimalFuzz(ImpalaTestSuite):
     cls.iterations = 10000
 
   def weighted_choice(self, options):
-    total_weight = sum(options.itervalues())
+    total_weight = sum(options.values())
     numeric_choice = random.uniform(0, total_weight)
     last_choice = None
-    for choice, weight in options.iteritems():
+    for choice, weight in options.items():
       if numeric_choice <= weight:
         return choice
       numeric_choice -= weight
diff --git a/tests/query_test/test_insert_parquet.py b/tests/query_test/test_insert_parquet.py
index dc72c63b6..b4f9307da 100644
--- a/tests/query_test/test_insert_parquet.py
+++ b/tests/query_test/test_insert_parquet.py
@@ -425,7 +425,7 @@ class TestHdfsParquetTableWriter(ImpalaTestSuite):
   def _check_only_one_member_var_is_set(obj, var_name):
     """Checks that 'var_name' is the only member of 'obj' that is not None. Useful to
     check Thrift unions."""
-    keys = [k for k, v in vars(obj).iteritems() if v is not None]
+    keys = [k for k, v in vars(obj).items() if v is not None]
     assert keys == [var_name]
 
   def _check_no_logical_type(self, schemas, column_name):
diff --git a/tests/query_test/test_kudu.py b/tests/query_test/test_kudu.py
index f985599e2..be3de8a58 100644
--- a/tests/query_test/test_kudu.py
+++ b/tests/query_test/test_kudu.py
@@ -604,7 +604,7 @@ class TestKuduPartitioning(KuduTestSuite):
 
     query = "INSERT INTO %s SELECT id FROM functional.alltypes" % table_full_name
     exec_options = dict((k, str(v)) for k, v
-        in vector.get_value('exec_option').iteritems())
+        in vector.get_value('exec_option').items())
     cursor.execute(query, configuration=exec_options)
 
     profile = cursor.get_profile()
@@ -1357,7 +1357,7 @@ class TestKuduMemLimits(KuduTestSuite):
     """Tests that the queries specified in this test suite run under the given
     memory limits."""
     exec_options = dict((k, str(v)) for k, v
-                        in vector.get_value('exec_option').iteritems())
+                        in vector.get_value('exec_option').items())
     exec_options['mem_limit'] = "{0}m".format(mem_limit)
     # IMPALA-9856: We disable query result spooling so that this test can run queries
     # with low mem_limit.
diff --git a/tests/run-tests.py b/tests/run-tests.py
index 1ec0f9722..166adc11b 100755
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -166,7 +166,7 @@ def build_test_args(base_name, valid_dirs=VALID_TEST_DIRS):
 
   ignored_dirs = build_ignore_dir_arg_list(valid_dirs=valid_dirs)
   logging_args = []
-  for arg, log in LOGGING_ARGS.iteritems():
+  for arg, log in LOGGING_ARGS.items():
     logging_args.extend([arg, os.path.join(RESULT_DIR, log.format(base_name))])
 
   if valid_dirs != ['verifiers']:
diff --git a/tests/stress/concurrent_select.py b/tests/stress/concurrent_select.py
index 8c15e49ee..36ee36363 100755
--- a/tests/stress/concurrent_select.py
+++ b/tests/stress/concurrent_select.py
@@ -200,7 +200,7 @@ def print_crash_info_if_exists(impala, start_time):
     LOG.error(
         "Aborting after %s failed attempts to check if impalads crashed", max_attempts)
     raise e
-  for message in crashed_impalads.itervalues():
+  for message in crashed_impalads.values():
     print(message, file=sys.stderr)
   return crashed_impalads
 
@@ -393,8 +393,9 @@ class StressRunner(object):
           # First randomly determine a query type, then choose a random query of that
           # type.
           if (
-              QueryType.SELECT in queries_by_type and
-              (len(queries_by_type.keys()) == 1 or random() < self._select_probability)
+              QueryType.SELECT in queries_by_type
+              and (len(list(queries_by_type.keys())) == 1
+                   or random() < self._select_probability)
           ):
             result = choice(queries_by_type[QueryType.SELECT])
           else:
diff --git a/tests/stress/extract_min_mem.py b/tests/stress/extract_min_mem.py
index 22301441c..33db7b3f5 100755
--- a/tests/stress/extract_min_mem.py
+++ b/tests/stress/extract_min_mem.py
@@ -40,7 +40,7 @@ import sys
 results = []
 with open(sys.argv[1]) as f:
   data = json.load(f)
-  for query_data in data['db_names']['tpch_parquet'].itervalues():
+  for query_data in data['db_names']['tpch_parquet'].values():
     runtime_info = query_data['[]']
     # Build up list of query numbers and minimum memory.
     results.append((int(runtime_info['name'][1:]),
diff --git a/tests/stress/queries.py b/tests/stress/queries.py
index 36b3a287c..4ea878f09 100644
--- a/tests/stress/queries.py
+++ b/tests/stress/queries.py
@@ -117,7 +117,7 @@ def load_tpc_queries(workload):
   LOG.info("Loading %s queries", workload)
   queries = []
   for query_name, query_sql in test_file_parser.load_tpc_queries(workload,
-      include_stress_queries=True).iteritems():
+      include_stress_queries=True).items():
     query = Query()
     query.name = query_name
     query.sql = query_sql
diff --git a/tests/stress/query_runner.py b/tests/stress/query_runner.py
index 26a99fbca..fa6b5f5d2 100644
--- a/tests/stress/query_runner.py
+++ b/tests/stress/query_runner.py
@@ -188,10 +188,10 @@ class QueryRunner(object):
     if run_set_up and query.set_up_sql:
       LOG.debug("Running set up query:\n%s", query.set_up_sql)
       cursor.execute(query.set_up_sql)
-    for query_option, value in self.common_query_options.iteritems():
+    for query_option, value in self.common_query_options.items():
       cursor.execute(
           "SET {query_option}={value}".format(query_option=query_option, value=value))
-    for query_option, value in query.options.iteritems():
+    for query_option, value in query.options.items():
       cursor.execute(
           "SET {query_option}={value}".format(query_option=query_option, value=value))
     # Set a time limit if it is the expected method of cancellation, or as an additional
@@ -383,7 +383,7 @@ class QueryRunner(object):
 
   def get_metric_vals(self):
     """Get the current values of the all metrics as a list of (k, v) pairs."""
-    return [(k, v.value) for k, v in self._metrics.iteritems()]
+    return [(k, v.value) for k, v in self._metrics.items()]
 
   def increment_metric(self, name):
     """Increment the current value of the metric called 'name'."""
diff --git a/tests/stress/runtime_info.py b/tests/stress/runtime_info.py
index a10692a22..5523aa66a 100644
--- a/tests/stress/runtime_info.py
+++ b/tests/stress/runtime_info.py
@@ -83,9 +83,9 @@ def load_runtime_info(path, impala=None):
         store.get("host_names") != sorted([i.host_name for i in impala.impalads])
     ):
       return queries_by_db_and_sql
-    for db_name, queries_by_sql in store["db_names"].iteritems():
-      for sql, queries_by_options in queries_by_sql.iteritems():
-        for options, json_query in queries_by_options.iteritems():
+    for db_name, queries_by_sql in store["db_names"].items():
+      for sql, queries_by_options in queries_by_sql.items():
+        for options, json_query in queries_by_options.items():
           query = Query()
           query.__dict__.update(json_query)
           query.sql = sql
@@ -123,11 +123,11 @@ def print_runtime_info_comparison(old_runtime_info, new_runtime_info):
       "Old Runtime wout/Spilling",
       "New Runtime wout/Spilling",
       "Diff %"]))
-  for db_name, old_queries in old_runtime_info.iteritems():
+  for db_name, old_queries in old_runtime_info.items():
     new_queries = new_runtime_info.get(db_name)
     if not new_queries:
       continue
-    for sql, old_query in old_queries.iteritems():
+    for sql, old_query in old_queries.items():
       new_query = new_queries.get(sql)
       if not new_query:
         continue