Posted to commits@impala.apache.org by jb...@apache.org on 2016/09/29 15:40:35 UTC

[1/2] incubator-impala git commit: IMPALA-4207: test infra: move Hive options from connection to cluster options

Repository: incubator-impala
Updated Branches:
  refs/heads/master 241c7e019 -> a5e84ac01


IMPALA-4207: test infra: move Hive options from connection to cluster options

Various test tools and frameworks, including the stress test, random
query generator, and nested types loader, share common modules. This
change

  IMPALA-3980: qgen: re-enable Hive as a target database

made changes to tests.comparison.cli_options, the shared command line
option module, and to tests.comparison.cluster, the shared module for
modeling various Impala clusters. Those changes were for the random
query generator, but didn't take into account the other shared entry
points. Some of those entry points could be called in a way that
produced an exception: the Hive-related options are now required for
miniclusters, but they weren't always being initialized in those entry
points.
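
For illustration, a minimal sketch of the failing pattern (assuming an
entry point that imports cli_options directly, the way the existing
qgen scripts do; the exact failure site may vary):

  import argparse
  import cli_options  # tests/comparison/cli_options.py

  parser = argparse.ArgumentParser()
  cli_options.add_cluster_options(parser)
  args = parser.parse_args([])
  # Before this change the Hive flags were registered only by
  # add_connection_option_groups(), so an entry point that skipped that
  # call had no args.use_hive / args.hive_* attributes, and
  # create_cluster() could fail when it read them.
  cluster = cli_options.create_cluster(args)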

The simple fix: because Hive settings are now needed to create
Minicluster objects, initialize the Hive options together with the
cluster options rather than with the connection options. While making
these changes, I also fixed all flake8 problems in this file.
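
A rough usage sketch of the new arrangement (the flag values shown are
just the defaults and are only illustrative):

  import argparse
  import cli_options  # tests/comparison/cli_options.py

  parser = argparse.ArgumentParser()
  cli_options.add_cluster_options(parser)  # now also adds the Hive Options group
  args = parser.parse_args(
      ['--use-hive', '--hive-host', '127.0.0.1', '--hive-port', '11050'])
  # args.use_hive, args.hive_host, args.hive_port, args.hive_user and
  # args.hive_password now exist for every caller of add_cluster_options(),
  # so create_cluster() can safely build the minicluster objects.
  cluster = cli_options.create_cluster(args)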

Testing:

- qgen/minicluster unit tests (regression test)
- full private data load job, including load_nested.py (bug
  verification)
- data_generator.py run (regression test), long enough to verify
  connection to the minicluster, using both Hive and Impala
- discrepancy_searcher.py run (regression test), long enough to verify
  connection to the minicluster, using both Hive and Impala
- concurrent_select.py (in typical mode using a CM host, this is a
  regression check; from the command line against the minicluster, this
  is a bug verification)

Change-Id: I2a2915e6db85ddb3d8e1bce8035eccd0c9324b4b
Reviewed-on: http://gerrit.cloudera.org:8080/4555
Reviewed-by: Michael Brown <mi...@cloudera.com>
Reviewed-by: Ishaan Joshi <is...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/a35e4380
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/a35e4380
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/a35e4380

Branch: refs/heads/master
Commit: a35e4380966346b47a81eeb2500e01bf97fa8f95
Parents: 241c7e0
Author: Michael Brown <mi...@cloudera.com>
Authored: Wed Sep 28 08:29:19 2016 -0700
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Sep 29 02:10:17 2016 +0000

----------------------------------------------------------------------
 tests/comparison/cli_options.py | 199 +++++++++++++++++++++--------------
 tests/comparison/cluster.py     |   3 +
 2 files changed, 124 insertions(+), 78 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a35e4380/tests/comparison/cli_options.py
----------------------------------------------------------------------
diff --git a/tests/comparison/cli_options.py b/tests/comparison/cli_options.py
index 92901f4..70740e6 100644
--- a/tests/comparison/cli_options.py
+++ b/tests/comparison/cli_options.py
@@ -24,22 +24,32 @@ from getpass import getuser
 from tempfile import gettempdir
 
 import db_connection
-from cluster import CmCluster, DEFAULT_HIVE_HOST, DEFAULT_HIVE_PORT, MiniCluster, \
-  MiniHiveCluster
+from cluster import (
+    CmCluster,
+    DEFAULT_HIVE_HOST,
+    DEFAULT_HIVE_PASSWORD,
+    DEFAULT_HIVE_PORT,
+    DEFAULT_HIVE_USER,
+    MiniCluster,
+    MiniHiveCluster,
+)
 from db_types import TYPES
 
-def add_logging_options(section, default_debug_log_file=None):
+
+def add_logging_options(parser, default_debug_log_file=None):
   if not default_debug_log_file:
     default_debug_log_file = os.path.join(
         gettempdir(), os.path.basename(sys.modules["__main__"].__file__) + ".log")
-  section.add_argument('--log-level', default='INFO',
+  parser.add_argument(
+      '--log-level', default='INFO',
       help='The log level to use.', choices=('DEBUG', 'INFO', 'WARN', 'ERROR'))
-  section.add_argument('--debug-log-file', default=default_debug_log_file,
+  parser.add_argument(
+      '--debug-log-file', default=default_debug_log_file,
       help='Path to debug log file.')
 
 
 def configure_logging(log_level, debug_log_file=None, log_thread_id=False,
-    log_process_id=False):
+                      log_process_id=False):
   root_logger = logging.getLogger()
   root_logger.setLevel(logging.DEBUG)
 
@@ -65,8 +75,8 @@ def configure_logging(log_level, debug_log_file=None, log_thread_id=False,
   def create_third_party_filter(level):
     def filter_record(record):
       name = record.name
-      if name.startswith("impala.") or name.startswith("paramiko.") \
-          or name.startswith("hdfs") or name.startswith("requests"):
+      if name.startswith("impala.") or name.startswith("paramiko.") or \
+         name.startswith("hdfs") or name.startswith("requests"):
         return record.levelno >= level
       return True
     log_filter = logging.Filter()
@@ -77,47 +87,79 @@ def configure_logging(log_level, debug_log_file=None, log_thread_id=False,
     file_logger.addFilter(create_third_party_filter(logging.INFO))
 
 
-def add_ssh_options(section):
-  section.add_argument('--ssh-user', metavar='user name', default=getuser(),
+def add_ssh_options(parser):
+  parser.add_argument(
+      '--ssh-user', metavar='user name', default=getuser(),
       help='The user name to use for SSH connections to cluster nodes.')
-  section.add_argument('--ssh-key-file', metavar='path to file',
+  parser.add_argument(
+      '--ssh-key-file', metavar='path to file',
       help='Specify an additional SSH key other than the defaults in ~/.ssh.')
-  section.add_argument('--ssh-port', metavar='number', type=int, default=22,
+  parser.add_argument(
+      '--ssh-port', metavar='number', type=int, default=22,
       help='The port number to use when connecting through SSH.')
 
 
-def add_db_name_option(section):
-  section.add_argument('--db-name', default='randomness',
+def add_db_name_option(parser):
+  parser.add_argument(
+      '--db-name', default='randomness',
       help='The name of the database to use. Ex: functional.')
 
 
-def add_cluster_options(section):
-  add_minicluster_options(section)
-  add_cm_options(section)
-  add_ssh_options(section)
-  section.add_argument("--hadoop-user-name", default=getuser(),
-      help="The user name to use when interacting with hadoop.")
+def add_cluster_options(parser):
+  add_minicluster_options(parser)
+  add_cm_options(parser)
+  add_ssh_options(parser)
+  parser.add_argument(
+      '--hadoop-user-name', default=getuser(),
+      help='The user name to use when interacting with hadoop.')
+
+
+def add_minicluster_options(parser):
+  group = parser.add_argument_group('Hive Options')
+  group.add_argument(
+      '--use-hive', action='store_true', default=False,
+      help='Use Hive (Impala will be skipped)')
+  group.add_argument(
+      '--hive-host', default=DEFAULT_HIVE_HOST,
+      help='The name of the host running the HS2')
+  group.add_argument(
+      '--hive-port', default=DEFAULT_HIVE_PORT, type=int,
+      help='The port of HiveServer2')
+  group.add_argument(
+      '--hive-user', default=DEFAULT_HIVE_USER,
+      help='The user name to use when connecting to HiveServer2')
+  group.add_argument(
+      '--hive-password', default=DEFAULT_HIVE_PASSWORD,
+      help='The password to use when connecting to HiveServer2')
+  parser.add_argument_group(group)
 
-def add_minicluster_options(section):
-  section.add_argument('--minicluster-num-impalads', default=3, type=int,
-      metavar='num impalads', help='The number of impalads in the mini cluster.')
+  parser.add_argument(
+      '--minicluster-num-impalads', default=3, type=int, metavar='num impalads',
+      help='The number of impalads in the mini cluster.')
 
-def add_cm_options(section):
-  section.add_argument('--cm-host', metavar='host name',
+
+def add_cm_options(parser):
+  parser.add_argument(
+      '--cm-host', metavar='host name',
       help='The host name of the CM server.')
-  section.add_argument('--cm-port', default=7180, type=int, metavar='port number',
+  parser.add_argument(
+      '--cm-port', default=7180, type=int, metavar='port number',
       help='The port of the CM server.')
-  section.add_argument('--cm-user', default="admin", metavar='user name',
+  parser.add_argument(
+      '--cm-user', default="admin", metavar='user name',
       help='The name of the CM user.')
-  section.add_argument('--cm-password', default="admin", metavar='password',
+  parser.add_argument(
+      '--cm-password', default="admin", metavar='password',
       help='The password for the CM user.')
-  section.add_argument('--cm-cluster-name', metavar='name',
+  parser.add_argument(
+      '--cm-cluster-name', metavar='name',
       help='If CM manages multiple clusters, use this to specify which cluster to use.')
 
 
 def create_cluster(args):
   if args.cm_host:
-    cluster = CmCluster(args.cm_host, user=args.cm_user, password=args.cm_password,
+    cluster = CmCluster(
+        args.cm_host, user=args.cm_user, password=args.cm_password,
         cluster_name=args.cm_cluster_name, ssh_user=args.ssh_user, ssh_port=args.ssh_port,
         ssh_key_file=args.ssh_key_file)
   elif args.use_hive:
@@ -128,74 +170,73 @@ def create_cluster(args):
   return cluster
 
 
-def add_storage_format_options(section):
+def add_storage_format_options(parser):
   storage_formats = ['avro', 'parquet', 'rcfile', 'sequencefile', 'textfile']
-  section.add_argument('--storage-file-formats', default=','.join(storage_formats),
+  parser.add_argument(
+      '--storage-file-formats', default=','.join(storage_formats),
       help='A comma separated list of storage formats to use.')
 
 
-def add_data_types_options(section):
-  section.add_argument('--data-types',
-      default=','.join(type_.__name__ for type_ in TYPES),
+def add_data_types_options(parser):
+  parser.add_argument(
+      '--data-types', default=','.join(type_.__name__ for type_ in TYPES),
       help='A comma separated list of data types to use.')
 
 
-def add_timeout_option(section):
-  section.add_argument('--timeout', default=(3 * 60), type=int,
-      help='Query timeout in seconds')
+def add_timeout_option(parser):
+  parser.add_argument(
+      '--timeout', default=(3 * 60), type=int, help='Query timeout in seconds')
 
 
 def add_connection_option_groups(parser):
 
-  group = parser.add_argument_group("Hive Options")
-  group.add_argument('--use-hive', action='store_true', default=False,
-      help='Use Hive (Impala will be skipped)')
-  group.add_argument('--hive-host', default=DEFAULT_HIVE_HOST,
-      help="The name of the host running the HS2")
-  group.add_argument("--hive-port", default=DEFAULT_HIVE_PORT, type=int,
-      help="The port of HiveServer2")
-  group.add_argument('--hive-user', default='hive',
-      help="The user name to use when connecting to HiveServer2")
-  group.add_argument('--hive-password', default='hive',
-      help="The password to use when connecting to HiveServer2")
-  parser.add_argument_group(group)
-
   group = parser.add_argument_group('MySQL Options')
-  group.add_argument('--use-mysql', action='store_true',
-      help='Use MySQL')
-  group.add_argument('--mysql-host', default='localhost',
+  group.add_argument(
+      '--use-mysql', action='store_true', help='Use MySQL')
+  group.add_argument(
+      '--mysql-host', default='localhost',
       help='The name of the host running the MySQL database.')
-  group.add_argument('--mysql-port', default=3306, type=int,
+  group.add_argument(
+      '--mysql-port', default=3306, type=int,
       help='The port of the host running the MySQL database.')
-  group.add_argument('--mysql-user', default='root',
+  group.add_argument(
+      '--mysql-user', default='root',
       help='The user name to use when connecting to the MySQL database.')
-  group.add_argument('--mysql-password',
+  group.add_argument(
+      '--mysql-password',
       help='The password to use when connecting to the MySQL database.')
   parser.add_argument_group(group)
 
   group = parser.add_argument_group('Oracle Options')
-  group.add_argument('--use-oracle', action='store_true',
-      help='Use Oracle')
-  group.add_argument('--oracle-host', default='localhost',
+  group.add_argument('--use-oracle', action='store_true', help='Use Oracle')
+  group.add_argument(
+      '--oracle-host', default='localhost',
       help='The name of the host running the Oracle database.')
-  group.add_argument('--oracle-port', default=1521, type=int,
+  group.add_argument(
+      '--oracle-port', default=1521, type=int,
       help='The port of the host running the Oracle database.')
-  group.add_argument('--oracle-user', default='system',
+  group.add_argument(
+      '--oracle-user', default='system',
       help='The user name to use when connecting to the Oracle database.')
-  group.add_argument('--oracle-password',
+  group.add_argument(
+      '--oracle-password',
       help='The password to use when connecting to the Oracle database.')
   parser.add_argument_group(group)
 
   group = parser.add_argument_group('Postgresql Options')
-  group.add_argument('--use-postgresql', action='store_true',
-      help='Use Postgresql')
-  group.add_argument('--postgresql-host', default='localhost',
+  group.add_argument(
+      '--use-postgresql', action='store_true', help='Use Postgresql')
+  group.add_argument(
+      '--postgresql-host', default='localhost',
       help='The name of the host running the Postgresql database.')
-  group.add_argument('--postgresql-port', default=5432, type=int,
+  group.add_argument(
+      '--postgresql-port', default=5432, type=int,
       help='The port of the host running the Postgresql database.')
-  group.add_argument('--postgresql-user', default='postgres',
+  group.add_argument(
+      '--postgresql-user', default='postgres',
       help='The user name to use when connecting to the Postgresql database.')
-  group.add_argument('--postgresql-password',
+  group.add_argument(
+      '--postgresql-password',
       help='The password to use when connecting to the Postgresql database.')
   parser.add_argument_group(group)
 
@@ -209,8 +250,8 @@ def get_db_type(args):
   if args.use_postgresql:
     db_types.append(db_connection.POSTGRESQL)
   if not db_types:
-    raise Exception("At least one of --use-mysql, --use-oracle, or --use-postgresql"
-        "must be used")
+    raise Exception(
+        "At least one of --use-mysql, --use-oracle, or --use-postgresql must be used")
   elif len(db_types) > 1:
     raise Exception("Too many databases requested: %s" % db_types)
   return db_types[0]
@@ -228,9 +269,10 @@ def create_connection(args, db_type=None, db_name=None):
   elif db_type == db_connection.HIVE:
     conn_class = db_connection.HiveConnection
   else:
-    raise Exception('Unexpected db_type: %s; expected one of %s.'
-        % (db_type, ', '.join([db_connection.POSTGRESQL, db_connection.MYSQL,
-              db_connection.ORACLE])))
+    raise Exception(
+        'Unexpected db_type: %s; expected one of %s.' % (
+            db_type, ', '.join([db_connection.POSTGRESQL, db_connection.MYSQL,
+                                db_connection.ORACLE])))
   prefix = db_type.lower()
   return conn_class(
       user_name=getattr(args, prefix + '_user'),
@@ -240,9 +282,10 @@ def create_connection(args, db_type=None, db_name=None):
       db_name=db_name)
 
 
-def add_kerberos_options(section):
-  section.add_argument("--use-kerberos", action="store_true",
+def add_kerberos_options(parser):
+  parser.add_argument(
+      "--use-kerberos", action="store_true",
       help="Use kerberos when communicating with Impala. This requires that kinit has"
       " already been done before running this script.")
-  section.add_argument("--kerberos-principal", default=getuser(),
-      help="The principal name to use.")
+  parser.add_argument(
+      "--kerberos-principal", default=getuser(), help="The principal name to use.")

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a35e4380/tests/comparison/cluster.py
----------------------------------------------------------------------
diff --git a/tests/comparison/cluster.py b/tests/comparison/cluster.py
index a009e92..bfb8fa4 100644
--- a/tests/comparison/cluster.py
+++ b/tests/comparison/cluster.py
@@ -54,6 +54,9 @@ LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
 
 DEFAULT_HIVE_HOST = '127.0.0.1'
 DEFAULT_HIVE_PORT = 11050
+DEFAULT_HIVE_USER = 'hive'
+DEFAULT_HIVE_PASSWORD = 'hive'
+
 DEFAULT_TIMEOUT = 300
 
 class Cluster(object):


[2/2] incubator-impala git commit: IMPALA-4206: Add column lineage regression test.

Posted by jb...@apache.org.
IMPALA-4206: Add column lineage regression test.

The underlying issue was already fixed in IMPALA-3940.
This patch adds a new regression test to cover IMPALA-4206.

Change-Id: I5b164000c7b0ce7e2f296d168d75a6860f5963d8
Reviewed-on: http://gerrit.cloudera.org:8080/4556
Reviewed-by: Alex Behm <al...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/a5e84ac0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/a5e84ac0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/a5e84ac0

Branch: refs/heads/master
Commit: a5e84ac01491f8cca8a229c4348a23ae87ddca61
Parents: a35e438
Author: Alex Behm <al...@cloudera.com>
Authored: Wed Sep 28 13:21:22 2016 -0700
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Sep 29 07:45:19 2016 +0000

----------------------------------------------------------------------
 .../queries/PlannerTest/lineage.test            | 44 ++++++++++++++++++++
 1 file changed, 44 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a5e84ac0/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test b/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
index f32e5cc..9ba2ab7 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/lineage.test
@@ -4409,3 +4409,47 @@ select a + b as ab, c, d, e from functional.allcomplextypes t,
     ]
 }
 ====
+# IMPALA-4206: Test creating a view whose definition has a subquery and a view reference.
+create view test_view_lineage as
+select id from functional.alltypes_view v
+where not exists (select 1 from functional.alltypes a where v.id = a.id)
+---- LINEAGE
+{
+    "queryText":"create view test_view_lineage as\nselect id from functional.alltypes_view v\nwhere not exists (select 1 from functional.alltypes a where v.id = a.id)",
+    "hash":"e79b8abc8a682d9e0f6b2c30a6c885f3",
+    "user":"dev",
+    "timestamp":1475094005,
+    "edges":[
+        {
+            "sources":[
+                1
+            ],
+            "targets":[
+                0
+            ],
+            "edgeType":"PROJECTION"
+        },
+        {
+            "sources":[
+                1
+            ],
+            "targets":[
+                0
+            ],
+            "edgeType":"PREDICATE"
+        }
+    ],
+    "vertices":[
+        {
+            "id":0,
+            "vertexType":"COLUMN",
+            "vertexId":"default.test_view_lineage.id"
+        },
+        {
+            "id":1,
+            "vertexType":"COLUMN",
+            "vertexId":"functional.alltypes.id"
+        }
+    ]
+}
+====
\ No newline at end of file