Posted to commits@phoenix.apache.org by ka...@apache.org on 2019/03/05 22:32:25 UTC
[phoenix-queryserver] branch master updated: PHOENIX-5063 Create a
new repo for the phoenix query server
This is an automated email from the ASF dual-hosted git repository.
karanmehta93 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix-queryserver.git
The following commit(s) were added to refs/heads/master by this push:
new d750aa9 PHOENIX-5063 Create a new repo for the phoenix query server
d750aa9 is described below
commit d750aa9484006527bb2fc08febf1b2d019217229
Author: karanmehta93 <ka...@gmail.com>
AuthorDate: Tue Mar 5 14:32:21 2019 -0800
PHOENIX-5063 Create a new repo for the phoenix query server
1. Project code and scripts for phoenix-queryserver and phoenix-queryserver-client
2. Artifacts renamed as queryserver and queryserver-client
3. Project will be bundled up in an independent tar ball which can be released directly
---
assembly/cluster.xml | 35 +
assembly/pom.xml | 55 ++
bin/daemon.py | 999 +++++++++++++++++++++
bin/hadoop-metrics2-hbase.properties | 36 +
bin/hadoop-metrics2-phoenix.properties | 70 ++
bin/hbase-site.xml | 27 +
bin/log4j.properties | 76 ++
bin/phoenix_utils.py | 231 +++++
bin/psql.py | 72 ++
bin/queryserver.py | 207 +++++
bin/sqlline-thin.py | 175 ++++
phoenix-client/pom.xml | 366 ++++++++
pom.xml | 554 ++++++++++++
queryserver-client/pom.xml | 203 +++++
.../apache/phoenix/queryserver/client/Driver.java | 49 +
.../phoenix/queryserver/client/SqllineWrapper.java | 97 ++
.../phoenix/queryserver/client/ThinClientUtil.java | 42 +
.../resources/META-INF/services/java.sql.Driver | 1 +
.../org-apache-phoenix-remote-jdbc.properties | 25 +
queryserver/pom.xml | 188 ++++
queryserver/src/build/query-server-runnable.xml | 52 ++
queryserver/src/it/bin/test_phoenixdb.py | 39 +
queryserver/src/it/bin/test_phoenixdb.sh | 79 ++
.../HttpParamImpersonationQueryServerIT.java | 438 +++++++++
.../phoenix/end2end/QueryServerBasicsIT.java | 346 +++++++
.../phoenix/end2end/QueryServerTestUtil.java | 187 ++++
.../apache/phoenix/end2end/QueryServerThread.java | 45 +
.../phoenix/end2end/SecureQueryServerIT.java | 323 +++++++
.../end2end/SecureQueryServerPhoenixDBIT.java | 424 +++++++++
.../phoenix/end2end/ServerCustomizersIT.java | 149 +++
queryserver/src/it/resources/log4j.properties | 68 ++
.../service/LoadBalanceZookeeperConf.java | 42 +
.../phoenix/queryserver/register/Registry.java | 48 +
.../server/AvaticaServerConfigurationFactory.java | 37 +
.../queryserver/server/PhoenixMetaFactory.java | 28 +
.../queryserver/server/PhoenixMetaFactoryImpl.java | 76 ++
.../phoenix/queryserver/server/QueryServer.java | 606 +++++++++++++
.../server/RemoteUserExtractorFactory.java | 36 +
.../server/ServerCustomizersFactory.java | 52 ++
.../org/apache/phoenix/DriverCohabitationTest.java | 65 ++
.../CustomAvaticaServerConfigurationTest.java | 37 +
.../server/PhoenixDoAsCallbackTest.java | 89 ++
.../server/PhoenixRemoteUserExtractorTest.java | 108 +++
.../server/QueryServerConfigurationTest.java | 92 ++
.../server/RemoteUserExtractorFactoryTest.java | 35 +
.../queryserver/server/ServerCustomizersTest.java | 92 ++
46 files changed, 7101 insertions(+)
diff --git a/assembly/cluster.xml b/assembly/cluster.xml
new file mode 100644
index 0000000..b98f017
--- /dev/null
+++ b/assembly/cluster.xml
@@ -0,0 +1,35 @@
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+ <id>cluster</id>
+ <baseDirectory>/</baseDirectory>
+ <formats>
+ <format>tar.gz</format>
+ </formats>
+ <fileSets>
+ <fileSet>
+ <directory>${project.basedir}/../bin</directory>
+ <outputDirectory>${project.parent.artifactId}-${project.parent.version}/bin</outputDirectory>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/../queryserver/target</directory>
+ <outputDirectory>${project.parent.artifactId}-${project.parent.version}/queryserver/target</outputDirectory>
+ <includes>
+ <include>*.jar</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/../queryserver-client/target</directory>
+ <outputDirectory>${project.parent.artifactId}-${project.parent.version}/queryserver-client/target</outputDirectory>
+ <includes>
+ <include>*.jar</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/../phoenix-client/target</directory>
+ <outputDirectory>${project.parent.artifactId}-${project.parent.version}/phoenix-client/target</outputDirectory>
+ <includes>
+ <include>*.jar</include>
+ </includes>
+ </fileSet>
+ </fileSets>
+</assembly>
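For orientation, this descriptor should produce a tar.gz with a single
top-level directory named from the parent artifactId and version
(phoenix-queryserver-1.0.0-SNAPSHOT, per the parent GAV declared in
assembly/pom.xml below), roughly:

    phoenix-queryserver-1.0.0-SNAPSHOT/
        bin/                           launch scripts
        queryserver/target/            server *.jar only
        queryserver-client/target/     thin-client *.jar only
        phoenix-client/target/         phoenix client *.jar only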
diff --git a/assembly/pom.xml b/assembly/pom.xml
new file mode 100644
index 0000000..097f1cd
--- /dev/null
+++ b/assembly/pom.xml
@@ -0,0 +1,55 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-queryserver</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ </parent>
+
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>assembly</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <packaging>pom</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>queryserver</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>queryserver-client</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <directory>${project.basedir}/../target</directory>
+ <plugins>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>cluster</id>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ <configuration>
+ <descriptor>cluster.xml</descriptor>
+ <finalName>${parent.artifactId}-${project.version}</finalName>
+ <tarLongFileMode>posix</tarLongFileMode>
+ <appendAssemblyId>false</appendAssemblyId>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
\ No newline at end of file
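A usage note: with the assembly plugin bound to the package phase as above,
building from the repository root with

    mvn package

should be enough to produce the tarball. Because <appendAssemblyId> is false
and <finalName> is ${parent.artifactId}-${project.version}, the artifact
should land in the shared ../target directory as
phoenix-queryserver-1.0.0-SNAPSHOT.tar.gz.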
diff --git a/bin/daemon.py b/bin/daemon.py
new file mode 100644
index 0000000..bb64148
--- /dev/null
+++ b/bin/daemon.py
@@ -0,0 +1,999 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+# daemon/daemon.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <be...@benfinney.id.au>
+# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+# Copyright © 2004–2005 Chad J. Schroeder
+# Copyright © 2003 Clark Evans
+# Copyright © 2002 Noah Spurrier
+# Copyright © 2001 Jürgen Hermann
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+#
+# Apache Phoenix note: this file is `daemon.py` from the package
+# `python-daemon 2.0.5`, https://pypi.python.org/pypi/python-daemon/
+#
+# The class `PidFile` was added for adapting the `lockfile` package's interface
+# without depending on yet another 3rd party package. Based on example from
+# http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
+#
+
+""" Daemon process behaviour.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import os
+import sys
+import resource
+import errno
+import signal
+import socket
+import atexit
+import fcntl
+import time
+try:
+ # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
+ basestring = basestring
+ unicode = unicode
+except NameError:
+ # Python 3 names the Unicode data type ‘str’.
+ basestring = str
+ unicode = str
+
+
+class DaemonError(Exception):
+ """ Base exception class for errors from this module. """
+
+ def __init__(self, *args, **kwargs):
+ self._chain_from_context()
+
+ super(DaemonError, self).__init__(*args, **kwargs)
+
+ def _chain_from_context(self):
+ _chain_exception_from_existing_exception_context(self, as_cause=True)
+
+
+class DaemonOSEnvironmentError(DaemonError, OSError):
+ """ Exception raised when daemon OS environment setup receives error. """
+
+
+class DaemonProcessDetachError(DaemonError, OSError):
+ """ Exception raised when process detach fails. """
+
+
+class DaemonContext:
+ """ Context for turning the current program into a daemon process.
+
+ A `DaemonContext` instance represents the behaviour settings and
+ process context for the program when it becomes a daemon. The
+ behaviour and environment is customised by setting options on the
+ instance, before calling the `open` method.
+
+ Each option can be passed as a keyword argument to the `DaemonContext`
+ constructor, or subsequently altered by assigning to an attribute on
+ the instance at any time prior to calling `open`. That is, for
+ options named `wibble` and `wubble`, the following invocation::
+
+ foo = daemon.DaemonContext(wibble=bar, wubble=baz)
+ foo.open()
+
+ is equivalent to::
+
+ foo = daemon.DaemonContext()
+ foo.wibble = bar
+ foo.wubble = baz
+ foo.open()
+
+ The following options are defined.
+
+ `files_preserve`
+ :Default: ``None``
+
+ List of files that should *not* be closed when starting the
+ daemon. If ``None``, all open file descriptors will be closed.
+
+ Elements of the list are file descriptors (as returned by a file
+ object's `fileno()` method) or Python `file` objects. Each
+ specifies a file that is not to be closed during daemon start.
+
+ `chroot_directory`
+ :Default: ``None``
+
+ Full path to a directory to set as the effective root directory of
+ the process. If ``None``, specifies that the root directory is not
+ to be changed.
+
+ `working_directory`
+ :Default: ``'/'``
+
+ Full path of the working directory to which the process should
+ change on daemon start.
+
+ Since a filesystem cannot be unmounted if a process has its
+ current working directory on that filesystem, this should either
+ be left at default or set to a directory that is a sensible “home
+ directory” for the daemon while it is running.
+
+ `umask`
+ :Default: ``0``
+
+ File access creation mask (“umask”) to set for the process on
+ daemon start.
+
+ A daemon should not rely on the parent process's umask value,
+ which is beyond its control and may prevent creating a file with
+ the required access mode. So when the daemon context opens, the
+ umask is set to an explicit known value.
+
+ If the conventional value of 0 is too open, consider setting a
+ value such as 0o022, 0o027, 0o077, or another specific value.
+ Otherwise, ensure the daemon creates every file with an
+ explicit access mode for the purpose.
+
+ `pidfile`
+ :Default: ``None``
+
+ Context manager for a PID lock file. When the daemon context opens
+ and closes, it enters and exits the `pidfile` context manager.
+
+ `detach_process`
+ :Default: ``None``
+
+ If ``True``, detach the process context when opening the daemon
+ context; if ``False``, do not detach.
+
+ If unspecified (``None``) during initialisation of the instance,
+ this will be set to ``True`` by default, and ``False`` only if
+ detaching the process is determined to be redundant; for example,
+ in the case when the process was started by `init`, by `initd`, or
+ by `inetd`.
+
+ `signal_map`
+ :Default: system-dependent
+
+ Mapping from operating system signals to callback actions.
+
+ The mapping is used when the daemon context opens, and determines
+ the action for each signal's signal handler:
+
+ * A value of ``None`` will ignore the signal (by setting the
+ signal action to ``signal.SIG_IGN``).
+
+ * A string value will be used as the name of an attribute on the
+ ``DaemonContext`` instance. The attribute's value will be used
+ as the action for the signal handler.
+
+ * Any other value will be used as the action for the
+ signal handler. See the ``signal.signal`` documentation
+ for details of the signal handler interface.
+
+ The default value depends on which signals are defined on the
+ running system. Each item from the list below whose signal is
+ actually defined in the ``signal`` module will appear in the
+ default map:
+
+ * ``signal.SIGTTIN``: ``None``
+
+ * ``signal.SIGTTOU``: ``None``
+
+ * ``signal.SIGTSTP``: ``None``
+
+ * ``signal.SIGTERM``: ``'terminate'``
+
+ Depending on how the program will interact with its child
+ processes, it may need to specify a signal map that
+ includes the ``signal.SIGCHLD`` signal (received when a
+ child process exits). See the specific operating system's
+ documentation for more detail on how to determine what
+ circumstances dictate the need for signal handlers.
+
+ `uid`
+ :Default: ``os.getuid()``
+
+ `gid`
+ :Default: ``os.getgid()``
+
+ The user ID (“UID”) value and group ID (“GID”) value to switch
+ the process to on daemon start.
+
+ The default values, the real UID and GID of the process, will
+ relinquish any effective privilege elevation inherited by the
+ process.
+
+ `prevent_core`
+ :Default: ``True``
+
+ If true, prevents the generation of core files, in order to avoid
+ leaking sensitive information from daemons run as `root`.
+
+ `stdin`
+ :Default: ``None``
+
+ `stdout`
+ :Default: ``None``
+
+ `stderr`
+ :Default: ``None``
+
+ Each of `stdin`, `stdout`, and `stderr` is a file-like object
+ which will be used as the new file for the standard I/O stream
+ `sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
+ should therefore be open, with a minimum of mode 'r' in the case
+ of `stdin`, and minimum of mode 'w+' in the case of `stdout` and
+ `stderr`.
+
+ If the object has a `fileno()` method that returns a file
+ descriptor, the corresponding file will be excluded from being
+ closed during daemon start (that is, it will be treated as though
+ it were listed in `files_preserve`).
+
+ If ``None``, the corresponding system stream is re-bound to the
+ file named by `os.devnull`.
+
+ """
+
+ __metaclass__ = type
+
+ def __init__(
+ self,
+ chroot_directory=None,
+ working_directory="/",
+ umask=0,
+ uid=None,
+ gid=None,
+ prevent_core=True,
+ detach_process=None,
+ files_preserve=None,
+ pidfile=None,
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ signal_map=None,
+ ):
+ """ Set up a new instance. """
+ self.chroot_directory = chroot_directory
+ self.working_directory = working_directory
+ self.umask = umask
+ self.prevent_core = prevent_core
+ self.files_preserve = files_preserve
+ self.pidfile = pidfile
+ self.stdin = stdin
+ self.stdout = stdout
+ self.stderr = stderr
+
+ if uid is None:
+ uid = os.getuid()
+ self.uid = uid
+ if gid is None:
+ gid = os.getgid()
+ self.gid = gid
+
+ if detach_process is None:
+ detach_process = is_detach_process_context_required()
+ self.detach_process = detach_process
+
+ if signal_map is None:
+ signal_map = make_default_signal_map()
+ self.signal_map = signal_map
+
+ self._is_open = False
+
+ @property
+ def is_open(self):
+ """ ``True`` if the instance is currently open. """
+ return self._is_open
+
+ def open(self):
+ """ Become a daemon process.
+
+ :return: ``None``.
+
+ Open the daemon context, turning the current program into a daemon
+ process. This performs the following steps:
+
+ * If this instance's `is_open` property is true, return
+ immediately. This makes it safe to call `open` multiple times on
+ an instance.
+
+ * If the `prevent_core` attribute is true, set the resource limits
+ for the process to prevent any core dump from the process.
+
+ * If the `chroot_directory` attribute is not ``None``, set the
+ effective root directory of the process to that directory (via
+ `os.chroot`).
+
+ This allows running the daemon process inside a “chroot gaol”
+ as a means of limiting the system's exposure to rogue behaviour
+ by the process. Note that the specified directory needs to
+ already be set up for this purpose.
+
+ * Set the process UID and GID to the `uid` and `gid` attribute
+ values.
+
+ * Close all open file descriptors. This excludes those listed in
+ the `files_preserve` attribute, and those that correspond to the
+ `stdin`, `stdout`, or `stderr` attributes.
+
+ * Change current working directory to the path specified by the
+ `working_directory` attribute.
+
+ * Reset the file access creation mask to the value specified by
+ the `umask` attribute.
+
+ * If the `detach_process` option is true, detach the current
+ process into its own process group, and disassociate from any
+ controlling terminal.
+
+ * Set signal handlers as specified by the `signal_map` attribute.
+
+ * If any of the attributes `stdin`, `stdout`, `stderr` are not
+ ``None``, bind the system streams `sys.stdin`, `sys.stdout`,
+ and/or `sys.stderr` to the files represented by the
+ corresponding attributes. Where the attribute has a file
+ descriptor, the descriptor is duplicated (instead of re-binding
+ the name).
+
+ * If the `pidfile` attribute is not ``None``, enter its context
+ manager.
+
+ * Mark this instance as open (for the purpose of future `open` and
+ `close` calls).
+
+ * Register the `close` method to be called during Python's exit
+ processing.
+
+ When the function returns, the running program is a daemon
+ process.
+
+ """
+ if self.is_open:
+ return
+
+ if self.chroot_directory is not None:
+ change_root_directory(self.chroot_directory)
+
+ if self.prevent_core:
+ prevent_core_dump()
+
+ change_file_creation_mask(self.umask)
+ change_working_directory(self.working_directory)
+ change_process_owner(self.uid, self.gid)
+
+ if self.detach_process:
+ detach_process_context(self.pidfile)
+
+ signal_handler_map = self._make_signal_handler_map()
+ set_signal_handlers(signal_handler_map)
+
+ exclude_fds = self._get_exclude_file_descriptors()
+ close_all_open_files(exclude=exclude_fds)
+
+ redirect_stream(sys.stdin, self.stdin)
+ redirect_stream(sys.stdout, self.stdout)
+ redirect_stream(sys.stderr, self.stderr)
+
+ if self.pidfile is not None:
+ self.pidfile.__enter__()
+
+ self._is_open = True
+
+ register_atexit_function(self.close)
+
+ def __enter__(self):
+ """ Context manager entry point. """
+ self.open()
+ return self
+
+ def close(self):
+ """ Exit the daemon process context.
+
+ :return: ``None``.
+
+ Close the daemon context. This performs the following steps:
+
+ * If this instance's `is_open` property is false, return
+ immediately. This makes it safe to call `close` multiple times
+ on an instance.
+
+ * If the `pidfile` attribute is not ``None``, exit its context
+ manager.
+
+ * Mark this instance as closed (for the purpose of future `open`
+ and `close` calls).
+
+ """
+ if not self.is_open:
+ return
+
+ if self.pidfile is not None:
+ # Follow the interface for telling a context manager to exit,
+ # <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
+ self.pidfile.__exit__(None, None, None)
+
+ self._is_open = False
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """ Context manager exit point. """
+ self.close()
+
+ def terminate(self, signal_number, stack_frame):
+ """ Signal handler for end-process signals.
+
+ :param signal_number: The OS signal number received.
+ :param stack_frame: The frame object at the point the
+ signal was received.
+ :return: ``None``.
+
+ Signal handler for the ``signal.SIGTERM`` signal. Performs the
+ following step:
+
+ * Raise a ``SystemExit`` exception explaining the signal.
+
+ """
+ exception = SystemExit(
+ "Terminating on signal {signal_number!r}".format(
+ signal_number=signal_number))
+ raise exception
+
+ def _get_exclude_file_descriptors(self):
+ """ Get the set of file descriptors to exclude closing.
+
+ :return: A set containing the file descriptors for the
+ files to be preserved.
+
+ The file descriptors to be preserved are those from the
+ items in `files_preserve`, and also each of `stdin`,
+ `stdout`, and `stderr`. For each item:
+
+ * If the item is ``None``, it is omitted from the return
+ set.
+
+ * If the item's ``fileno()`` method returns a value, that
+ value is in the return set.
+
+ * Otherwise, the item is in the return set verbatim.
+
+ """
+ files_preserve = self.files_preserve
+ if files_preserve is None:
+ files_preserve = []
+ files_preserve.extend(
+ item for item in [self.stdin, self.stdout, self.stderr]
+ if hasattr(item, 'fileno'))
+
+ exclude_descriptors = set()
+ for item in files_preserve:
+ if item is None:
+ continue
+ file_descriptor = _get_file_descriptor(item)
+ if file_descriptor is not None:
+ exclude_descriptors.add(file_descriptor)
+ else:
+ exclude_descriptors.add(item)
+
+ return exclude_descriptors
+
+ def _make_signal_handler(self, target):
+ """ Make the signal handler for a specified target object.
+
+ :param target: A specification of the target for the
+ handler; see below.
+ :return: The value for use by `signal.signal()`.
+
+ If `target` is ``None``, return ``signal.SIG_IGN``. If `target`
+ is a text string, return the attribute of this instance named
+ by that string. Otherwise, return `target` itself.
+
+ """
+ if target is None:
+ result = signal.SIG_IGN
+ elif isinstance(target, unicode):
+ name = target
+ result = getattr(self, name)
+ else:
+ result = target
+
+ return result
+
+ def _make_signal_handler_map(self):
+ """ Make the map from signals to handlers for this instance.
+
+ :return: The constructed signal map for this instance.
+
+ Construct a map from signal numbers to handlers for this
+ context instance, suitable for passing to
+ `set_signal_handlers`.
+
+ """
+ signal_handler_map = dict(
+ (signal_number, self._make_signal_handler(target))
+ for (signal_number, target) in self.signal_map.items())
+ return signal_handler_map
+
+
+def _get_file_descriptor(obj):
+ """ Get the file descriptor, if the object has one.
+
+ :param obj: The object expected to be a file-like object.
+ :return: The file descriptor iff the file supports it; otherwise
+ ``None``.
+
+ The object may be a non-file object. It may also be a
+ file-like object with no support for a file descriptor. In
+ either case, return ``None``.
+
+ """
+ file_descriptor = None
+ if hasattr(obj, 'fileno'):
+ try:
+ file_descriptor = obj.fileno()
+ except ValueError:
+ # The item doesn't support a file descriptor.
+ pass
+
+ return file_descriptor
+
+
+def change_working_directory(directory):
+ """ Change the working directory of this process.
+
+ :param directory: The target directory path.
+ :return: ``None``.
+
+ """
+ try:
+ os.chdir(directory)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change working directory ({exc})".format(exc=exc))
+ raise error
+
+
+def change_root_directory(directory):
+ """ Change the root directory of this process.
+
+ :param directory: The target directory path.
+ :return: ``None``.
+
+ Set the current working directory, then the process root directory,
+ to the specified `directory`. Requires appropriate OS privileges
+ for this process.
+
+ """
+ try:
+ os.chdir(directory)
+ os.chroot(directory)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change root directory ({exc})".format(exc=exc))
+ raise error
+
+
+def change_file_creation_mask(mask):
+ """ Change the file creation mask for this process.
+
+ :param mask: The numeric file creation mask to set.
+ :return: ``None``.
+
+ """
+ try:
+ os.umask(mask)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change file creation mask ({exc})".format(exc=exc))
+ raise error
+
+
+def change_process_owner(uid, gid):
+ """ Change the owning UID and GID of this process.
+
+ :param uid: The target UID for the daemon process.
+ :param gid: The target GID for the daemon process.
+ :return: ``None``.
+
+ Set the GID then the UID of the process (in that order, to avoid
+ permission errors) to the specified `gid` and `uid` values.
+ Requires appropriate OS privileges for this process.
+
+ """
+ try:
+ os.setgid(gid)
+ os.setuid(uid)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change process owner ({exc})".format(exc=exc))
+ raise error
+
+
+def prevent_core_dump():
+ """ Prevent this process from generating a core dump.
+
+ :return: ``None``.
+
+ Set the soft and hard limits for core dump size to zero. On Unix,
+ this entirely prevents the process from creating a core dump.
+
+ """
+ core_resource = resource.RLIMIT_CORE
+
+ try:
+ # Ensure the resource limit exists on this platform, by requesting
+ # its current value.
+ core_limit_prev = resource.getrlimit(core_resource)
+ except ValueError as exc:
+ error = DaemonOSEnvironmentError(
+ "System does not support RLIMIT_CORE resource limit"
+ " ({exc})".format(exc=exc))
+ raise error
+
+ # Set hard and soft limits to zero, i.e. no core dump at all.
+ core_limit = (0, 0)
+ resource.setrlimit(core_resource, core_limit)
+
+
+def detach_process_context(pidfile):
+ """ Detach the process context from parent and session.
+
+ :return: ``None``.
+
+ Detach from the parent process and session group, allowing the
+ parent to exit while this process continues running.
+
+ Reference: “Advanced Programming in the Unix Environment”,
+ section 13.3, by W. Richard Stevens, published 1993 by
+ Addison-Wesley.
+
+ """
+
+ def fork_then_exit_parent(error_message):
+ """ Fork a child process, then exit the parent process.
+
+ :param error_message: Message for the exception in case of a
+ detach failure.
+ :return: ``None``.
+ :raise DaemonProcessDetachError: If the fork fails.
+
+ """
+ try:
+ pid = os.fork()
+ if pid > 0:
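+ # Phoenix adaptation: the parent lingers until the PID file
+ # exists, so the launching process returns only after the
+ # daemonized child has recorded its PID.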
+ while not os.path.exists(pidfile.path):
+ time.sleep(0.1)
+ os._exit(0)
+ except OSError as exc:
+ error = DaemonProcessDetachError(
+ "{message}: [{exc.errno:d}] {exc.strerror}".format(
+ message=error_message, exc=exc))
+ raise error
+
+ fork_then_exit_parent(error_message="Failed first fork")
+ os.setsid()
+ fork_then_exit_parent(error_message="Failed second fork")
+
+
+def is_process_started_by_init():
+ """ Determine whether the current process is started by `init`.
+
+ :return: ``True`` iff the parent process is `init`; otherwise
+ ``False``.
+
+ The `init` process is the one with process ID of 1.
+
+ """
+ result = False
+
+ init_pid = 1
+ if os.getppid() == init_pid:
+ result = True
+
+ return result
+
+
+def is_socket(fd):
+ """ Determine whether the file descriptor is a socket.
+
+ :param fd: The file descriptor to interrogate.
+ :return: ``True`` iff the file descriptor is a socket; otherwise
+ ``False``.
+
+ Query the socket type of `fd`. If there is no error, the file is a
+ socket.
+
+ """
+ result = False
+
+ file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
+
+ try:
+ socket_type = file_socket.getsockopt(
+ socket.SOL_SOCKET, socket.SO_TYPE)
+ except socket.error as exc:
+ exc_errno = exc.args[0]
+ if exc_errno == errno.ENOTSOCK:
+ # Socket operation on non-socket.
+ pass
+ else:
+ # Some other socket error.
+ result = True
+ else:
+ # No error getting socket type.
+ result = True
+
+ return result
+
+
+def is_process_started_by_superserver():
+ """ Determine whether the current process is started by the superserver.
+
+ :return: ``True`` if this process was started by the internet
+ superserver; otherwise ``False``.
+
+ The internet superserver creates a network socket, and
+ attaches it to the standard streams of the child process. If
+ that is the case for this process, return ``True``, otherwise
+ ``False``.
+
+ """
+ result = False
+
+ stdin_fd = sys.__stdin__.fileno()
+ if is_socket(stdin_fd):
+ result = True
+
+ return result
+
+
+def is_detach_process_context_required():
+ """ Determine whether detaching the process context is required.
+
+ :return: ``True`` iff detaching the process context is required;
+ otherwise ``False``.
+
+ The process environment is interrogated for the following:
+
+ * Process was started by `init`; or
+
+ * Process was started by `inetd`.
+
+ If any of the above are true, the process is deemed to be already
+ detached.
+
+ """
+ result = True
+ if is_process_started_by_init() or is_process_started_by_superserver():
+ result = False
+
+ return result
+
+
+def close_file_descriptor_if_open(fd):
+ """ Close a file descriptor if already open.
+
+ :param fd: The file descriptor to close.
+ :return: ``None``.
+
+ Close the file descriptor `fd`, suppressing an error in the
+ case the file was not open.
+
+ """
+ try:
+ os.close(fd)
+ except EnvironmentError as exc:
+ if exc.errno == errno.EBADF:
+ # File descriptor was not open.
+ pass
+ else:
+ error = DaemonOSEnvironmentError(
+ "Failed to close file descriptor {fd:d} ({exc})".format(
+ fd=fd, exc=exc))
+ raise error
+
+
+MAXFD = 2048
+
+def get_maximum_file_descriptors():
+ """ Get the maximum number of open file descriptors for this process.
+
+ :return: The number (integer) to use as the maximum number of open
+ files for this process.
+
+ The maximum is the process hard resource limit of maximum number of
+ open file descriptors. If the limit is “infinity”, a default value
+ of ``MAXFD`` is returned.
+
+ """
+ limits = resource.getrlimit(resource.RLIMIT_NOFILE)
+ result = limits[1]
+ if result == resource.RLIM_INFINITY:
+ result = MAXFD
+ return result
+
+
+def close_all_open_files(exclude=set()):
+ """ Close all open file descriptors.
+
+ :param exclude: Collection of file descriptors to skip when closing
+ files.
+ :return: ``None``.
+
+ Closes every file descriptor (if open) of this process. If
+ specified, `exclude` is a set of file descriptors to *not*
+ close.
+
+ """
+ maxfd = get_maximum_file_descriptors()
+ for fd in reversed(range(maxfd)):
+ if fd not in exclude:
+ close_file_descriptor_if_open(fd)
+
+
+def redirect_stream(system_stream, target_stream):
+ """ Redirect a system stream to a specified file.
+
+ :param system_stream: A file object representing a standard I/O
+ stream.
+ :param target_stream: The target file object for the redirected
+ stream, or ``None`` to specify the null device.
+ :return: ``None``.
+
+ `system_stream` is a standard system stream such as
+ ``sys.stdout``. `target_stream` is an open file object that
+ should replace the corresponding system stream object.
+
+ If `target_stream` is ``None``, defaults to opening the
+ operating system's null device and using its file descriptor.
+
+ """
+ if target_stream is None:
+ target_fd = os.open(os.devnull, os.O_RDWR)
+ else:
+ target_fd = target_stream.fileno()
+ os.dup2(target_fd, system_stream.fileno())
+
+
+def make_default_signal_map():
+ """ Make the default signal map for this system.
+
+ :return: A mapping from signal number to handler object.
+
+ The signals available differ by system. The map will not contain
+ any signals not defined on the running system.
+
+ """
+ name_map = {
+ 'SIGTSTP': None,
+ 'SIGTTIN': None,
+ 'SIGTTOU': None,
+ 'SIGTERM': 'terminate',
+ }
+ signal_map = dict(
+ (getattr(signal, name), target)
+ for (name, target) in name_map.items()
+ if hasattr(signal, name))
+
+ return signal_map
+
+
+def set_signal_handlers(signal_handler_map):
+ """ Set the signal handlers as specified.
+
+ :param signal_handler_map: A map from signal number to handler
+ object.
+ :return: ``None``.
+
+ See the `signal` module for details on signal numbers and signal
+ handlers.
+
+ """
+ for (signal_number, handler) in signal_handler_map.items():
+ signal.signal(signal_number, handler)
+
+
+def register_atexit_function(func):
+ """ Register a function for processing at program exit.
+
+ :param func: A callable function expecting no arguments.
+ :return: ``None``.
+
+ The function `func` is registered for a call with no arguments
+ at program exit.
+
+ """
+ atexit.register(func)
+
+
+def _chain_exception_from_existing_exception_context(exc, as_cause=False):
+ """ Decorate the specified exception with the existing exception context.
+
+ :param exc: The exception instance to decorate.
+ :param as_cause: If true, the existing context is declared to be
+ the cause of the exception.
+ :return: ``None``.
+
+ :PEP:`344` describes syntax and attributes (`__traceback__`,
+ `__context__`, `__cause__`) for use in exception chaining.
+
+ Python 2 does not have that syntax, so this function decorates
+ the exception with values from the current exception context.
+
+ """
+ (existing_exc_type, existing_exc, existing_traceback) = sys.exc_info()
+ if as_cause:
+ exc.__cause__ = existing_exc
+ else:
+ exc.__context__ = existing_exc
+ exc.__traceback__ = existing_traceback
+
+class PidFile(object):
+ """
+Adapter between a file path string and the `lockfile` API [0]. Based on an
+example found at [1].
+
+[0]: https://pythonhosted.org/lockfile/lockfile.html
+[1]: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
+"""
+ def __init__(self, path, enter_err_msg=None):
+ self.path = path
+ self.enter_err_msg = enter_err_msg
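+ # Probe the lock up front: briefly take and release an exclusive,
+ # non-blocking flock and remove any stale file, exiting early if
+ # another live instance still holds the lock.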
+ self.pidfile = open(self.path, 'a+')
+ try:
+ fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+ fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_UN)
+ self.pidfile.close()
+ os.remove(self.path)
+ except IOError:
+ sys.exit(self.enter_err_msg)
+
+ def __enter__(self):
+ self.pidfile = open(self.path, 'a+')
+ try:
+ fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ sys.exit(self.enter_err_msg)
+ self.pidfile.seek(0)
+ self.pidfile.truncate()
+ self.pidfile.write(str(os.getpid()))
+ self.pidfile.flush()
+ self.pidfile.seek(0)
+ return self.pidfile
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ try:
+ self.pidfile.close()
+ except IOError as err:
+ if err.errno != errno.EBADF: # file was already closed
+ raise
+ os.remove(self.path)
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
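A minimal usage sketch for this module (hypothetical pid-file path and work
function, not part of the commit): construct a DaemonContext with the
PidFile adapter, then enter it as a context manager.

    import daemon

    context = daemon.DaemonContext(
        working_directory='/',
        umask=0o022,
        pidfile=daemon.PidFile('/tmp/example.pid',
                               enter_err_msg='pid file is locked; already running?'),
    )

    with context:         # __enter__ calls open(): umask, double fork/setsid,
                          # stream redirection, then pidfile.__enter__
        serve_forever()   # hypothetical long-running work

queryserver.py further below imports this module (guarded by a
daemon_supported flag) to daemonize the query server process.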
diff --git a/bin/hadoop-metrics2-hbase.properties b/bin/hadoop-metrics2-hbase.properties
new file mode 100644
index 0000000..bafd444
--- /dev/null
+++ b/bin/hadoop-metrics2-hbase.properties
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# HBase Server Sink Configuration
+#################################
+#
+# Configuration for the metrics2 system for the HBase RegionServers
+# to enable phoenix trace collection on the HBase servers.
+#
+# See hadoop-metrics2-phoenix.properties for how these configurations
+# are utilized.
+#
+# Either this file can be used in place of the standard
+# hadoop-metrics2-hbase.properties file or the below
+# properties should be added to the file of the same name on
+# the HBase classpath (likely in the HBase conf/ folder)
+
+# ensure that we receive traces on the server
+hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
+# Tell the sink where to write the metrics
+hbase.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
+# Only handle traces with a context of "tracing"
+hbase.sink.tracing.context=tracing
diff --git a/bin/hadoop-metrics2-phoenix.properties b/bin/hadoop-metrics2-phoenix.properties
new file mode 100644
index 0000000..f8c7223
--- /dev/null
+++ b/bin/hadoop-metrics2-phoenix.properties
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Metrics properties for phoenix
+####################################
+#
+# There are two options for file names:
+# 1. hadoop-metrics2-[prefix].properties
+# 2. hadoop-metrics2.properties
+# Either will be loaded by the metrics system (but not both).
+#
+# NOTE: The metrics system is only initialized once per JVM (but does ref-counting, so we can't
+# shut down and restart), so we only load the first prefix that we find. Generally, this will be
+# phoenix (unless someone else registers first, but for many clients, there should only be one).
+#
+# Usually, you would use hadoop-metrics2-phoenix.properties, but we use the generic
+# hadoop-metrics2.properties to ensure these are loaded regardless of where we are running,
+# assuming there isn't another config on the classpath.
+
+# When specifying sinks, the syntax to use is:
+# [prefix].[source|sink].[instance].[options]
+# The interesting thing to note is that [instance] can literally be anything (as long as it's
+# not zero-length). It is only there to differentiate the properties that are stored for
+# objects of the same type (e.g. differentiating between two phoenix.sink objects).
+#
+# You could add the following lines to your config:
+#
+# phoenix.sink.thingA.class=com.your-company.SpecialSink
+# phoenix.sink.thingA.option1=value1
+#
+# and also
+#
+# phoenix.sink.thingB.class=org.apache.phoenix.trace.PhoenixMetricsSink
+# phoenix.sink.thingB.doGoodStuff=true
+#
+# which will create both SpecialSink and PhoenixMetricsSink and register them
+# as MetricsSinks, but SpecialSink will only see option1=value1 in its
+# configuration; similarly, the instantiated PhoenixMetricsSink will
+# only see doGoodStuff=true in its configuration.
+#
+# See the javadoc of package-info.java in org.apache.hadoop.metrics2 for details.
+
+# Uncomment to NOT start MBeans
+# *.source.start_mbeans=false
+
+# Sample from all the sources every 10 seconds
+*.period=10
+
+# Write Traces to Phoenix
+##########################
+# ensure that we receive traces on the server
+phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
+# Tell the sink where to write the metrics
+phoenix.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
+# Only handle traces with a context of "tracing"
+phoenix.sink.tracing.context=tracing
diff --git a/bin/hbase-site.xml b/bin/hbase-site.xml
new file mode 100644
index 0000000..0ab9fd8
--- /dev/null
+++ b/bin/hbase-site.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.regionserver.wal.codec</name>
+ <value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
+ </property>
+</configuration>
diff --git a/bin/log4j.properties b/bin/log4j.properties
new file mode 100644
index 0000000..2d007e1
--- /dev/null
+++ b/bin/log4j.properties
@@ -0,0 +1,76 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# Define some default values that can be overridden by system properties
+psql.root.logger=WARN,console
+psql.log.dir=.
+psql.log.file=psql.log
+hadoop.log.dir=.
+
+# Define the root logger to the system property "psql.root.logger".
+log4j.rootLogger=${psql.root.logger}
+
+# Logging threshold is INFO for the query server; the root logger stays at WARN for sqlline clients.
+log4j.threshold=INFO
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${psql.log.dir}/${psql.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+# Custom Logging levels
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=ERROR
+log4j.logger.org.apache.hadoop.hbase.HBaseConfiguration=ERROR
+
+# query server packages
+log4j.logger.org.apache.calcite.avatica=INFO
+log4j.logger.org.apache.phoenix.queryserver.server=INFO
+log4j.logger.org.eclipse.jetty.server=INFO
diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py
new file mode 100755
index 0000000..d85a857
--- /dev/null
+++ b/bin/phoenix_utils.py
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+import os
+import fnmatch
+import subprocess
+
+def find(pattern, classPaths):
+ paths = classPaths.split(os.pathsep)
+
+ # for each class path
+ for path in paths:
+ # remove * if it's at the end of path
+ if path is not None and len(path) > 0 and path[-1] == '*':
+ path = path[:-1]
+
+ for root, dirs, files in os.walk(path):
+ # sort the file names so *-client always precedes *-thin-client
+ files.sort()
+ for name in files:
+ if fnmatch.fnmatch(name, pattern):
+ return os.path.join(root, name)
+
+ return ""
+
+def findFileInPathWithoutRecursion(pattern, path):
+ if not os.path.exists(path):
+ return ""
+ files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))]
+ # sort the file names so *-client always precedes *-thin-client
+ files.sort()
+ for name in files:
+ if fnmatch.fnmatch(name, pattern):
+ return os.path.join(path, name)
+
+ return ""
+
+def which(command):
+ for path in os.environ["PATH"].split(os.pathsep):
+ if os.path.exists(os.path.join(path, command)):
+ return os.path.join(path, command)
+ return None
+
+def findClasspath(command_name):
+ command_path = which(command_name)
+ if command_path is None:
+ # We don't have this command, so we can't get its classpath
+ return ''
+ command = "%s%s" %(command_path, ' classpath')
+ return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read()
+
+def setPath():
+ PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client.jar"
+ PHOENIX_THIN_CLIENT_JAR_PATTERN = "phoenix-*-thin-client.jar"
+ PHOENIX_QUERYSERVER_JAR_PATTERN = "phoenix-*-queryserver.jar"
+ PHOENIX_LOADBALANCER_JAR_PATTERN = "phoenix-load-balancer-*[!t][!e][!s][!t][!s].jar"
+ PHOENIX_TRACESERVER_JAR_PATTERN = "phoenix-tracing-webapp-*-runnable.jar"
+ PHOENIX_TESTS_JAR_PATTERN = "phoenix-core-*-tests*.jar"
+ PHOENIX_PHERF_JAR_PATTERN = "phoenix-pherf-*-minimal*.jar"
+
+ # Backward compatibility: the old env variable PHOENIX_LIB_DIR was replaced by PHOENIX_CLASS_PATH
+ global phoenix_class_path
+ phoenix_class_path = os.getenv('PHOENIX_LIB_DIR','')
+ if phoenix_class_path == "":
+ phoenix_class_path = os.getenv('PHOENIX_CLASS_PATH','')
+
+ global hbase_conf_dir
+ # if HBASE_CONF_DIR set explicitly, use that
+ hbase_conf_dir = os.getenv('HBASE_CONF_DIR', os.getenv('HBASE_CONF_PATH'))
+ if not hbase_conf_dir:
+ # else fall back to HBASE_HOME
+ if os.getenv('HBASE_HOME'):
+ hbase_conf_dir = os.path.join(os.getenv('HBASE_HOME'), "conf")
+ elif os.name == 'posix':
+ # default to the bigtop configuration dir
+ hbase_conf_dir = '/etc/hbase/conf'
+ else:
+ # Try to provide something valid
+ hbase_conf_dir = '.'
+ global hbase_conf_path # keep conf_path around for backward compatibility
+ hbase_conf_path = hbase_conf_dir
+
+ global current_dir
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+
+ global pherf_conf_path
+ pherf_conf_path = os.path.join(current_dir, "config")
+ pherf_properties_file = find("pherf.properties", pherf_conf_path)
+ if pherf_properties_file == "":
+ pherf_conf_path = os.path.join(current_dir, "..", "phoenix-pherf", "config")
+
+ global phoenix_jar_path
+ phoenix_jar_path = os.path.join(current_dir, "..", "phoenix-client", "target","*")
+
+ global phoenix_client_jar
+ phoenix_client_jar = find("phoenix-*-client.jar", phoenix_jar_path)
+ if phoenix_client_jar == "":
+ phoenix_client_jar = findFileInPathWithoutRecursion(PHOENIX_CLIENT_JAR_PATTERN, os.path.join(current_dir, ".."))
+ if phoenix_client_jar == "":
+ phoenix_client_jar = find(PHOENIX_CLIENT_JAR_PATTERN, phoenix_class_path)
+
+ global phoenix_test_jar_path
+ phoenix_test_jar_path = os.path.join(current_dir, "..", "phoenix-core", "target","*")
+
+ global hadoop_conf
+ hadoop_conf = os.getenv('HADOOP_CONF_DIR', None)
+ if not hadoop_conf:
+ if os.name == 'posix':
+ # Try to provide a sane configuration directory for Hadoop if not otherwise provided.
+ # If there's no jaas file specified by the caller, this is necessary when Kerberos is enabled.
+ hadoop_conf = '/etc/hadoop/conf'
+ else:
+ # Try to provide something valid.
+ hadoop_conf = '.'
+
+ global hadoop_classpath
+ if (os.name != 'nt'):
+ hadoop_classpath = findClasspath('hadoop')
+ else:
+ hadoop_classpath = os.getenv('HADOOP_CLASSPATH', '')
+
+ global hadoop_common_jar_path
+ hadoop_common_jar_path = os.path.join(current_dir, "..", "phoenix-client", "target","*")
+
+ global hadoop_common_jar
+ hadoop_common_jar = find("hadoop-common*.jar", hadoop_common_jar_path)
+
+ global hadoop_hdfs_jar_path
+ hadoop_hdfs_jar_path = os.path.join(current_dir, "..", "phoenix-client", "target","*")
+
+ global hadoop_hdfs_jar
+ hadoop_hdfs_jar = find("hadoop-hdfs*.jar", hadoop_hdfs_jar_path)
+
+ global testjar
+ testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_test_jar_path)
+ if testjar == "":
+ testjar = findFileInPathWithoutRecursion(PHOENIX_TESTS_JAR_PATTERN, os.path.join(current_dir, "..", 'lib'))
+ if testjar == "":
+ testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_class_path)
+
+ global phoenix_queryserver_jar
+ phoenix_queryserver_jar = find(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..", "queryserver", "target", "*"))
+ if phoenix_queryserver_jar == "":
+ phoenix_queryserver_jar = findFileInPathWithoutRecursion(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..", "lib"))
+ if phoenix_queryserver_jar == "":
+ phoenix_queryserver_jar = findFileInPathWithoutRecursion(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+ global phoenix_loadbalancer_jar
+ phoenix_loadbalancer_jar = find(PHOENIX_LOADBALANCER_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-loadbalancer", "target", "*"))
+ if phoenix_loadbalancer_jar == "":
+ phoenix_loadbalancer_jar = findFileInPathWithoutRecursion(PHOENIX_LOADBALANCER_JAR_PATTERN, os.path.join(current_dir, "..", "lib"))
+ if phoenix_loadbalancer_jar == "":
+ phoenix_loadbalancer_jar = findFileInPathWithoutRecursion(PHOENIX_LOADBALANCER_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+ global phoenix_traceserver_jar
+ phoenix_traceserver_jar = find(PHOENIX_TRACESERVER_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-tracing-webapp", "target", "*"))
+ if phoenix_traceserver_jar == "":
+ phoenix_traceserver_jar = findFileInPathWithoutRecursion(PHOENIX_TRACESERVER_JAR_PATTERN, os.path.join(current_dir, "..", "lib"))
+ if phoenix_traceserver_jar == "":
+ phoenix_traceserver_jar = findFileInPathWithoutRecursion(PHOENIX_TRACESERVER_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+ global phoenix_pherf_jar
+ phoenix_pherf_jar = find(PHOENIX_PHERF_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-pherf", "target", "*"))
+ if phoenix_pherf_jar == "":
+ phoenix_pherf_jar = findFileInPathWithoutRecursion(PHOENIX_PHERF_JAR_PATTERN, os.path.join(current_dir, "..", "lib"))
+ if phoenix_pherf_jar == "":
+ phoenix_pherf_jar = findFileInPathWithoutRecursion(PHOENIX_PHERF_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+ global phoenix_thin_client_jar
+ phoenix_thin_client_jar = find(PHOENIX_THIN_CLIENT_JAR_PATTERN, os.path.join(current_dir, "..", "queryserver-client", "target", "*"))
+ if phoenix_thin_client_jar == "":
+ phoenix_thin_client_jar = findFileInPathWithoutRecursion(PHOENIX_THIN_CLIENT_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+ return ""
+
+def shell_quote(args):
+ """
+ Return the platform-specific shell-quoted string. Handles Windows and *nix platforms.
+
+ :param args: array of shell arguments
+ :return: shell quoted string
+ """
+ if os.name == 'nt':
+ import subprocess
+ return subprocess.list2cmdline(args)
+ else:
+ # pipes module isn't available on Windows
+ import pipes
+ return " ".join([pipes.quote(v) for v in args])
+
+def common_sqlline_args(parser):
+ parser.add_argument('-v', '--verbose', help='Verbosity on sqlline.', default='true')
+ parser.add_argument('-c', '--color', help='Color setting for sqlline.', default='true')
+ parser.add_argument('-fc', '--fastconnect', help='Fetch all schemas on initial connection', default='false')
+
+if __name__ == "__main__":
+ setPath()
+ print "phoenix_class_path:", phoenix_class_path
+ print "hbase_conf_dir:", hbase_conf_dir
+ print "hbase_conf_path:", hbase_conf_path
+ print "current_dir:", current_dir
+ print "phoenix_jar_path:", phoenix_jar_path
+ print "phoenix_client_jar:", phoenix_client_jar
+ print "phoenix_test_jar_path:", phoenix_test_jar_path
+ print "hadoop_common_jar_path:", hadoop_common_jar_path
+ print "hadoop_common_jar:", hadoop_common_jar
+ print "hadoop_hdfs_jar_path:", hadoop_hdfs_jar_path
+ print "hadoop_hdfs_jar:", hadoop_hdfs_jar
+ print "testjar:", testjar
+ print "phoenix_queryserver_jar:", phoenix_queryserver_jar
+ print "phoenix_loadbalancer_jar:", phoenix_loadbalancer_jar
+ print "phoenix_thin_client_jar:", phoenix_thin_client_jar
+ print "hadoop_classpath:", hadoop_classpath
diff --git a/bin/psql.py b/bin/psql.py
new file mode 100755
index 0000000..973d3de
--- /dev/null
+++ b/bin/psql.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+import os
+import subprocess
+import sys
+import phoenix_utils
+
+phoenix_utils.setPath()
+
+args = phoenix_utils.shell_quote(sys.argv[1:])
+
+# HBase configuration folder path (where hbase-site.xml resides) for
+# HBase/Phoenix client-side property overrides
+hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
+
+java_home = os.getenv('JAVA_HOME')
+
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd = None
+if os.name == 'posix':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+ hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+ hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+ print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
+ sys.exit(-1)
+
+hbase_env = {}
+if os.path.isfile(hbase_env_path):
+ p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
+ for x in p.stdout:
+ (k, _, v) = x.partition('=')
+ hbase_env[k.strip()] = v.strip()
+
+if hbase_env.has_key('JAVA_HOME'):
+ java_home = hbase_env['JAVA_HOME']
+
+if java_home:
+ java = os.path.join(java_home, 'bin', 'java')
+else:
+ java = 'java'
+
+java_cmd = java + ' $PHOENIX_OPTS ' + \
+ ' -cp "' + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_client_jar + \
+ os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + phoenix_utils.hadoop_classpath + '" -Dlog4j.configuration=file:' + \
+ os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
+ " org.apache.phoenix.util.PhoenixRuntime " + args
+
+exitcode = subprocess.call(java_cmd, shell=True)
+sys.exit(exitcode)
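For reference, the hbase-env handling above amounts to sourcing the shell
script in a child bash and reading the resulting environment back over a
pipe. A minimal standalone sketch of that technique, assuming a POSIX
system with bash on the PATH (the script path below is hypothetical):

    import subprocess

    def read_env_from_script(path):
        # Source the script in a child bash and capture `env` output.
        cmd = ['bash', '-c', 'source %s && env' % path]
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
        env = {}
        for line in out.splitlines():
            key, _, value = line.partition('=')
            env[key.strip()] = value.strip()
        return env

    # env = read_env_from_script('/etc/hbase/conf/hbase-env.sh')
    # print env.get('JAVA_HOME')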
diff --git a/bin/queryserver.py b/bin/queryserver.py
new file mode 100755
index 0000000..0c07b3b
--- /dev/null
+++ b/bin/queryserver.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+#
+# Script to handle launching the query server process.
+#
+# usage: queryserver.py [start|stop|makeWinServiceDesc] [-Dhadoop=configs]
+#
+
+import datetime
+import getpass
+import os
+import os.path
+import signal
+import subprocess
+import sys
+import tempfile
+
+try:
+ import daemon
+ daemon_supported = True
+except ImportError:
+ # daemon script not supported on some platforms (windows?)
+ daemon_supported = False
+
+import phoenix_utils
+
+phoenix_utils.setPath()
+
+command = None
+args = sys.argv
+
+if len(args) > 1:
+ if args[1] == 'start':
+ command = 'start'
+ elif args[1] == 'stop':
+ command = 'stop'
+ elif args[1] == 'makeWinServiceDesc':
+ command = 'makeWinServiceDesc'
+
+if command:
+ # Pull off queryserver.py and the command
+ args = args[2:]
+else:
+ # Just pull off queryserver.py
+ args = args[1:]
+
+if os.name == 'nt':
+ args = subprocess.list2cmdline(args)
+else:
+ import pipes # pipes module isn't available on Windows
+ args = " ".join([pipes.quote(v) for v in args])
+
+# HBase configuration folder path (where hbase-site.xml resides) for
+# HBase/Phoenix client-side property override
+hbase_config_path = phoenix_utils.hbase_conf_dir
+hadoop_config_path = phoenix_utils.hadoop_conf
+hadoop_classpath = phoenix_utils.hadoop_classpath
+
+# TODO: add windows support
+phoenix_file_basename = 'phoenix-%s-queryserver' % getpass.getuser()
+phoenix_log_file = '%s.log' % phoenix_file_basename
+phoenix_out_file = '%s.out' % phoenix_file_basename
+phoenix_pid_file = '%s.pid' % phoenix_file_basename
+
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd = None
+if os.name == 'posix':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+ hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+ hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+ print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
+ sys.exit(-1)
+
+hbase_env = {}
+if os.path.isfile(hbase_env_path):
+ p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
+ for x in p.stdout:
+ (k, _, v) = x.partition('=')
+ hbase_env[k.strip()] = v.strip()
+
+java_home = hbase_env.get('JAVA_HOME') or os.getenv('JAVA_HOME')
+if java_home:
+ java = os.path.join(java_home, 'bin', 'java')
+else:
+ java = 'java'
+
+tmp_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
+opts = os.getenv('PHOENIX_QUERYSERVER_OPTS') or hbase_env.get('PHOENIX_QUERYSERVER_OPTS') or ''
+pid_dir = os.getenv('PHOENIX_QUERYSERVER_PID_DIR') or hbase_env.get('HBASE_PID_DIR') or tmp_dir
+log_dir = os.getenv('PHOENIX_QUERYSERVER_LOG_DIR') or hbase_env.get('HBASE_LOG_DIR') or tmp_dir
+pid_file_path = os.path.join(pid_dir, phoenix_pid_file)
+log_file_path = os.path.join(log_dir, phoenix_log_file)
+out_file_path = os.path.join(log_dir, phoenix_out_file)
+
+# " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n " + \
+# " -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true" + \
+
+# The command is run through subprocess so environment variables are automatically inherited
+java_cmd = '%(java)s -cp ' + hbase_config_path + os.pathsep + hadoop_config_path + os.pathsep + \
+ phoenix_utils.phoenix_client_jar + os.pathsep + phoenix_utils.phoenix_loadbalancer_jar + \
+ os.pathsep + phoenix_utils.phoenix_queryserver_jar + os.pathsep + hadoop_classpath + \
+ " -Dproc_phoenixserver" + \
+ " -Dlog4j.configuration=file:" + os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
+ " -Dpsql.root.logger=%(root_logger)s" + \
+ " -Dpsql.log.dir=%(log_dir)s" + \
+ " -Dpsql.log.file=%(log_file)s" + \
+ " " + opts + \
+ " org.apache.phoenix.queryserver.server.QueryServer " + args
+
+if command == 'makeWinServiceDesc':
+ cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA,console', 'log_dir': log_dir, 'log_file': phoenix_log_file}
+ slices = cmd.split(' ')
+
+ print "<service>"
+ print " <id>queryserver</id>"
+ print " <name>Phoenix Query Server</name>"
+ print " <description>This service runs the Phoenix Query Server.</description>"
+ print " <executable>%s</executable>" % slices[0]
+ print " <arguments>%s</arguments>" % ' '.join(slices[1:])
+ print "</service>"
+ sys.exit()
+
+if command == 'start':
+ if not daemon_supported:
+ print >> sys.stderr, "daemon mode not supported on this platform"
+ sys.exit(-1)
+
+ # run in the background
+ d = os.path.dirname(out_file_path)
+ if not os.path.exists(d):
+ os.makedirs(d)
+ with open(out_file_path, 'a+') as out:
+ context = daemon.DaemonContext(
+ pidfile = daemon.PidFile(pid_file_path, 'Query Server already running, PID file found: %s' % pid_file_path),
+ stdout = out,
+ stderr = out,
+ )
+ print 'starting Query Server, logging to %s' % log_file_path
+ with context:
+ # this block is the main() for the forked daemon process
+ child = None
+ cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA', 'log_dir': log_dir, 'log_file': phoenix_log_file}
+
+ # notify the child when we're killed
+ def handler(signum, frame):
+ if child:
+ child.send_signal(signum)
+ sys.exit(0)
+ signal.signal(signal.SIGTERM, handler)
+
+ print '%s launching %s' % (datetime.datetime.now(), cmd)
+ child = subprocess.Popen(cmd.split())
+ sys.exit(child.wait())
+
+elif command == 'stop':
+ if not daemon_supported:
+ print >> sys.stderr, "daemon mode not supported on this platform"
+ sys.exit(-1)
+
+ if not os.path.exists(pid_file_path):
+ print >> sys.stderr, "no Query Server to stop because PID file not found, %s" % pid_file_path
+ sys.exit(0)
+
+ if not os.path.isfile(pid_file_path):
+ print >> sys.stderr, "PID path exists but is not a file! %s" % pid_file_path
+ sys.exit(1)
+
+ pid = None
+ with open(pid_file_path, 'r') as p:
+ pid = int(p.read())
+ if not pid:
+ sys.exit("cannot read PID file, %s" % pid_file_path)
+
+ print "stopping Query Server pid %s" % pid
+ with open(out_file_path, 'a+') as out:
+ print >> out, "%s terminating Query Server" % datetime.datetime.now()
+ os.kill(pid, signal.SIGTERM)
+
+else:
+ # run in the foreground using defaults from log4j.properties
+ cmd = java_cmd % {'java': java, 'root_logger': 'INFO,console', 'log_dir': '.', 'log_file': 'psql.log'}
+ # Because shell=True is not set, we don't have to alter the environment
+ child = subprocess.Popen(cmd.split())
+ sys.exit(child.wait())
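A hedged usage sketch for the script above: driving the start/stop commands
from Python via subprocess (the install path is hypothetical; on platforms
without the bundled daemon module only the foreground mode is available):

    import subprocess

    QS = '/opt/phoenix-queryserver/bin/queryserver.py'  # hypothetical path

    # Start the query server as a background daemon, then stop it.
    subprocess.check_call([QS, 'start'])
    # ... issue queries against http://localhost:8765 ...
    subprocess.check_call([QS, 'stop'])

    # Or run it in the foreground, logging to the console:
    # subprocess.call([QS])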
diff --git a/bin/sqlline-thin.py b/bin/sqlline-thin.py
new file mode 100755
index 0000000..fecc96c
--- /dev/null
+++ b/bin/sqlline-thin.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+import os
+import subprocess
+import sys
+import phoenix_utils
+import atexit
+import urlparse
+
+# import argparse
+try:
+ import argparse
+except ImportError:
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ sys.path.append(os.path.join(current_dir, 'argparse-1.4.0'))
+ import argparse
+
+childProc = None
+def kill_child():
+ if childProc is not None:
+ childProc.terminate()
+ childProc.kill()
+ if os.name != 'nt':
+ os.system("reset")
+atexit.register(kill_child)
+
+parser = argparse.ArgumentParser(description='Launches the Apache Phoenix Thin Client.')
+# Positional argument "url" is optional
+parser.add_argument('url', nargs='?', help='The URL to the Phoenix Query Server.', default='http://localhost:8765')
+# Positional argument "sqlfile" is optional
+parser.add_argument('sqlfile', nargs='?', help='A file of SQL commands to execute.', default='')
+# Avatica wire authentication
+parser.add_argument('-a', '--authentication', help='Mechanism for HTTP authentication.', choices=('SPNEGO', 'BASIC', 'DIGEST', 'NONE'), default='')
+# Avatica wire serialization
+parser.add_argument('-s', '--serialization', help='Serialization type for HTTP API.', choices=('PROTOBUF', 'JSON'), default=None)
+# Avatica authentication
+parser.add_argument('-au', '--auth-user', help='Username for HTTP authentication.')
+parser.add_argument('-ap', '--auth-password', help='Password for HTTP authentication.')
+# Common arguments across sqlline.py and sqlline-thin.py
+phoenix_utils.common_sqlline_args(parser)
+# Parse the args
+args=parser.parse_args()
+
+phoenix_utils.setPath()
+
+url = args.url
+sqlfile = args.sqlfile
+serialization_key = 'phoenix.queryserver.serialization'
+
+def cleanup_url(url):
+ parsed = urlparse.urlparse(url)
+ if parsed.scheme == "":
+ url = "http://" + url
+ parsed = urlparse.urlparse(url)
+ if ":" not in parsed.netloc:
+ url = url + ":8765"
+ return url
+
+def get_serialization():
+ default_serialization='PROTOBUF'
+ env=os.environ.copy()
+ if os.name == 'posix':
+ hbase_exec_name = 'hbase'
+ elif os.name == 'nt':
+ hbase_exec_name = 'hbase.cmd'
+ else:
+ print 'Unknown platform "%s", defaulting to HBase executable of "hbase"' % os.name
+ hbase_exec_name = 'hbase'
+
+ hbase_cmd = phoenix_utils.which(hbase_exec_name)
+ if hbase_cmd is None:
+ print 'Failed to find hbase executable on PATH, defaulting serialization to %s.' % default_serialization
+ return default_serialization
+
+ env['HBASE_CONF_DIR'] = phoenix_utils.hbase_conf_dir
+ proc = subprocess.Popen([hbase_cmd, 'org.apache.hadoop.hbase.util.HBaseConfTool', serialization_key],
+ env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ if proc.returncode != 0:
+ print 'Failed to extract serialization from hbase-site.xml, defaulting to %s.' % default_serialization
+ return default_serialization
+ # Don't expect this to happen, but give a default value just in case
+ if stdout is None:
+ return default_serialization
+
+ stdout = stdout.strip()
+ if stdout == 'null':
+ return default_serialization
+ return stdout
+
+url = cleanup_url(url)
+
+if sqlfile != "":
+ sqlfile = "--run=" + sqlfile
+
+colorSetting = args.color
+# disable color setting for windows OS
+if os.name == 'nt':
+ colorSetting = "false"
+
+# HBase configuration folder path (where hbase-site.xml resides) for
+# HBase/Phoenix client-side property override
+hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
+
+serialization = args.serialization if args.serialization else get_serialization()
+
+java_home = os.getenv('JAVA_HOME')
+
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd = None
+if os.name == 'posix':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+ hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+ hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+ print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
+ sys.exit(-1)
+
+hbase_env = {}
+if os.path.isfile(hbase_env_path):
+ p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
+ for x in p.stdout:
+ (k, _, v) = x.partition('=')
+ hbase_env[k.strip()] = v.strip()
+
+if 'JAVA_HOME' in hbase_env:
+ java_home = hbase_env['JAVA_HOME']
+
+if java_home:
+ java = os.path.join(java_home, 'bin', 'java')
+else:
+ java = 'java'
+
+jdbc_url = 'jdbc:phoenix:thin:url=' + url + ';serialization=' + serialization
+if args.authentication:
+ jdbc_url += ';authentication=' + args.authentication
+if args.auth_user:
+ jdbc_url += ';avatica_user=' + args.auth_user
+if args.auth_password:
+ jdbc_url += ';avatica_password=' + args.auth_password
+
+java_cmd = java + ' $PHOENIX_OPTS ' + \
+ ' -cp "' + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_thin_client_jar + \
+ os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + phoenix_utils.hadoop_classpath + '" -Dlog4j.configuration=file:' + \
+ os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
+ " org.apache.phoenix.queryserver.client.SqllineWrapper -d org.apache.phoenix.queryserver.client.Driver " + \
+ ' -u "' + jdbc_url + '"' + " -n none -p none " + \
+ " --color=" + colorSetting + " --fastConnect=" + args.fastconnect + " --verbose=" + args.verbose + \
+ " --incremental=false --isolation=TRANSACTION_READ_COMMITTED " + sqlfile
+
+exitcode = subprocess.call(java_cmd, shell=True)
+sys.exit(exitcode)
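The URL normalization above is worth spelling out: a bare host gains an
http:// scheme, and a host without a port gains the default 8765. A sketch
of the same logic with example inputs:

    import urlparse

    def cleanup_url(url):
        # Mirrors the normalization in sqlline-thin.py above.
        parsed = urlparse.urlparse(url)
        if parsed.scheme == "":
            url = "http://" + url
            parsed = urlparse.urlparse(url)
        if ":" not in parsed.netloc:
            url = url + ":8765"
        return url

    assert cleanup_url('myhost') == 'http://myhost:8765'
    assert cleanup_url('myhost:1234') == 'http://myhost:1234'
    assert cleanup_url('https://myhost:8765') == 'https://myhost:8765'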
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
new file mode 100644
index 0000000..a421ff3
--- /dev/null
+++ b/phoenix-client/pom.xml
@@ -0,0 +1,366 @@
+<?xml version='1.0'?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-queryserver</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>phoenix-client</artifactId>
+ <name>Phoenix Client</name>
+ <description>Phoenix Client</description>
+ <packaging>jar</packaging>
+ <properties>
+ <!-- Don't make a test-jar -->
+ <maven.test.skip>true</maven.test.skip>
+ <!-- Don't make a source-jar -->
+ <source.skip>true</source.skip>
+ <license.bundles.dependencies>true</license.bundles.dependencies>
+ <top.dir>${project.basedir}/..</top.dir>
+ <shaded.package>org.apache.phoenix.shaded</shaded.package>
+
+ </properties>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-site-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <configuration>
+ <finalName>phoenix-${project.version}-client</finalName>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>install-file</goal>
+ </goals>
+ <id>default-install</id>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ <phase>install</phase>
+ </execution>
+ </executions>
+ <configuration>
+ <file>${basedir}/target/phoenix-${project.version}-client.jar</file>
+ <pomFile>${basedir}/pom.xml</pomFile>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <finalName>phoenix-${project.version}-client</finalName>
+ <shadedArtifactAttached>false</shadedArtifactAttached>
+ <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+ <shadeTestJar>false</shadeTestJar>
+ <transformers>
+ <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>csv-bulk-load-config.properties</resource>
+ <file>
+ ${project.basedir}/../config/csv-bulk-load-config.properties
+ </file>
+ </transformer>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>README.md</resource>
+ <file>${project.basedir}/../README.md</file>
+ </transformer>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>LICENSE.txt</resource>
+ <file>${project.basedir}/../LICENSE</file>
+ </transformer>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>NOTICE</resource>
+ <file>${project.basedir}/../NOTICE</file>
+ </transformer>
+ </transformers>
+ <artifactSet>
+ <includes>
+ <include>*:*</include>
+ </includes>
+ <excludes>
+ <exclude>org.apache.phoenix:phoenix-client</exclude>
+ <exclude>xom:xom</exclude>
+ </excludes>
+ </artifactSet>
+ <filters>
+ <filter>
+ <artifact>*:*</artifact>
+ <excludes>
+ <exclude>META-INF/*.SF</exclude>
+ <exclude>META-INF/*.DSA</exclude>
+ <exclude>META-INF/*.RSA</exclude>
+ <exclude>META-INF/license/*</exclude>
+ <exclude>LICENSE.*</exclude>
+ <exclude>NOTICE.*</exclude>
+ </excludes>
+ </filter>
+ </filters>
+
+ <relocations>
+
+ <!-- COM relocation -->
+ <relocation>
+ <pattern>com.codahale</pattern>
+ <shadedPattern>${shaded.package}.com.codahale</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.fasterxml</pattern>
+ <shadedPattern>${shaded.package}.com.fasterxml</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.jamesmurty</pattern>
+ <shadedPattern>${shaded.package}.com.jamesmurty</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.jcraft</pattern>
+ <shadedPattern>${shaded.package}.com.jcraft</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.lmax</pattern>
+ <shadedPattern>${shaded.package}.com.lmax</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.thoughtworks</pattern>
+ <shadedPattern>${shaded.package}.com.thoughtworks</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.yammer</pattern>
+ <shadedPattern>${shaded.package}.com.yammer</shadedPattern>
+ </relocation>
+
+ <!-- IO relocations -->
+ <relocation>
+ <pattern>io.netty</pattern>
+ <shadedPattern>${shaded.package}.io.netty</shadedPattern>
+ </relocation>
+
+ <!-- ORG relocations -->
+ <relocation>
+ <pattern>org.antlr</pattern>
+ <shadedPattern>${shaded.package}.org.antlr</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.aopalliance</pattern>
+ <shadedPattern>${shaded.package}.org.aopalliance</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.codehaus</pattern>
+ <shadedPattern>${shaded.package}.org.codehaus</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.fusesource</pattern>
+ <shadedPattern>${shaded.package}.org.fusesource</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.hamcrest</pattern>
+ <shadedPattern>${shaded.package}.org.hamcrest</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.hsqldb</pattern>
+ <shadedPattern>${shaded.package}.org.hsqldb</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.iq80</pattern>
+ <shadedPattern>${shaded.package}.org.iq80</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.jamon</pattern>
+ <shadedPattern>${shaded.package}.org.jamon</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.jboss</pattern>
+ <shadedPattern>${shaded.package}.org.jboss</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.jcodings</pattern>
+ <shadedPattern>${shaded.package}.org.jcodings</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.jets3t</pattern>
+ <shadedPattern>${shaded.package}.org.jets3t</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.joda</pattern>
+ <shadedPattern>${shaded.package}.org.joda</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.joni</pattern>
+ <shadedPattern>${shaded.package}.org.joni</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.junit</pattern>
+ <shadedPattern>${shaded.package}.org.junit</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.kosmix</pattern>
+ <shadedPattern>${shaded.package}.org.kosmix</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.mortbay</pattern>
+ <shadedPattern>${shaded.package}.org.mortbay</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.objectweb</pattern>
+ <shadedPattern>${shaded.package}.org.objectweb</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.stringtemplate</pattern>
+ <shadedPattern>${shaded.package}.org.stringtemplate</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.tukaani</pattern>
+ <shadedPattern>${shaded.package}.org.tukaani</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.znerd</pattern>
+ <shadedPattern>${shaded.package}.org.znerd</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.avro</pattern>
+ <shadedPattern>${shaded.package}.org.apache.avro</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.commons</pattern>
+ <shadedPattern>${shaded.package}.org.apache.commons</shadedPattern>
+ <excludes>
+ <exclude>org.apache.commons.csv.**</exclude>
+ <exclude>org.apache.commons.logging.**</exclude>
+ <exclude>org.apache.commons.configuration.**</exclude>
+ </excludes>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.directory</pattern>
+ <shadedPattern>${shaded.package}.org.apache.directory</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.http</pattern>
+ <shadedPattern>${shaded.package}.org.apache.http</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.jasper</pattern>
+ <shadedPattern>${shaded.package}.org.apache.jasper</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.jute</pattern>
+ <shadedPattern>${shaded.package}.org.apache.jute</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.mina</pattern>
+ <shadedPattern>${shaded.package}.org.apache.mina</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.oro</pattern>
+ <shadedPattern>${shaded.package}.org.apache.oro</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.taglibs</pattern>
+ <shadedPattern>${shaded.package}.org.apache.taglibs</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.thrift</pattern>
+ <shadedPattern>${shaded.package}.org.apache.thrift</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.tools</pattern>
+ <shadedPattern>${shaded.package}.org.apache.tools</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.twill</pattern>
+ <shadedPattern>${shaded.package}.org.apache.twill</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.velocity</pattern>
+ <shadedPattern>${shaded.package}.org.apache.velocity</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.zookeeper</pattern>
+ <shadedPattern>${shaded.package}.org.apache.zookeeper</shadedPattern>
+ </relocation>
+
+ <!-- NET relocations -->
+ <relocation>
+ <pattern>net</pattern>
+ <shadedPattern>${shaded.package}.net</shadedPattern>
+ </relocation>
+
+ <!-- Misc relocations -->
+ <relocation>
+ <pattern>antlr</pattern>
+ <shadedPattern>${shaded.package}.antlr</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>it.unimi</pattern>
+ <shadedPattern>${shaded.package}.it.unimi</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>jline</pattern>
+ <shadedPattern>${shaded.package}.jline</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>junit</pattern>
+ <shadedPattern>${shaded.package}.junit</shadedPattern>
+ </relocation>
+ </relocations>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <!-- Depend on all other internal projects -->
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-core</artifactId>
+ <version>${phoenix.version}</version>
+ </dependency>
+ </dependencies>
+</project>
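The relocation list above is easiest to sanity-check against the built
artifact. A hedged sketch, assuming the shaded jar has already been built
(the path and version below are hypothetical):

    import zipfile

    jar = 'phoenix-client/target/phoenix-1.0.0-SNAPSHOT-client.jar'
    with zipfile.ZipFile(jar) as zf:
        names = zf.namelist()

    # Relocated dependencies should live under the shaded package, while
    # Phoenix's own classes keep their original package.
    shaded = [n for n in names if n.startswith('org/apache/phoenix/shaded/')]
    plain = [n for n in names if n.startswith('org/apache/phoenix/')
             and not n.startswith('org/apache/phoenix/shaded/')]
    print 'shaded entries: %d, phoenix entries: %d' % (len(shaded), len(plain))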
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..99040bf
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,554 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-queryserver</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <packaging>pom</packaging>
+ <name>Phoenix Query Server</name>
+
+ <licenses>
+ <license>
+ <name>The Apache Software License, Version 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+ <distribution>repo</distribution>
+ <comments/>
+ </license>
+ </licenses>
+
+ <organization>
+ <name>Apache Software Foundation</name>
+ <url>http://www.apache.org</url>
+ </organization>
+
+ <modules>
+ <module>queryserver</module>
+ <module>queryserver-client</module>
+ <module>assembly</module>
+ <module>phoenix-client</module>
+ </modules>
+
+ <repositories>
+ <repository>
+ <id>apache release</id>
+ <url>https://repository.apache.org/content/repositories/releases/</url>
+ </repository>
+ </repositories>
+
+ <scm>
+ <connection>scm:git:https://gitbox.apache.org/repos/asf/phoenix-queryserver.git</connection>
+ <url>https://gitbox.apache.org/repos/asf/phoenix-queryserver.git</url>
+ <developerConnection>scm:git:https://gitbox.apache.org/repos/asf/phoenix-queryserver.git</developerConnection>
+ </scm>
+
+ <properties>
+ <!-- General Properties -->
+ <top.dir>${project.basedir}</top.dir>
+
+ <!-- Hadoop Versions -->
+ <hbase.version>1.4.0</hbase.version>
+ <hadoop-two.version>2.7.5</hadoop-two.version>
+ <phoenix.version>4.15.0-HBase-1.4-SNAPSHOT</phoenix.version>
+
+ <!-- Dependency versions -->
+ <protobuf-java.version>2.5.0</protobuf-java.version>
+ <sqlline.version>1.2.0</sqlline.version>
+ <guava.version>13.0.1</guava.version>
+ <jline.version>2.11</jline.version>
+ <commons-logging.version>1.2</commons-logging.version>
+ <avatica.version>1.12.0</avatica.version>
+ <servlet.api.version>3.1.0</servlet.api.version>
+ <!-- Test Dependencies -->
+ <mockito-all.version>1.8.5</mockito-all.version>
+ <junit.version>4.12</junit.version>
+
+ <!-- Plugin versions -->
+ <maven-eclipse-plugin.version>2.9</maven-eclipse-plugin.version>
+ <maven-build-helper-plugin.version>1.9.1</maven-build-helper-plugin.version>
+ <maven-surefire-plugin.version>2.20</maven-surefire-plugin.version>
+ <maven-failsafe-plugin.version>2.20</maven-failsafe-plugin.version>
+
+ <maven-dependency-plugin.version>2.1</maven-dependency-plugin.version>
+ <maven.assembly.version>2.5.2</maven.assembly.version>
+
+ <!-- Plugin options -->
+ <numForkedUT>8</numForkedUT>
+ <numForkedIT>7</numForkedIT>
+ <it.failIfNoSpecifiedTests>false</it.failIfNoSpecifiedTests>
+ <surefire.failIfNoSpecifiedTests>false</surefire.failIfNoSpecifiedTests>
+
+ <!-- Set default encoding so multi-byte tests work correctly on the Mac -->
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+
+ </properties>
+
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.0</version>
+ <configuration>
+ <source>1.8</source>
+ <target>1.8</target>
+ </configuration>
+ </plugin>
+ <!--This plugin's configuration is used to store Eclipse m2e settings
+ only. It has no influence on the Maven build itself. -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.antlr</groupId>
+ <artifactId>antlr3-maven-plugin</artifactId>
+ <versionRange>[3.5,)</versionRange>
+ <goals>
+ <goal>antlr</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore />
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <version>2.5.2</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-eclipse-plugin</artifactId>
+ <version>${maven-eclipse-plugin.version}</version>
+ </plugin>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>${maven.assembly.version}</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <!-- Avoid defining exclusions in pluginManagement as they are global.
+ We already inherit some from the ASF parent pom. -->
+ </plugin>
+ <!-- We put slow-running tests into src/it and run them during the
+ integration-test phase using the failsafe plugin. This way
+ developers can run unit tests conveniently from the IDE or via
+ "mvn package" from the command line without triggering time
+ consuming integration tests. -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>${maven-build-helper-plugin.version}</version>
+ <executions>
+ <execution>
+ <id>add-test-source</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>add-test-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${basedir}/src/it/java</source>
+ </sources>
+ </configuration>
+ </execution>
+ <execution>
+ <id>add-test-resource</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>add-test-resource</goal>
+ </goals>
+ <configuration>
+ <resources>
+ <resource>
+ <directory>${basedir}/src/it/resources</directory>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <version>${maven-failsafe-plugin.version}</version>
+ <executions>
+ <execution>
+ <id>ParallelStatsEnabledTest</id>
+ <configuration>
+ <encoding>UTF-8</encoding>
+ <forkCount>${numForkedIT}</forkCount>
+ <runOrder>alphabetical</runOrder>
+ <reuseForks>true</reuseForks>
+ <!--parallel>methods</parallel>
+ <threadCount>20</threadCount-->
+ <argLine>-Xmx2000m -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}" -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+DisableExplicitGC -XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDump [...]
+ <redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
+ <shutdown>kill</shutdown>
+ <testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
+ <groups>org.apache.phoenix.end2end.ParallelStatsEnabledTest</groups>
+ </configuration>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <version>${maven-dependency-plugin.version}</version>
+ <executions>
+ <execution>
+ <id>create-mrapp-generated-classpath</id>
+ <phase>generate-test-resources</phase>
+ <goals>
+ <goal>build-classpath</goal>
+ </goals>
+ <configuration>
+ <outputFile>${project.build.directory}/classes/mrapp-generated-classpath
+ </outputFile>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <version>2.4.3</version>
+ </plugin>
+ <plugin>
+ <!-- Allows us to get the apache-ds bundle artifacts -->
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>2.5.3</version>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <version>2.13</version>
+ <executions>
+ <execution>
+ <id>validate</id>
+ <phase>validate</phase>
+ <configuration>
+ <skip>true</skip>
+ <configLocation>${top.dir}/src/main/config/checkstyle/checker.xml</configLocation>
+ <suppressionsLocation>${top.dir}/src/main/config/checkstyle/suppressions.xml</suppressionsLocation>
+ <consoleOutput>true</consoleOutput>
+ <headerLocation>${top.dir}/src/main/config/checkstyle/header.txt</headerLocation>
+ <failOnViolation><!--true-->false</failOnViolation>
+ <includeTestSourceDirectory><!--true-->false</includeTestSourceDirectory>
+ </configuration>
+ <goals>
+ <goal>check</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.2.1</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>jar-no-fork</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>2.9</version>
+ <configuration>
+ <quiet>true</quiet>
+ <links>
+ <link>http://hbase.apache.org/apidocs/</link>
+ </links>
+ </configuration>
+ <executions>
+ <execution>
+ <id>attach-javadocs</id>
+ <goals>
+ <!-- TODO turn back on javadocs - disabled now for testing -->
+ <!-- <goal>jar</goal> -->
+ </goals>
+ <configuration>
+ <additionalparam>${javadoc.opts}</additionalparam>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>${maven-surefire-plugin.version}</version>
+ <configuration>
+ <forkCount>${numForkedUT}</forkCount>
+ <reuseForks>true</reuseForks>
+ <argLine>-enableassertions -Xmx2250m -XX:MaxPermSize=128m
+ -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}" -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/</argLine>
+ <redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
+ <shutdown>kill</shutdown>
+ </configuration>
+ </plugin>
+ <!-- All projects create a test jar -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.4</version>
+ <executions>
+ <execution>
+ <phase>prepare-package
+ </phase>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <skipIfEmpty>true</skipIfEmpty>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-site-plugin</artifactId>
+ <version>3.7.1</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <configuration>
+ <excludes>
+ <!-- Header on changelog isn't normal -->
+ <exclude>CHANGES</exclude>
+ <!-- IDE configuration -->
+ <exclude>dev/phoenix.importorder</exclude>
+ <!-- Release L&N -->
+ <exclude>dev/release_files/LICENSE</exclude>
+ <exclude>dev/release_files/NOTICE</exclude>
+ <!-- Exclude data files for examples -->
+ <exclude>docs/*.csv</exclude>
+ <exclude>examples/*.csv</exclude>
+ <!-- Exclude SQL files from rat. Sqlline 1.1.9 doesn't work with
+ comments on the first line of a file. -->
+ <exclude>examples/*.sql</exclude>
+ <exclude>examples/pig/testdata</exclude>
+ <!-- precommit? -->
+ <exclude>**/patchprocess/**</exclude>
+ <!-- Argparse is bundled to work around system Python version
+ issues, compatible with ALv2 -->
+ <exclude>bin/argparse-1.4.0/argparse.py</exclude>
+ <!-- Not our code -->
+ <exclude>python/requests-kerberos/**</exclude>
+ <exclude>python/phoenixdb/phoenixdb/avatica/proto/*</exclude>
+ <exclude>python/phoenixdb/*.rst</exclude>
+ <exclude>python/phoenixdb/ci/**</exclude>
+ <exclude>python/phoenixdb/doc/*.rst</exclude>
+ <exclude>python/phoenixdb/doc/conf.py</exclude>
+ <exclude>python/phoenixdb/doc/Makefile</exclude>
+ </excludes>
+ </configuration>
+ </plugin>
+ <plugin>
+ <!-- Allows us to get the apache-ds bundle artifacts -->
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <inherited>true</inherited>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencyManagement>
+ <dependencies>
+
+ <!-- Intra-project dependencies -->
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-core</artifactId>
+ <version>${phoenix.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-core</artifactId>
+ <version>${phoenix.version}</version>
+ <classifier>tests</classifier>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>queryserver-client</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <!-- HBase dependencies -->
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-testing-util</artifactId>
+ <version>${hbase.version}</version>
+ <scope>test</scope>
+ <optional>true</optional>
+ <exclusions>
+ <exclusion>
+ <groupId>org.jruby</groupId>
+ <artifactId>jruby-complete</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-it</artifactId>
+ <version>${hbase.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>org.jruby</groupId>
+ <artifactId>jruby-complete</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <!-- Hadoop Dependencies -->
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop-two.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.xerial.snappy</groupId>
+ <artifactId>snappy-java</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-minicluster</artifactId>
+ <version>${hadoop-two.version}</version>
+ <optional>true</optional>
+ <scope>test</scope>
+ </dependency>
+
+ <!-- Required for the mini-cluster since HBase is built against an older version of Hadoop -->
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+ <version>${hadoop-two.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-client</artifactId>
+ <version>${hadoop-two.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ <version>${hadoop-two.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ <version>${hadoop-two.version}</version>
+ <type>test-jar</type> <!-- this does not work, which is typical for Maven. -->
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-minikdc</artifactId>
+ <version>${hadoop-two.version}</version>
+ </dependency>
+
+ <!-- General Dependencies -->
+ <dependency>
+ <groupId>org.apache.pig</groupId>
+ <artifactId>pig</artifactId>
+ <version>${pig.version}</version>
+ <classifier>h2</classifier>
+ <exclusions>
+ <exclusion>
+ <groupId>org.xerial.snappy</groupId>
+ <artifactId>snappy-java</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.calcite.avatica</groupId>
+ <artifactId>avatica</artifactId>
+ <version>${avatica.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.calcite.avatica</groupId>
+ <artifactId>avatica-core</artifactId>
+ <version>${avatica.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.calcite.avatica</groupId>
+ <artifactId>avatica-server</artifactId>
+ <version>${avatica.version}</version>
+ </dependency>
+
+ <!-- Make sure we have all the antlr dependencies -->
+ <dependency>
+ <groupId>jline</groupId>
+ <artifactId>jline</artifactId>
+ <version>2.11</version>
+ </dependency>
+ <dependency>
+ <groupId>sqlline</groupId>
+ <artifactId>sqlline</artifactId>
+ <version>${sqlline.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>${junit.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ <version>${commons-logging.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <version>${mockito-all.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>javax.servlet</groupId>
+ <artifactId>javax.servlet-api</artifactId>
+ <version>${servlet.api.version}</version>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+
+</project>
\ No newline at end of file
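When auditing the versions pinned above, a quick hedged helper that lists
the dependencyManagement entries from this pom (assumes it is run from the
repository root, with Python 2.7 for ElementTree namespace support):

    import xml.etree.ElementTree as ET

    NS = {'m': 'http://maven.apache.org/POM/4.0.0'}
    tree = ET.parse('pom.xml')
    for dep in tree.findall('.//m:dependencyManagement//m:dependency', NS):
        artifact = dep.findtext('m:artifactId', namespaces=NS)
        version = dep.findtext('m:version', default='(unset)', namespaces=NS)
        print '%-45s %s' % (artifact, version)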
diff --git a/queryserver-client/pom.xml b/queryserver-client/pom.xml
new file mode 100644
index 0000000..5afc596
--- /dev/null
+++ b/queryserver-client/pom.xml
@@ -0,0 +1,203 @@
+<?xml version='1.0'?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-queryserver</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>queryserver-client</artifactId>
+ <name>Query Server Client</name>
+ <description>A thin JDBC client for interacting with the query server</description>
+
+ <licenses>
+ <license>
+ <name>The Apache Software License, Version 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+ <distribution>repo</distribution>
+ <comments/>
+ </license>
+ </licenses>
+
+ <organization>
+ <name>Apache Software Foundation</name>
+ <url>http://www.apache.org</url>
+ </organization>
+
+ <properties>
+ <top.dir>${project.basedir}/..</top.dir>
+ <shaded.package>org.apache.phoenix.shaded</shaded.package>
+ <protobuf-java.version>3.1.0</protobuf-java.version>
+ </properties>
+
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <skipAssembly>true</skipAssembly>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>thin-client</id>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <finalName>phoenix-${project.version}-thin-client</finalName>
+
+ <transformers>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>README.md</resource>
+ <file>${project.basedir}/../README.md</file>
+ </transformer>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>LICENSE.txt</resource>
+ <file>${project.basedir}/../LICENSE</file>
+ </transformer>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>NOTICE</resource>
+ <file>${project.basedir}/../NOTICE</file>
+ </transformer>
+ <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+ </transformers>
+ <filters>
+ <filter>
+ <artifact>*:*</artifact>
+ <excludes>
+ <exclude>META-INF/*.SF</exclude>
+ <exclude>META-INF/*.DSA</exclude>
+ <exclude>META-INF/*.RSA</exclude>
+ <exclude>META-INF/license/*</exclude>
+ <exclude>LICENSE.*</exclude>
+ <exclude>NOTICE.*</exclude>
+ </excludes>
+ </filter>
+ </filters>
+
+ <relocations>
+
+ <!-- COM relocation -->
+
+ <relocation>
+ <pattern>com.fasterxml</pattern>
+ <shadedPattern>${shaded.package}.com.fasterxml</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.google.collect</pattern>
+ <shadedPattern>${shaded.package}.com.google.collect</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>com.google.protobuf</pattern>
+ <shadedPattern>${shaded.package}.com.google.protobuf</shadedPattern>
+ </relocation>
+ <!-- ORG relocation -->
+ <relocation>
+ <pattern>org.apache.calcite.avatica</pattern>
+ <shadedPattern>${shaded.package}.org.apache.calcite.avatica</shadedPattern>
+ <!-- The protobuf messages can't be relocated due to a limitation
+ in the Avatica protocol. -->
+ <excludes>
+ <exclude>org.apache.calcite.avatica.proto.*</exclude>
+ </excludes>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.commons</pattern>
+ <shadedPattern>${shaded.package}.org.apache.commons</shadedPattern>
+ <excludes>
+ <exclude>org.apache.commons.logging.**</exclude>
+ </excludes>
+ </relocation>
+ <relocation>
+ <pattern>org.apache.http</pattern>
+ <shadedPattern>${shaded.package}.org.apache.http</shadedPattern>
+ </relocation>
+
+ <relocation>
+ <pattern>org.fusesource</pattern>
+ <shadedPattern>${shaded.package}.org.fusesource</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.hamcrest</pattern>
+ <shadedPattern>${shaded.package}.org.hamcrest</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>org.junit</pattern>
+ <shadedPattern>${shaded.package}.org.junit</shadedPattern>
+ </relocation>
+
+ <!-- MISC relocations -->
+
+ <relocation>
+ <pattern>jline</pattern>
+ <shadedPattern>${shaded.package}.jline</shadedPattern>
+ </relocation>
+ <relocation>
+ <pattern>junit</pattern>
+ <shadedPattern>${shaded.package}.junit</shadedPattern>
+ </relocation>
+
+ </relocations>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <configuration>
+ <excludes>
+ <exclude>src/main/resources/META-INF/services/java.sql.Driver</exclude>
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.calcite.avatica</groupId>
+ <artifactId>avatica-core</artifactId>
+ </dependency>
+ <dependency>
+ <!-- a dependency for the thin-client uberjar -->
+ <groupId>sqlline</groupId>
+ <artifactId>sqlline</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ </dependency>
+ </dependencies>
+</project>
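The one subtlety in the relocations above is the exclusion of
org.apache.calcite.avatica.proto: the protobuf message classes must keep
their original package for wire compatibility. A hedged check against the
built thin-client jar (path and version hypothetical):

    import zipfile

    jar = 'queryserver-client/target/phoenix-1.0.0-SNAPSHOT-thin-client.jar'
    with zipfile.ZipFile(jar) as zf:
        names = zf.namelist()

    # Avatica's protobuf messages stay put...
    assert any(n.startswith('org/apache/calcite/avatica/proto/')
               for n in names)
    # ...while the rest of Avatica is relocated under the shaded package.
    assert any(n.startswith(
        'org/apache/phoenix/shaded/org/apache/calcite/avatica/')
        for n in names)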
diff --git a/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/Driver.java b/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/Driver.java
new file mode 100644
index 0000000..5c8f119
--- /dev/null
+++ b/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/Driver.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.client;
+
+import org.apache.calcite.avatica.DriverVersion;
+
+public class Driver extends org.apache.calcite.avatica.remote.Driver {
+
+ public static final String CONNECT_STRING_PREFIX = "jdbc:phoenix:thin:";
+
+ static {
+ new Driver().register();
+ }
+
+ public Driver() {
+ super();
+ }
+
+ @Override
+ protected DriverVersion createDriverVersion() {
+ return DriverVersion.load(
+ Driver.class,
+ "org-apache-phoenix-remote-jdbc.properties",
+ "Phoenix Remote JDBC Driver",
+ "unknown version",
+ "Apache Phoenix",
+ "unknown version");
+ }
+
+ @Override
+ protected String getConnectStringPrefix() {
+ return CONNECT_STRING_PREFIX;
+ }
+}
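The driver above is the Java entry point; from Python, the integration
tests in queryserver/src/it/bin/test_phoenixdb.py go through the phoenixdb
package instead. A minimal sketch, assuming phoenixdb is installed and a
query server is listening on the default port:

    import phoenixdb

    conn = phoenixdb.connect('http://localhost:8765/', autocommit=True)
    cursor = conn.cursor()
    cursor.execute('SELECT 1 FROM SYSTEM.CATALOG LIMIT 1')
    print cursor.fetchall()
    conn.close()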
diff --git a/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/SqllineWrapper.java b/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/SqllineWrapper.java
new file mode 100644
index 0000000..7a22334
--- /dev/null
+++ b/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/SqllineWrapper.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.client;
+
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import sqlline.SqlLine;
+
+/**
+ * Utility class which automatically performs a Kerberos login and then launches sqlline. Tries to
+ * make a pre-populated ticket cache (via kinit before launching) transparently work.
+ */
+public class SqllineWrapper {
+ public static final String HBASE_AUTHENTICATION_ATTR = "hbase.security.authentication";
+ public static final String QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB = "phoenix.queryserver.spnego.auth.disabled";
+ public static final boolean DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED = false;
+
+ static UserGroupInformation loginIfNecessary(Configuration conf) {
+ // Try to avoid an HBase dependency too. Sadly, we have to bring in all of hadoop-common for this.
+ if ("kerberos".equalsIgnoreCase(conf.get(HBASE_AUTHENTICATION_ATTR))) {
+ // sun.security.krb5.principal is the property for setting the principal name, if that
+ // isn't set, fall back to user.name and hope for the best.
+ String principal = System.getProperty("sun.security.krb5.principal", System.getProperty("user.name"));
+ try {
+ // We got hadoop-auth via hadoop-common, so might as well use it.
+ return UserGroupInformation.getUGIFromTicketCache(null, principal);
+ } catch (Exception e) {
+ throw new RuntimeException("Kerberos login failed using ticket cache. Did you kinit?", e);
+ }
+ }
+ return null;
+ }
+
+ private static String[] updateArgsForKerberos(String[] origArgs) {
+ String[] newArgs = new String[origArgs.length];
+ for (int i = 0; i < origArgs.length; i++) {
+ String arg = origArgs[i];
+ newArgs[i] = arg;
+
+ if (arg.equals("-u")) {
+ // Get the JDBC url which is the next argument
+ i++;
+ arg = origArgs[i];
+ if (!arg.contains("authentication=")) {
+ arg = arg + ";authentication=SPNEGO";
+ }
+ newArgs[i] = arg;
+ }
+ }
+ return newArgs;
+ }
+
+ public static void main(String[] args) throws Exception {
+ final Configuration conf = new Configuration(false);
+ conf.addResource("hbase-site.xml");
+
+ // Check if the server config says SPNEGO auth is actually disabled.
+ final boolean disableSpnego = conf.getBoolean(QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB,
+ DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED);
+ if (disableSpnego) {
+ SqlLine.main(args);
+ return;
+ }
+
+ UserGroupInformation ugi = loginIfNecessary(conf);
+
+ if (null != ugi) {
+ final String[] updatedArgs = updateArgsForKerberos(args);
+ ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ SqlLine.main(updatedArgs);
+ return null;
+ }
+ });
+ } else {
+ SqlLine.main(args);
+ }
+ }
+
+}
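The argument rewriting above is small enough to mirror for illustration:
when a Kerberos login happens, the JDBC url passed with -u gains
authentication=SPNEGO unless one is already set. A sketch of the same
logic:

    def update_args_for_kerberos(orig_args):
        # Mirrors SqllineWrapper.updateArgsForKerberos above.
        new_args = list(orig_args)
        for i, arg in enumerate(orig_args):
            if arg == '-u' and i + 1 < len(orig_args):
                url = orig_args[i + 1]
                if 'authentication=' not in url:
                    url += ';authentication=SPNEGO'
                new_args[i + 1] = url
        return new_args

    assert update_args_for_kerberos(
        ['-u', 'jdbc:phoenix:thin:url=http://host:8765']) == \
        ['-u', 'jdbc:phoenix:thin:url=http://host:8765;authentication=SPNEGO']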
diff --git a/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/ThinClientUtil.java b/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/ThinClientUtil.java
new file mode 100644
index 0000000..59fe093
--- /dev/null
+++ b/queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/ThinClientUtil.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.client;
+
+/**
+ * Utilities for thin clients.
+ */
+public final class ThinClientUtil {
+ // The default serialization is also defined in QueryServicesOptions. phoenix-queryserver-client
+ // currently doesn't depend on phoenix-core so we have to deal with the duplication.
+ private static final String DEFAULT_SERIALIZATION = "PROTOBUF";
+
+ private ThinClientUtil() {}
+
+ public static String getConnectionUrl(String hostname, int port) {
+ return getConnectionUrl("http", hostname, port);
+ }
+
+ public static String getConnectionUrl(String protocol, String hostname, int port) {
+ return getConnectionUrl(protocol, hostname, port, DEFAULT_SERIALIZATION);
+ }
+
+ public static String getConnectionUrl(String protocol, String hostname, int port, String serialization) {
+ String urlFmt = Driver.CONNECT_STRING_PREFIX + "url=%s://%s:%s;serialization=%s";
+ return String.format(urlFmt, protocol, hostname, port, serialization);
+ }
+}
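For illustration, the helpers above compose thin-client JDBC URLs as follows. This is a sketch: the host and port are example values, and it assumes queryserver-client is on the classpath and that Driver.CONNECT_STRING_PREFIX is the thin driver's "jdbc:phoenix:thin:" prefix.

    import org.apache.phoenix.queryserver.client.ThinClientUtil;

    public class UrlSketch {
        public static void main(String[] args) {
            // Defaults to http and PROTOBUF serialization:
            System.out.println(ThinClientUtil.getConnectionUrl("localhost", 8765));
            // -> jdbc:phoenix:thin:url=http://localhost:8765;serialization=PROTOBUF
            System.out.println(
                ThinClientUtil.getConnectionUrl("https", "pqs.example.com", 8765, "PROTOBUF"));
            // -> jdbc:phoenix:thin:url=https://pqs.example.com:8765;serialization=PROTOBUF
        }
    }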
diff --git a/queryserver-client/src/main/resources/META-INF/services/java.sql.Driver b/queryserver-client/src/main/resources/META-INF/services/java.sql.Driver
new file mode 100644
index 0000000..f94d657
--- /dev/null
+++ b/queryserver-client/src/main/resources/META-INF/services/java.sql.Driver
@@ -0,0 +1 @@
+org.apache.phoenix.queryserver.client.Driver
diff --git a/queryserver-client/src/main/resources/version/org-apache-phoenix-remote-jdbc.properties b/queryserver-client/src/main/resources/version/org-apache-phoenix-remote-jdbc.properties
new file mode 100644
index 0000000..7e8eb7e
--- /dev/null
+++ b/queryserver-client/src/main/resources/version/org-apache-phoenix-remote-jdbc.properties
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to you under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+driver.name=Apache Phoenix Remote JDBC Driver
+driver.version=${pom.version}
+product.name=Apache Phoenix
+product.version=${pom.version}
+jdbc.compliant=true
+driver.version.major=${version.major}
+driver.version.minor=${version.minor}
+database.version.major=${version.major}
+database.version.minor=${version.minor}
+build.timestamp=${build.timestamp}
diff --git a/queryserver/pom.xml b/queryserver/pom.xml
new file mode 100644
index 0000000..edf0979
--- /dev/null
+++ b/queryserver/pom.xml
@@ -0,0 +1,188 @@
+<?xml version='1.0'?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-queryserver</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>queryserver</artifactId>
+ <name>Query Server</name>
+ <description>A query server for exposing Phoenix to thin clients</description>
+
+ <properties>
+ <top.dir>${project.basedir}/..</top.dir>
+ <shaded.package>org.apache.phoenix.shaded</shaded.package>
+ </properties>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>query-server</id>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <finalName>phoenix-${project.version}-queryserver</finalName>
+ <shadedArtifactAttached>false</shadedArtifactAttached>
+ <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+ <shadeTestJar>false</shadeTestJar>
+ <transformers>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>README.md</resource>
+ <file>${project.basedir}/../README.md</file>
+ </transformer>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>LICENSE.txt</resource>
+ <file>${project.basedir}/../LICENSE</file>
+ </transformer>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+ <resource>NOTICE</resource>
+ <file>${project.basedir}/../NOTICE</file>
+ </transformer>
+ <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+ </transformers>
+ <artifactSet>
+ <includes>
+ <include>org.apache.calcite.avatica:*</include>
+ <include>org.eclipse.jetty:*</include>
+ <include>javax.servlet:*</include>
+ </includes>
+ </artifactSet>
+ <filters>
+ <filter>
+ <artifact>*:*</artifact>
+ <excludes>
+ <exclude>META-INF/*.SF</exclude>
+ <exclude>META-INF/*.DSA</exclude>
+ <exclude>META-INF/*.RSA</exclude>
+ <exclude>META-INF/license/*</exclude>
+ <exclude>LICENSE.*</exclude>
+ <exclude>NOTICE.*</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ <relocations>
+ <relocation>
+ <pattern>org.eclipse.jetty</pattern>
+ <shadedPattern>${shaded.package}.org.eclipse.jetty</shadedPattern>
+ </relocation>
+ <!-- Calcite/Avatica is not relocated because the wire API (as of <=1.8.0) expects
+ consistent class names on client and server. Relocating these would break
+ backwards compatibility. -->
+ </relocations>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>queryserver-client</artifactId>
+ <exclusions>
+ <!-- avatica-core is excluded because it is already pulled in via the
+ "pre-shaded" avatica artifact; using that artifact means we don't
+ have to deal with the mess of mixing protobuf 2 and protobuf 3 versions. -->
+ <exclusion>
+ <groupId>org.apache.calcite.avatica</groupId>
+ <artifactId>avatica-core</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.calcite.avatica</groupId>
+ <artifactId>avatica</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.calcite.avatica</groupId>
+ <artifactId>avatica-server</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>javax.servlet</groupId>
+ <artifactId>javax.servlet-api</artifactId>
+ </dependency>
+ <!-- for tests -->
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-core</artifactId>
+ <classifier>tests</classifier>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-it</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-minicluster</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-testing-util</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-minikdc</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+</project>
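The relocation declared above means the shaded jar carries Jetty under the shaded namespace while Avatica classes keep their original names for wire compatibility. A hypothetical spot-check, assuming the shaded query server jar is on the classpath:

    public class ShadeCheck {
        public static void main(String[] args) throws ClassNotFoundException {
            // The prefix comes from the shaded.package property in this POM;
            // Server is a standard Jetty class.
            Class<?> jetty = Class.forName(
                "org.apache.phoenix.shaded.org.eclipse.jetty.server.Server");
            System.out.println("Relocated Jetty present: " + jetty.getName());
        }
    }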
diff --git a/queryserver/src/build/query-server-runnable.xml b/queryserver/src/build/query-server-runnable.xml
new file mode 100644
index 0000000..d098b63
--- /dev/null
+++ b/queryserver/src/build/query-server-runnable.xml
@@ -0,0 +1,52 @@
+<?xml version='1.0'?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+ <id>runnable</id>
+ <formats>
+ <format>jar</format>
+ </formats>
+ <includeBaseDirectory>false</includeBaseDirectory>
+ <containerDescriptorHandlers>
+ <containerDescriptorHandler>
+ <!--
+ aggregate SPIs so that things like the HDFS FileSystem work in the uberjar
+ http://docs.oracle.com/javase/tutorial/sound/SPI-intro.html
+ -->
+ <handlerName>metaInf-services</handlerName>
+ </containerDescriptorHandler>
+ </containerDescriptorHandlers>
+ <dependencySets>
+ <dependencySet>
+ <unpack>true</unpack>
+ <scope>runtime</scope>
+ <outputDirectory>/</outputDirectory>
+ <includes>
+ <include>org.apache.phoenix:phoenix-queryserver</include>
+ <include>org.apache.phoenix:phoenix-queryserver-client</include>
+ <include>org.apache.calcite.avatica:*</include>
+ </includes>
+ </dependencySet>
+ </dependencySets>
+</assembly>
diff --git a/queryserver/src/it/bin/test_phoenixdb.py b/queryserver/src/it/bin/test_phoenixdb.py
new file mode 100644
index 0000000..0d5d0c6
--- /dev/null
+++ b/queryserver/src/it/bin/test_phoenixdb.py
@@ -0,0 +1,39 @@
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+import phoenixdb
+import phoenixdb.cursor
+import sys
+
+
+if __name__ == '__main__':
+ pqs_port = sys.argv[1]
+ database_url = 'http://localhost:' + str(pqs_port) + '/'
+
+ print("CREATING PQS CONNECTION")
+ conn = phoenixdb.connect(database_url, autocommit=True, auth="SPNEGO")
+ cursor = conn.cursor()
+
+ cursor.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username VARCHAR)")
+ cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
+ cursor.execute("UPSERT INTO users VALUES (?, ?)", (2, 'user'))
+ cursor.execute("SELECT * FROM users")
+ print("RESULTS")
+ print(cursor.fetchall())
+
+ # Release the connection to PQS cleanly
+ cursor.close()
+ conn.close()
diff --git a/queryserver/src/it/bin/test_phoenixdb.sh b/queryserver/src/it/bin/test_phoenixdb.sh
new file mode 100755
index 0000000..7309dbe
--- /dev/null
+++ b/queryserver/src/it/bin/test_phoenixdb.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+#
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+set -u
+set -x
+set -e
+
+function cleanup {
+ # Capture last command status
+ RCODE=$?
+ set +e
+ set +u
+ kdestroy
+ rm -rf $PY_ENV_PATH
+ exit $RCODE
+}
+
+trap cleanup EXIT
+
+echo "LAUNCHING SCRIPT"
+
+LOCAL_PY=$1
+PRINC=$2
+KEYTAB_LOC=$3
+KRB5_CFG_FILE=$4
+PQS_PORT=$5
+PYTHON_SCRIPT=$6
+
+PY_ENV_PATH=$( mktemp -d )
+
+virtualenv $PY_ENV_PATH
+
+pushd ${PY_ENV_PATH}/bin
+
+# The virtualenv "activate" script references unbound variables :(
+set +u
+. activate ""
+
+popd
+
+set -u
+echo "INSTALLING COMPONENTS"
+pip install -e file:///${LOCAL_PY}/requests-kerberos
+pip install -e file:///${LOCAL_PY}/phoenixdb
+
+export KRB5_CONFIG=$KRB5_CFG_FILE
+cat $KRB5_CONFIG
+export KRB5_TRACE=/dev/stdout
+
+echo "RUNNING KINIT"
+kinit -kt $KEYTAB_LOC $PRINC
+klist
+
+unset http_proxy
+unset https_proxy
+
+echo "Working Directory is ${PWD}"
+
+echo "RUN PYTHON TEST on port $PQS_PORT"
+python $PYTHON_SCRIPT $PQS_PORT
diff --git a/queryserver/src/it/java/org/apache/phoenix/end2end/HttpParamImpersonationQueryServerIT.java b/queryserver/src/it/java/org/apache/phoenix/end2end/HttpParamImpersonationQueryServerIT.java
new file mode 100644
index 0000000..db27b9f
--- /dev/null
+++ b/queryserver/src/it/java/org/apache/phoenix/end2end/HttpParamImpersonationQueryServerIT.java
@@ -0,0 +1,438 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
+import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.AccessController;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.token.TokenProvider;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConfigurationFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.queryserver.client.Driver;
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.apache.phoenix.queryserver.server.QueryServer;
+import org.apache.phoenix.util.InstanceResolver;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class HttpParamImpersonationQueryServerIT {
+ private static final Log LOG = LogFactory.getLog(HttpParamImpersonationQueryServerIT.class);
+
+ private static final List<TableName> SYSTEM_TABLE_NAMES = Arrays.asList(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME,
+ PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME,
+ PhoenixDatabaseMetaData.SYSTEM_FUNCTION_HBASE_TABLE_NAME,
+ PhoenixDatabaseMetaData.SYSTEM_SCHEMA_HBASE_TABLE_NAME,
+ PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_HBASE_TABLE_NAME,
+ PhoenixDatabaseMetaData.SYSTEM_STATS_HBASE_TABLE_NAME);
+
+ private static final File TEMP_DIR = new File(getTempDirForClass());
+ private static final File KEYTAB_DIR = new File(TEMP_DIR, "keytabs");
+ private static final List<File> USER_KEYTAB_FILES = new ArrayList<>();
+
+ private static final String SPNEGO_PRINCIPAL = "HTTP/localhost";
+ private static final String PQS_PRINCIPAL = "phoenixqs/localhost";
+ private static final String SERVICE_PRINCIPAL = "securecluster/localhost";
+ private static File KEYTAB;
+
+ private static MiniKdc KDC;
+ private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static LocalHBaseCluster HBASE_CLUSTER;
+ private static int NUM_CREATED_USERS;
+
+ private static ExecutorService PQS_EXECUTOR;
+ private static QueryServer PQS;
+ private static int PQS_PORT;
+ private static String PQS_URL;
+
+ private static String getTempDirForClass() {
+ StringBuilder sb = new StringBuilder(32);
+ sb.append(System.getProperty("user.dir")).append(File.separator);
+ sb.append("target").append(File.separator);
+ sb.append(HttpParamImpersonationQueryServerIT.class.getSimpleName());
+ return sb.toString();
+ }
+
+ private static void updateDefaultRealm() throws Exception {
+ // (at least) one other phoenix test triggers the caching of this field before the KDC is up,
+ // which causes principal parsing to fail.
+ Field f = KerberosName.class.getDeclaredField("defaultRealm");
+ f.setAccessible(true);
+ // Default realm for MiniKDC
+ f.set(null, "EXAMPLE.COM");
+ }
+
+ private static void createUsers(int numUsers) throws Exception {
+ assertNotNull("KDC is null, was setup method called?", KDC);
+ NUM_CREATED_USERS = numUsers;
+ for (int i = 1; i <= numUsers; i++) {
+ String principal = "user" + i;
+ File keytabFile = new File(KEYTAB_DIR, principal + ".keytab");
+ KDC.createPrincipal(keytabFile, principal);
+ USER_KEYTAB_FILES.add(keytabFile);
+ }
+ }
+
+ private static Entry<String,File> getUser(int offset) {
+ Preconditions.checkArgument(offset > 0 && offset <= NUM_CREATED_USERS);
+ return Maps.immutableEntry("user" + offset, USER_KEYTAB_FILES.get(offset - 1));
+ }
+
+ /**
+ * Setup the security configuration for hdfs.
+ */
+ private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception {
+ // Set principal+keytab configuration for HDFS
+ conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
+ conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
+ conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
+ // Enable token access for HDFS blocks
+ conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+ // Only use HTTPS (required because we aren't using "secure" ports)
+ conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+ // Bind on localhost for spnego to have a chance at working
+ conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+ conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+ // Generate SSL certs
+ File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath());
+ keystoresDir.mkdirs();
+ String sslConfDir = KeyStoreTestUtil.getClasspathDir(HttpParamImpersonationQueryServerIT.class);
+ KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false);
+
+ // Magic flag to tell hdfs to not fail on using ports above 1024
+ conf.setBoolean("ignore.secure.ports.for.testing", true);
+ }
+
+ private static void ensureIsEmptyDirectory(File f) throws IOException {
+ if (f.exists()) {
+ if (f.isDirectory()) {
+ FileUtils.deleteDirectory(f);
+ } else {
+ assertTrue("Failed to delete keytab directory", f.delete());
+ }
+ }
+ assertTrue("Failed to create keytab directory", f.mkdirs());
+ }
+
+ /**
+ * Setup and start kerberos, hbase
+ */
+ @BeforeClass
+ public static void setUp() throws Exception {
+ final Configuration conf = UTIL.getConfiguration();
+ // Ensure the dirs we need are created/empty
+ ensureIsEmptyDirectory(TEMP_DIR);
+ ensureIsEmptyDirectory(KEYTAB_DIR);
+ KEYTAB = new File(KEYTAB_DIR, "test.keytab");
+ // Start a MiniKDC
+ KDC = UTIL.setupMiniKdc(KEYTAB);
+ // Create a service principal and spnego principal in one keytab
+ // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
+ // use separate identities for HBase and HDFS results in a GSS initiate error. The quick
+ // solution is to just use a single "service" principal instead of "hbase" and "hdfs"
+ // (or "dn" and "nn") per usual.
+ KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
+ // Start ZK by hand
+ UTIL.startMiniZKCluster();
+
+ // Create a number of unprivileged users
+ createUsers(2);
+
+ // Set configuration for HBase
+ HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ HBaseKerberosUtils.setSecuredConfiguration(conf);
+ setHdfsSecuredConfiguration(conf);
+ UserGroupInformation.setConfiguration(conf);
+ conf.setInt(HConstants.MASTER_PORT, 0);
+ conf.setInt(HConstants.MASTER_INFO_PORT, 0);
+ conf.setInt(HConstants.REGIONSERVER_PORT, 0);
+ conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
+ conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
+ conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName(), TokenProvider.class.getName());
+
+ // Secure Phoenix setup
+ conf.set("phoenix.queryserver.kerberos.http.principal", SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set("phoenix.queryserver.http.keytab.file", KEYTAB.getAbsolutePath());
+ conf.set("phoenix.queryserver.kerberos.principal", PQS_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
+ conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
+ conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
+ // Required so that PQS can impersonate the end-users to HBase
+ conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
+ conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
+ // user1 is allowed to impersonate others, user2 is not
+ conf.set("hadoop.proxyuser.user1.groups", "*");
+ conf.set("hadoop.proxyuser.user1.hosts", "*");
+ conf.setBoolean(QueryServices.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB, true);
+
+ // Clear the cached singletons so we can inject our own.
+ InstanceResolver.clearSingletons();
+ // Make sure the ConnectionInfo doesn't try to pull a default Configuration
+ InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
+ @Override
+ public Configuration getConfiguration() {
+ return conf;
+ }
+ @Override
+ public Configuration getConfiguration(Configuration confToClone) {
+ Configuration copy = new Configuration(conf);
+ copy.addResource(confToClone);
+ return copy;
+ }
+ });
+ updateDefaultRealm();
+
+ // Start HDFS
+ UTIL.startMiniDFSCluster(1);
+ // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong
+ // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
+ // classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
+ Path rootdir = UTIL.getDataTestDirOnTestFS(HttpParamImpersonationQueryServerIT.class.getSimpleName());
+ FSUtils.setRootDir(conf, rootdir);
+ HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
+ HBASE_CLUSTER.startup();
+
+ // Then fork a thread with PQS in it.
+ startQueryServer();
+ }
+
+ private static void startQueryServer() throws Exception {
+ PQS = new QueryServer(new String[0], UTIL.getConfiguration());
+ // Get the PQS ident for PQS to use
+ final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(PQS_PRINCIPAL, KEYTAB.getAbsolutePath());
+ PQS_EXECUTOR = Executors.newSingleThreadExecutor();
+ // Launch PQS, doing the Kerberos login ourselves instead of letting PQS do it itself
+ // (which would break the HBase/HDFS logins also running in the same test case).
+ PQS_EXECUTOR.submit(new Runnable() {
+ @Override public void run() {
+ ugi.doAs(new PrivilegedAction<Void>() {
+ @Override public Void run() {
+ PQS.run();
+ return null;
+ }
+ });
+ }
+ });
+ PQS.awaitRunning();
+ PQS_PORT = PQS.getPort();
+ PQS_URL = ThinClientUtil.getConnectionUrl("localhost", PQS_PORT) + ";authentication=SPNEGO";
+ }
+
+ @AfterClass
+ public static void stopKdc() throws Exception {
+ // Remove our custom ConfigurationFactory for future tests
+ InstanceResolver.clearSingletons();
+ if (PQS_EXECUTOR != null) {
+ PQS.stop();
+ PQS_EXECUTOR.shutdown();
+ if (!PQS_EXECUTOR.awaitTermination(5, TimeUnit.SECONDS)) {
+ LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
+ }
+ }
+ if (HBASE_CLUSTER != null) {
+ HBASE_CLUSTER.shutdown();
+ HBASE_CLUSTER.join();
+ }
+ if (UTIL != null) {
+ UTIL.shutdownMiniZKCluster();
+ }
+ if (KDC != null) {
+ KDC.stop();
+ }
+ }
+
+ @Test
+ public void testSuccessfulImpersonation() throws Exception {
+ final Entry<String,File> user1 = getUser(1);
+ final Entry<String,File> user2 = getUser(2);
+ // Build the JDBC URL by hand with the doAs
+ final String doAsUrlTemplate = Driver.CONNECT_STRING_PREFIX + "url=http://localhost:" + PQS_PORT + "?"
+ + QueryServicesOptions.DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM + "=%s;authentication=SPNEGO;serialization=PROTOBUF";
+ final String tableName = "POSITIVE_IMPERSONATION";
+ final int numRows = 5;
+ final UserGroupInformation serviceUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, KEYTAB.getAbsolutePath());
+ serviceUgi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override public Void run() throws Exception {
+ createTable(tableName, numRows);
+ grantUsersToPhoenixSystemTables(Arrays.asList(user1.getKey(), user2.getKey()));
+ return null;
+ }
+ });
+ UserGroupInformation user1Ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user1.getKey(), user1.getValue().getAbsolutePath());
+ user1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override public Void run() throws Exception {
+ // This user should not be able to read the table
+ readAndExpectPermissionError(PQS_URL, tableName, numRows);
+ // Run the same query with the same credentials, but with a doAs. We should be permitted since the user we're impersonating can run the query
+ final String doAsUrl = String.format(doAsUrlTemplate, serviceUgi.getShortUserName());
+ try (Connection conn = DriverManager.getConnection(doAsUrl);
+ Statement stmt = conn.createStatement()) {
+ conn.setAutoCommit(true);
+ readRows(stmt, tableName, numRows);
+ }
+ return null;
+ }
+ });
+ }
+
+ @Test
+ public void testDisallowedImpersonation() throws Exception {
+ final Entry<String,File> user2 = getUser(2);
+ // Build the JDBC URL by hand with the doAs
+ final String doAsUrlTemplate = Driver.CONNECT_STRING_PREFIX + "url=http://localhost:" + PQS_PORT + "?"
+ + QueryServicesOptions.DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM + "=%s;authentication=SPNEGO;serialization=PROTOBUF";
+ final String tableName = "DISALLOWED_IMPERSONATION";
+ final int numRows = 5;
+ final UserGroupInformation serviceUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, KEYTAB.getAbsolutePath());
+ serviceUgi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override public Void run() throws Exception {
+ createTable(tableName, numRows);
+ grantUsersToPhoenixSystemTables(Arrays.asList(user2.getKey()));
+ return null;
+ }
+ });
+ UserGroupInformation user2Ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user2.getKey(), user2.getValue().getAbsolutePath());
+ user2Ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override public Void run() throws Exception {
+ // This user is disallowed to read this table
+ readAndExpectPermissionError(PQS_URL, tableName, numRows);
+ // This user is also not allowed to impersonate
+ final String doAsUrl = String.format(doAsUrlTemplate, serviceUgi.getShortUserName());
+ try (Connection conn = DriverManager.getConnection(doAsUrl);
+ Statement stmt = conn.createStatement()) {
+ conn.setAutoCommit(true);
+ readRows(stmt, tableName, numRows);
+ fail("user2 should not be allowed to impersonate the service user");
+ } catch (Exception e) {
+ LOG.info("Caught expected exception", e);
+ }
+ return null;
+ }
+ });
+ }
+
+ void createTable(String tableName, int numRows) throws Exception {
+ try (Connection conn = DriverManager.getConnection(PQS_URL);
+ Statement stmt = conn.createStatement()) {
+ conn.setAutoCommit(true);
+ assertFalse(stmt.execute("CREATE TABLE " + tableName + "(pk integer not null primary key)"));
+ for (int i = 0; i < numRows; i++) {
+ assertEquals(1, stmt.executeUpdate("UPSERT INTO " + tableName + " values(" + i + ")"));
+ }
+ readRows(stmt, tableName, numRows);
+ }
+ }
+
+ void grantUsersToPhoenixSystemTables(List<String> usersToGrant) throws Exception {
+ // Grant permission to the user to access the system tables
+ try {
+ for (String user : usersToGrant) {
+ for (TableName tn : SYSTEM_TABLE_NAMES) {
+ AccessControlClient.grant(UTIL.getConnection(), tn, user, null, null, Action.READ, Action.EXEC);
+ }
+ }
+ } catch (Throwable e) {
+ throw new Exception(e);
+ }
+ }
+
+ void readAndExpectPermissionError(String jdbcUrl, String tableName, int numRows) {
+ try (Connection conn = DriverManager.getConnection(jdbcUrl);
+ Statement stmt = conn.createStatement()) {
+ conn.setAutoCommit(true);
+ readRows(stmt, tableName, numRows);
+ fail("Expected an exception reading another user's table");
+ } catch (Exception e) {
+ LOG.debug("Caught expected exception", e);
+ // Avatica doesn't re-create new exceptions across the wire. Need to just look at the contents of the message.
+ String errorMessage = e.getMessage();
+ assertTrue("Expected the error message to contain an HBase AccessDeniedException", errorMessage.contains("org.apache.hadoop.hbase.security.AccessDeniedException"));
+ // Expecting an error message like: "Insufficient permissions for user 'user1' (table=POSITIVE_IMPERSONATION, action=READ)"
+ // Being overly cautious to make sure we don't inadvertently pass the test due to permission errors on phoenix system tables.
+ assertTrue("Expected message to contain " + tableName + " and READ", errorMessage.contains(tableName) && errorMessage.contains("READ"));
+ }
+ }
+
+ void readRows(Statement stmt, String tableName, int numRows) throws SQLException {
+ try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
+ for (int i = 0; i < numRows; i++) {
+ assertTrue(rs.next());
+ assertEquals(i, rs.getInt(1));
+ }
+ assertFalse(rs.next());
+ }
+ }
+
+ byte[] copyBytes(byte[] src, int offset, int length) {
+ byte[] dest = new byte[length];
+ System.arraycopy(src, offset, dest, 0, length);
+ return dest;
+ }
+}
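To make the doAs URL shape these tests build by hand concrete, here is a sketch of a client connection that impersonates another user. Host, port, and user names are illustrative; "doAs" is assumed to be the default remote-user-extractor parameter referenced via QueryServicesOptions above.

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class DoAsSketch {
        public static void main(String[] args) throws Exception {
            // The authenticated caller must be permitted to impersonate the
            // target user via the hadoop.proxyuser.* settings shown above.
            String url = "jdbc:phoenix:thin:url=http://localhost:8765"
                + "?doAs=securecluster;authentication=SPNEGO;serialization=PROTOBUF";
            try (Connection conn = DriverManager.getConnection(url)) {
                System.out.println("Connected, impersonating securecluster");
            }
        }
    }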
diff --git a/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerBasicsIT.java b/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerBasicsIT.java
new file mode 100644
index 0000000..ceb0a78
--- /dev/null
+++ b/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerBasicsIT.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static java.lang.String.format;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_CAT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_CATALOG;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
+import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME;
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Array;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.Statement;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+/**
+ * Smoke test for query server.
+ */
+public class QueryServerBasicsIT extends BaseHBaseManagedTimeIT {
+
+ private static final Log LOG = LogFactory.getLog(QueryServerBasicsIT.class);
+
+ private static QueryServerThread AVATICA_SERVER;
+ private static Configuration CONF;
+ private static String CONN_STRING;
+
+ @Rule
+ public TestName name = new TestName();
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ CONF = getTestClusterConfig();
+ CONF.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
+ String url = getUrl();
+ AVATICA_SERVER = new QueryServerThread(new String[] { url }, CONF,
+ QueryServerBasicsIT.class.getName());
+ AVATICA_SERVER.start();
+ AVATICA_SERVER.getQueryServer().awaitRunning();
+ final int port = AVATICA_SERVER.getQueryServer().getPort();
+ LOG.info("Avatica server started on port " + port);
+ CONN_STRING = ThinClientUtil.getConnectionUrl("localhost", port);
+ LOG.info("JDBC connection string is " + CONN_STRING);
+ }
+
+ @AfterClass
+ public static void afterClass() throws Exception {
+ if (AVATICA_SERVER != null) {
+ AVATICA_SERVER.join(TimeUnit.MINUTES.toMillis(1));
+ Throwable t = AVATICA_SERVER.getQueryServer().getThrowable();
+ if (t != null) {
+ fail("query server threw. " + t.getMessage());
+ }
+ assertEquals("query server didn't exit cleanly", 0, AVATICA_SERVER.getQueryServer()
+ .getRetCode());
+ }
+ }
+
+ @Test
+ public void testCatalogs() throws Exception {
+ try (final Connection connection = DriverManager.getConnection(CONN_STRING)) {
+ assertThat(connection.isClosed(), is(false));
+ try (final ResultSet resultSet = connection.getMetaData().getCatalogs()) {
+ final ResultSetMetaData metaData = resultSet.getMetaData();
+ assertFalse("unexpected populated resultSet", resultSet.next());
+ assertEquals(1, metaData.getColumnCount());
+ assertEquals(TABLE_CAT, metaData.getColumnName(1));
+ }
+ }
+ }
+
+ @Test
+ public void testSchemas() throws Exception {
+ Properties props = new Properties();
+ props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
+ try (final Connection connection = DriverManager.getConnection(CONN_STRING, props)) {
+ connection.createStatement().executeUpdate("CREATE SCHEMA IF NOT EXISTS " + SYSTEM_SCHEMA_NAME);
+ assertThat(connection.isClosed(), is(false));
+ try (final ResultSet resultSet = connection.getMetaData().getSchemas()) {
+ final ResultSetMetaData metaData = resultSet.getMetaData();
+ assertTrue("unexpected empty resultset", resultSet.next());
+ assertEquals(2, metaData.getColumnCount());
+ assertEquals(TABLE_SCHEM, metaData.getColumnName(1));
+ assertEquals(TABLE_CATALOG, metaData.getColumnName(2));
+ boolean containsSystem = false;
+ do {
+ if (resultSet.getString(1).equalsIgnoreCase(SYSTEM_SCHEMA_NAME)) containsSystem = true;
+ } while (resultSet.next());
+ assertTrue(format("should contain at least %s schema.", SYSTEM_SCHEMA_NAME), containsSystem);
+ }
+ }
+ }
+
+ @Test
+ public void smokeTest() throws Exception {
+ final String tableName = name.getMethodName();
+ try (final Connection connection = DriverManager.getConnection(CONN_STRING)) {
+ assertThat(connection.isClosed(), is(false));
+ connection.setAutoCommit(true);
+ try (final Statement stmt = connection.createStatement()) {
+ assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
+ assertFalse(stmt.execute("CREATE TABLE " + tableName + "("
+ + "id INTEGER NOT NULL, "
+ + "pk varchar(3) NOT NULL "
+ + "CONSTRAINT PK_CONSTRAINT PRIMARY KEY (id, pk))"));
+ assertEquals(0, stmt.getUpdateCount());
+ assertEquals(1, stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES(1, 'foo')"));
+ assertEquals(1, stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES(2, 'bar')"));
+ assertTrue(stmt.execute("SELECT * FROM " + tableName));
+ try (final ResultSet resultSet = stmt.getResultSet()) {
+ assertTrue(resultSet.next());
+ assertEquals(1, resultSet.getInt(1));
+ assertEquals("foo", resultSet.getString(2));
+ assertTrue(resultSet.next());
+ assertEquals(2, resultSet.getInt(1));
+ assertEquals("bar", resultSet.getString(2));
+ }
+ }
+ final String sql = "SELECT * FROM " + tableName + " WHERE id = ?";
+ try (final PreparedStatement stmt = connection.prepareStatement(sql)) {
+ stmt.setInt(1, 1);
+ try (ResultSet resultSet = stmt.executeQuery()) {
+ assertTrue(resultSet.next());
+ assertEquals(1, resultSet.getInt(1));
+ assertEquals("foo", resultSet.getString(2));
+ }
+ stmt.clearParameters();
+ stmt.setInt(1, 5);
+ try (final ResultSet resultSet = stmt.executeQuery()) {
+ assertFalse(resultSet.next());
+ }
+ }
+ }
+ }
+
+ @Test
+ public void arrayTest() throws Exception {
+ final String tableName = name.getMethodName();
+ try (Connection conn = DriverManager.getConnection(CONN_STRING);
+ Statement stmt = conn.createStatement()) {
+ conn.setAutoCommit(false);
+ assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
+ assertFalse(stmt.execute("CREATE TABLE " + tableName + " ("
+ + "pk VARCHAR NOT NULL PRIMARY KEY, "
+ + "histogram INTEGER[])")
+ );
+ conn.commit();
+ int numRows = 10;
+ int numEvenElements = 4;
+ int numOddElements = 6;
+ for (int i = 0; i < numRows; i++) {
+ int arrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
+ StringBuilder sb = new StringBuilder();
+ for (int arrayOffset = 0; arrayOffset < arrayLength; arrayOffset++) {
+ if (sb.length() > 0) {
+ sb.append(", ");
+ }
+ sb.append(getArrayValueForOffset(arrayOffset));
+ }
+ String updateSql = "UPSERT INTO " + tableName + " values('" + i + "', " + "ARRAY[" + sb.toString() + "])";
+ assertEquals(1, stmt.executeUpdate(updateSql));
+ }
+ conn.commit();
+ try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
+ for (int i = 0; i < numRows; i++) {
+ assertTrue(rs.next());
+ assertEquals(i, Integer.parseInt(rs.getString(1)));
+ Array array = rs.getArray(2);
+ Object untypedArrayData = array.getArray();
+ assertTrue("Expected array data to be an int array, but was " + untypedArrayData.getClass(), untypedArrayData instanceof Object[]);
+ Object[] arrayData = (Object[]) untypedArrayData;
+ int expectedArrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
+ assertEquals(expectedArrayLength, arrayData.length);
+ for (int arrayOffset = 0; arrayOffset < expectedArrayLength; arrayOffset++) {
+ assertEquals(getArrayValueForOffset(arrayOffset), arrayData[arrayOffset]);
+ }
+ }
+ assertFalse(rs.next());
+ }
+ }
+ }
+
+ @Test
+ public void preparedStatementArrayTest() throws Exception {
+ final String tableName = name.getMethodName();
+ try (Connection conn = DriverManager.getConnection(CONN_STRING);
+ Statement stmt = conn.createStatement()) {
+ conn.setAutoCommit(false);
+ assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
+ assertFalse(stmt.execute("CREATE TABLE " + tableName + " ("
+ + "pk VARCHAR NOT NULL PRIMARY KEY, "
+ + "histogram INTEGER[])")
+ );
+ conn.commit();
+ int numRows = 10;
+ int numEvenElements = 4;
+ int numOddElements = 6;
+ try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO " + tableName + " values(?, ?)")) {
+ for (int i = 0; i < numRows; i++) {
+ pstmt.setString(1, Integer.toString(i));
+ int arrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
+ Object[] arrayData = new Object[arrayLength];
+ for (int arrayOffset = 0; arrayOffset < arrayLength; arrayOffset++) {
+ arrayData[arrayOffset] = getArrayValueForOffset(arrayOffset);
+ }
+ pstmt.setArray(2, conn.createArrayOf("INTEGER", arrayData));
+ assertEquals(1, pstmt.executeUpdate());
+ }
+ conn.commit();
+ }
+ conn.commit();
+ try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
+ for (int i = 0; i < numRows; i++) {
+ assertTrue(rs.next());
+ assertEquals(i, Integer.parseInt(rs.getString(1)));
+ Array array = rs.getArray(2);
+ Object untypedArrayData = array.getArray();
+ assertTrue("Expected array data to be an int array, but was " + untypedArrayData.getClass(), untypedArrayData instanceof Object[]);
+ Object[] arrayData = (Object[]) untypedArrayData;
+ int expectedArrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
+ assertEquals(expectedArrayLength, arrayData.length);
+ for (int arrayOffset = 0; arrayOffset < expectedArrayLength; arrayOffset++) {
+ assertEquals(getArrayValueForOffset(arrayOffset), arrayData[arrayOffset]);
+ }
+ }
+ assertFalse(rs.next());
+ }
+ }
+ }
+
+ @Test
+ public void preparedStatementVarcharArrayTest() throws Exception {
+ final String tableName = name.getMethodName();
+ try (Connection conn = DriverManager.getConnection(CONN_STRING);
+ Statement stmt = conn.createStatement()) {
+ conn.setAutoCommit(false);
+ assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
+ assertFalse(stmt.execute("CREATE TABLE " + tableName + " ("
+ + "pk VARCHAR NOT NULL PRIMARY KEY, "
+ + "histogram VARCHAR[])")
+ );
+ conn.commit();
+ int numRows = 10;
+ int numEvenElements = 4;
+ int numOddElements = 6;
+ try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO " + tableName + " values(?, ?)")) {
+ for (int i = 0; i < numRows; i++) {
+ pstmt.setString(1, Integer.toString(i));
+ int arrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
+ Object[] arrayData = new Object[arrayLength];
+ for (int arrayOffset = 0; arrayOffset < arrayLength; arrayOffset++) {
+ arrayData[arrayOffset] = Integer.toString(getArrayValueForOffset(arrayOffset));
+ }
+ pstmt.setArray(2, conn.createArrayOf("VARCHAR", arrayData));
+ assertEquals(1, pstmt.executeUpdate());
+ }
+ conn.commit();
+ }
+ conn.commit();
+ try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
+ for (int i = 0; i < numRows; i++) {
+ assertTrue(rs.next());
+ assertEquals(i, Integer.parseInt(rs.getString(1)));
+ Array array = rs.getArray(2);
+ Object untypedArrayData = array.getArray();
+ assertTrue("Expected array data to be an int array, but was " + untypedArrayData.getClass(), untypedArrayData instanceof Object[]);
+ Object[] arrayData = (Object[]) untypedArrayData;
+ int expectedArrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
+ assertEquals(expectedArrayLength, arrayData.length);
+ for (int arrayOffset = 0; arrayOffset < expectedArrayLength; arrayOffset++) {
+ assertEquals(Integer.toString(getArrayValueForOffset(arrayOffset)), arrayData[arrayOffset]);
+ }
+ }
+ assertFalse(rs.next());
+ }
+ }
+ }
+
+ private int getArrayValueForOffset(int arrayOffset) {
+ return arrayOffset * 2 + 1;
+ }
+
+ @Test
+ public void testParameterizedLikeExpression() throws Exception {
+ final Connection conn = DriverManager.getConnection(CONN_STRING);
+ final String tableName = generateUniqueName();
+ conn.createStatement().execute(
+ "CREATE TABLE " + tableName + " (k VARCHAR NOT NULL PRIMARY KEY, i INTEGER)");
+ conn.commit();
+
+ final PreparedStatement upsert = conn.prepareStatement(
+ "UPSERT INTO " + tableName + " VALUES (?, ?)");
+ upsert.setString(1, "123n7-app-2-");
+ upsert.setInt(2, 1);
+ upsert.executeUpdate();
+ conn.commit();
+
+ final PreparedStatement select = conn.prepareStatement(
+ "select k from " + tableName + " where k like ?");
+ select.setString(1, "12%");
+ ResultSet rs = select.executeQuery();
+ assertTrue(rs.next());
+ assertEquals("123n7-app-2-", rs.getString(1));
+ assertFalse(rs.next());
+
+ select.setString(1, null);
+ rs = select.executeQuery();
+ assertFalse(rs.next());
+ }
+}
diff --git a/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java b/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
new file mode 100644
index 0000000..01f73ae
--- /dev/null
+++ b/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import java.io.File;
+import java.security.PrivilegedAction;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.phoenix.query.ConfigurationFactory;
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.apache.phoenix.queryserver.server.QueryServer;
+import org.apache.phoenix.util.InstanceResolver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Strings;
+
+public class QueryServerTestUtil {
+ private static final Logger LOG = LoggerFactory.getLogger(QueryServerTestUtil.class);
+
+ private final Configuration conf;
+ private final HBaseTestingUtility util;
+ private LocalHBaseCluster hbase;
+
+ private final QueryServer pqs;
+ private int port;
+ private String url;
+
+ private String principal;
+ private File keytab;
+
+ private ExecutorService executor;
+
+ public QueryServerTestUtil(Configuration conf) {
+ this.conf = Objects.requireNonNull(conf);
+ this.util = new HBaseTestingUtility(conf);
+ this.pqs = new QueryServer(new String[0], conf);
+ }
+
+ public QueryServerTestUtil(Configuration conf, String principal, File keytab) {
+ this.conf = Objects.requireNonNull(conf);
+ this.principal = principal;
+ this.keytab = keytab;
+ this.util = new HBaseTestingUtility(conf);
+ this.pqs = new QueryServer(new String[0], conf);
+ }
+
+ public void startLocalHBaseCluster(Class testClass) throws Exception {
+ startLocalHBaseCluster(testClass.getCanonicalName());
+ }
+
+ public void startLocalHBaseCluster(String uniqueName) throws Exception {
+ LOG.debug("Starting local HBase cluster for '{}'", uniqueName);
+ // Start ZK
+ util.startMiniZKCluster();
+ // Start HDFS
+ util.startMiniDFSCluster(1);
+ // Start HBase
+ Path rootdir = util.getDataTestDirOnTestFS(uniqueName);
+ FSUtils.setRootDir(conf, rootdir);
+ hbase = new LocalHBaseCluster(conf, 1);
+ hbase.startup();
+ }
+
+ public void stopLocalHBaseCluster() throws Exception {
+ LOG.debug("Stopping local HBase cluster");
+ if (hbase != null) {
+ hbase.shutdown();
+ hbase.join();
+ }
+ if (util != null) {
+ util.shutdownMiniDFSCluster();
+ util.shutdownMiniZKCluster();
+ }
+ }
+
+ public void startQueryServer() throws Exception {
+ setupQueryServerConfiguration(conf);
+ executor = Executors.newSingleThreadExecutor();
+ if (!Strings.isNullOrEmpty(principal) && null != keytab) {
+ // Get the PQS ident for PQS to use
+ final UserGroupInformation ugi = UserGroupInformation
+ .loginUserFromKeytabAndReturnUGI(principal, keytab.getAbsolutePath());
+ // Launch PQS, doing the Kerberos login ourselves instead of letting PQS do it itself
+ // (which would break the HBase/HDFS logins also running in the same test case).
+ executor.submit(new Runnable() {
+ @Override
+ public void run() {
+ ugi.doAs(new PrivilegedAction<Void>() {
+ @Override
+ public Void run() {
+ pqs.run();
+ return null;
+ }
+ });
+ }
+ });
+ } else {
+ // Launch PQS without a login
+ executor.submit(new Runnable() {
+ @Override
+ public void run() {
+ pqs.run();
+ }
+ });
+ }
+ pqs.awaitRunning();
+ port = pqs.getPort();
+ url = ThinClientUtil.getConnectionUrl("localhost", port);
+ }
+
+ public void stopQueryServer() throws Exception {
+ if (pqs != null) {
+ pqs.stop();
+ }
+ if (executor != null) {
+ executor.shutdown();
+ if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
+ LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
+ }
+ }
+ }
+
+ public static void setupQueryServerConfiguration(final Configuration conf) {
+ // Make sure the ConnectionInfo doesn't try to pull a default Configuration
+ InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
+ @Override
+ public Configuration getConfiguration() {
+ return conf;
+ }
+ @Override
+ public Configuration getConfiguration(Configuration confToClone) {
+ Configuration copy = new Configuration(conf);
+ copy.addResource(confToClone);
+ return copy;
+ }
+ });
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public String getUrl() {
+ return url;
+ }
+
+ /**
+ * Returns the query server URL with the specified URL params
+ * @param params URL params
+ * @return URL with params
+ */
+ public String getUrl(Map<String, String> params) {
+ if (params == null || params.size() == 0) {
+ return url;
+ }
+ StringBuilder urlParams = new StringBuilder();
+ for (Map.Entry<String, String> param : params.entrySet()) {
+ urlParams.append(";").append(param.getKey()).append("=").append(param.getValue());
+ }
+ return url + urlParams;
+ }
+}
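A short sketch of how the params-aware getUrl above composes its result. This is a fragment, assuming a started QueryServerTestUtil instance named util; the parameter is an example, not a required setting.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Each map entry is appended as ";key=value" onto the base URL.
    Map<String, String> params = new LinkedHashMap<>();
    params.put("authentication", "SPNEGO");  // example parameter
    String url = util.getUrl(params);
    // e.g. jdbc:phoenix:thin:url=http://localhost:<port>;serialization=PROTOBUF;authentication=SPNEGO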
diff --git a/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerThread.java b/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerThread.java
new file mode 100644
index 0000000..0010656
--- /dev/null
+++ b/queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerThread.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.queryserver.server.QueryServer;
+
+/** Wraps up the query server for tests. */
+public class QueryServerThread extends Thread {
+
+ private final QueryServer main;
+
+ public QueryServerThread(String[] argv, Configuration conf) {
+ this(argv, conf, null);
+ }
+
+ public QueryServerThread(String[] argv, Configuration conf, String name) {
+ this(new QueryServer(argv, conf), name);
+ }
+
+ private QueryServerThread(QueryServer m, String name) {
+ super(m, "query server" + (name == null ? "" : (" - " + name)));
+ this.main = m;
+ setDaemon(true);
+ }
+
+ public QueryServer getQueryServer() {
+ return main;
+ }
+}
diff --git a/queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerIT.java b/queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerIT.java
new file mode 100644
index 0000000..c3ff885
--- /dev/null
+++ b/queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerIT.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
+import org.apache.hadoop.hbase.security.token.TokenProvider;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.phoenix.query.ConfigurationFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.apache.phoenix.queryserver.server.QueryServer;
+import org.apache.phoenix.util.InstanceResolver;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class SecureQueryServerIT {
+ private static final Log LOG = LogFactory.getLog(SecureQueryServerIT.class);
+
+ private static final File TEMP_DIR = new File(getTempDirForClass());
+ private static final File KEYTAB_DIR = new File(TEMP_DIR, "keytabs");
+ private static final List<File> USER_KEYTAB_FILES = new ArrayList<>();
+
+ private static final String SPNEGO_PRINCIPAL = "HTTP/localhost";
+ private static final String PQS_PRINCIPAL = "phoenixqs/localhost";
+ private static final String SERVICE_PRINCIPAL = "securecluster/localhost";
+ private static File KEYTAB;
+
+ private static MiniKdc KDC;
+ private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static LocalHBaseCluster HBASE_CLUSTER;
+ private static int NUM_CREATED_USERS;
+
+ private static ExecutorService PQS_EXECUTOR;
+ private static QueryServer PQS;
+ private static int PQS_PORT;
+ private static String PQS_URL;
+
+ private static String getTempDirForClass() {
+ StringBuilder sb = new StringBuilder(32);
+ sb.append(System.getProperty("user.dir")).append(File.separator);
+ sb.append("target").append(File.separator);
+ sb.append(SecureQueryServerIT.class.getSimpleName());
+ return sb.toString();
+ }
+
+ private static void updateDefaultRealm() throws Exception {
+ // (at least) one other phoenix test triggers the caching of this field before the KDC is up
+ // which causes principal parsing to fail.
+ Field f = KerberosName.class.getDeclaredField("defaultRealm");
+ f.setAccessible(true);
+ // Default realm for MiniKDC
+ f.set(null, "EXAMPLE.COM");
+ }
+
+ private static void createUsers(int numUsers) throws Exception {
+ assertNotNull("KDC is null, was setup method called?", KDC);
+ NUM_CREATED_USERS = numUsers;
+ for (int i = 1; i <= numUsers; i++) {
+ String principal = "user" + i;
+ File keytabFile = new File(KEYTAB_DIR, principal + ".keytab");
+ KDC.createPrincipal(keytabFile, principal);
+ USER_KEYTAB_FILES.add(keytabFile);
+ }
+ }
+
+ private static Entry<String,File> getUser(int offset) {
+ Preconditions.checkArgument(offset > 0 && offset <= NUM_CREATED_USERS);
+ return Maps.immutableEntry("user" + offset, USER_KEYTAB_FILES.get(offset - 1));
+ }
+
+ /**
+ * Sets up the security configuration for HDFS.
+ */
+ private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception {
+ // Set principal+keytab configuration for HDFS
+ conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
+ conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
+ conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
+ // Enable token access for HDFS blocks
+ conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+ // Only use HTTPS (required because we aren't using "secure" ports)
+ conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+ // Bind on localhost for spnego to have a chance at working
+ conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+ conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+ // Generate SSL certs
+ File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath());
+ keystoresDir.mkdirs();
+ String sslConfDir = KeyStoreTestUtil.getClasspathDir(SecureQueryServerIT.class);
+ KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false);
+
+ // Magic flag to tell hdfs to not fail on using ports above 1024
+ conf.setBoolean("ignore.secure.ports.for.testing", true);
+ }
+
+ private static void ensureIsEmptyDirectory(File f) throws IOException {
+ if (f.exists()) {
+ if (f.isDirectory()) {
+ FileUtils.deleteDirectory(f);
+ } else {
+ assertTrue("Failed to delete keytab directory", f.delete());
+ }
+ }
+ assertTrue("Failed to create keytab directory", f.mkdirs());
+ }
+
+ /**
+ * Sets up and starts Kerberos and HBase.
+ */
+ @BeforeClass
+ public static void setUp() throws Exception {
+ final Configuration conf = UTIL.getConfiguration();
+ // Ensure the dirs we need are created/empty
+ ensureIsEmptyDirectory(TEMP_DIR);
+ ensureIsEmptyDirectory(KEYTAB_DIR);
+ KEYTAB = new File(KEYTAB_DIR, "test.keytab");
+ // Start a MiniKDC
+ KDC = UTIL.setupMiniKdc(KEYTAB);
+ // Create a service principal and spnego principal in one keytab
+ // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
+ // use separate identities for HBase and HDFS results in a GSS initiate error. The quick
+ // solution is to just use a single "service" principal instead of "hbase" and "hdfs"
+ // (or "dn" and "nn") per usual.
+ KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
+ // Start ZK by hand
+ UTIL.startMiniZKCluster();
+
+ // Create a number of unprivileged users
+ createUsers(3);
+
+ // Set configuration for HBase
+ HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ HBaseKerberosUtils.setSecuredConfiguration(conf);
+ setHdfsSecuredConfiguration(conf);
+ UserGroupInformation.setConfiguration(conf);
+ conf.setInt(HConstants.MASTER_PORT, 0);
+ conf.setInt(HConstants.MASTER_INFO_PORT, 0);
+ conf.setInt(HConstants.REGIONSERVER_PORT, 0);
+ conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ TokenProvider.class.getName());
+
+ // Secure Phoenix setup
+ conf.set("phoenix.queryserver.kerberos.http.principal", SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set("phoenix.queryserver.http.keytab.file", KEYTAB.getAbsolutePath());
+ conf.set("phoenix.queryserver.kerberos.principal", PQS_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
+ conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
+ conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
+ // Required so that PQS can impersonate the end-users to HBase
+ conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
+ conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
+
+ // Clear the cached singletons so we can inject our own.
+ InstanceResolver.clearSingletons();
+ // Make sure the ConnectionInfo doesn't try to pull a default Configuration
+ InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
+ @Override
+ public Configuration getConfiguration() {
+ return conf;
+ }
+ @Override
+ public Configuration getConfiguration(Configuration confToClone) {
+ Configuration copy = new Configuration(conf);
+ copy.addResource(confToClone);
+ return copy;
+ }
+ });
+ updateDefaultRealm();
+
+ // Start HDFS
+ UTIL.startMiniDFSCluster(1);
+ // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong
+ // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
+ // classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
+ Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerIT.class.getSimpleName());
+ FSUtils.setRootDir(conf, rootdir);
+ HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
+ HBASE_CLUSTER.startup();
+
+ // Then fork a thread with PQS in it.
+ startQueryServer();
+ }
+
+ private static void startQueryServer() throws Exception {
+ PQS = new QueryServer(new String[0], UTIL.getConfiguration());
+ // Log in the Kerberos identity for PQS to use
+ final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(PQS_PRINCIPAL, KEYTAB.getAbsolutePath());
+ PQS_EXECUTOR = Executors.newSingleThreadExecutor();
+ // Launch PQS, doing the Kerberos login ourselves instead of letting PQS do it itself (which would
+ // break the HBase/HDFS logins also running in the same test case).
+ PQS_EXECUTOR.submit(new Runnable() {
+ @Override public void run() {
+ ugi.doAs(new PrivilegedAction<Void>() {
+ @Override public Void run() {
+ PQS.run();
+ return null;
+ }
+ });
+ }
+ });
+ PQS.awaitRunning();
+ PQS_PORT = PQS.getPort();
+ PQS_URL = ThinClientUtil.getConnectionUrl("localhost", PQS_PORT) + ";authentication=SPNEGO";
+ }
+
+ @AfterClass
+ public static void stopKdc() throws Exception {
+ // Remove our custom ConfigurationFactory for future tests
+ InstanceResolver.clearSingletons();
+ if (PQS_EXECUTOR != null) {
+ PQS.stop();
+ PQS_EXECUTOR.shutdown();
+ if (!PQS_EXECUTOR.awaitTermination(5, TimeUnit.SECONDS)) {
+ LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
+ }
+ }
+ if (HBASE_CLUSTER != null) {
+ HBASE_CLUSTER.shutdown();
+ HBASE_CLUSTER.join();
+ }
+ if (UTIL != null) {
+ UTIL.shutdownMiniZKCluster();
+ }
+ if (KDC != null) {
+ KDC.stop();
+ }
+ }
+
+ @Test
+ public void testBasicReadWrite() throws Exception {
+ final Entry<String,File> user1 = getUser(1);
+ UserGroupInformation user1Ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user1.getKey(), user1.getValue().getAbsolutePath());
+ user1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override public Void run() throws Exception {
+ // Phoenix
+ final String tableName = "phx_table1";
+ try (java.sql.Connection conn = DriverManager.getConnection(PQS_URL);
+ Statement stmt = conn.createStatement()) {
+ conn.setAutoCommit(true);
+ assertFalse(stmt.execute("CREATE TABLE " + tableName + "(pk integer not null primary key)"));
+ final int numRows = 5;
+ for (int i = 0; i < numRows; i++) {
+ assertEquals(1, stmt.executeUpdate("UPSERT INTO " + tableName + " values(" + i + ")"));
+ }
+
+ try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
+ for (int i = 0; i < numRows; i++) {
+ assertTrue(rs.next());
+ assertEquals(i, rs.getInt(1));
+ }
+ assertFalse(rs.next());
+ }
+ }
+ return null;
+ }
+ });
+ }
+
+ byte[] copyBytes(byte[] src, int offset, int length) {
+ byte[] dest = new byte[length];
+ System.arraycopy(src, offset, dest, 0, length);
+ return dest;
+ }
+}
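
For reference, assuming ThinClientUtil renders the standard thin-driver form, the
PQS_URL assembled in startQueryServer() above ends up shaped like the following
(port illustrative):

    jdbc:phoenix:thin:url=http://localhost:53412;serialization=PROTOBUF;authentication=SPNEGO

which is why the test only needs a Kerberos ticket (supplied via the doAs) and no
credentials in the url itself.
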
diff --git a/queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerPhoenixDBIT.java b/queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerPhoenixDBIT.java
new file mode 100644
index 0000000..205a831
--- /dev/null
+++ b/queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerPhoenixDBIT.java
@@ -0,0 +1,424 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.lang.reflect.Field;
+import java.nio.file.Paths;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
+import org.apache.hadoop.hbase.security.token.TokenProvider;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.phoenix.query.ConfigurationFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.apache.phoenix.queryserver.server.QueryServer;
+import org.apache.phoenix.util.InstanceResolver;
+import org.junit.AfterClass;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+
+/**
+ * This integration test stands up a secured PQS and runs Python code against it. See supporting
+ * files in phoenix-queryserver/src/it/bin.
+ */
+@Category(NeedsOwnMiniClusterTest.class)
+public class SecureQueryServerPhoenixDBIT {
+ private static enum Kdc {
+ MIT,
+ HEIMDAL;
+ }
+ private static final Logger LOG = LoggerFactory.getLogger(SecureQueryServerPhoenixDBIT.class);
+
+ private static final File TEMP_DIR = new File(getTempDirForClass());
+ private static final File KEYTAB_DIR = new File(TEMP_DIR, "keytabs");
+ private static final List<File> USER_KEYTAB_FILES = new ArrayList<>();
+
+ private static final String SPNEGO_PRINCIPAL = "HTTP/localhost";
+ private static final String PQS_PRINCIPAL = "phoenixqs/localhost";
+ private static final String SERVICE_PRINCIPAL = "securecluster/localhost";
+ private static File KEYTAB;
+
+ private static MiniKdc KDC;
+ private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static LocalHBaseCluster HBASE_CLUSTER;
+ private static int NUM_CREATED_USERS;
+
+ private static ExecutorService PQS_EXECUTOR;
+ private static QueryServer PQS;
+ private static int PQS_PORT;
+ private static String PQS_URL;
+
+ private static String getTempDirForClass() {
+ StringBuilder sb = new StringBuilder(32);
+ sb.append(System.getProperty("user.dir")).append(File.separator);
+ sb.append("target").append(File.separator);
+ sb.append(SecureQueryServerPhoenixDBIT.class.getSimpleName());
+ return sb.toString();
+ }
+
+ private static void updateDefaultRealm() throws Exception {
+ // (at least) one other phoenix test triggers the caching of this field before the KDC is up
+ // which causes principal parsing to fail.
+ Field f = KerberosName.class.getDeclaredField("defaultRealm");
+ f.setAccessible(true);
+ // Default realm for MiniKDC
+ f.set(null, "EXAMPLE.COM");
+ }
+
+ private static void createUsers(int numUsers) throws Exception {
+ assertNotNull("KDC is null, was setup method called?", KDC);
+ NUM_CREATED_USERS = numUsers;
+ for (int i = 1; i <= numUsers; i++) {
+ String principal = "user" + i;
+ File keytabFile = new File(KEYTAB_DIR, principal + ".keytab");
+ KDC.createPrincipal(keytabFile, principal);
+ USER_KEYTAB_FILES.add(keytabFile);
+ }
+ }
+
+ private static Entry<String,File> getUser(int offset) {
+ Preconditions.checkArgument(offset > 0 && offset <= NUM_CREATED_USERS);
+ return Maps.immutableEntry("user" + offset, USER_KEYTAB_FILES.get(offset - 1));
+ }
+
+ /**
+ * Sets up the security configuration for HDFS.
+ */
+ private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception {
+ // Set principal+keytab configuration for HDFS
+ conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
+ conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
+ conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
+ // Enable token access for HDFS blocks
+ conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+ // Only use HTTPS (required because we aren't using "secure" ports)
+ conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+ // Bind on localhost for spnego to have a chance at working
+ conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+ conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+ // Generate SSL certs
+ File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath());
+ keystoresDir.mkdirs();
+ String sslConfDir = KeyStoreTestUtil.getClasspathDir(SecureQueryServerPhoenixDBIT.class);
+ KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false);
+
+ // Magic flag to tell hdfs to not fail on using ports above 1024
+ conf.setBoolean("ignore.secure.ports.for.testing", true);
+ }
+
+ private static void ensureIsEmptyDirectory(File f) throws IOException {
+ if (f.exists()) {
+ if (f.isDirectory()) {
+ FileUtils.deleteDirectory(f);
+ } else {
+ assertTrue("Failed to delete keytab directory", f.delete());
+ }
+ }
+ assertTrue("Failed to create keytab directory", f.mkdirs());
+ }
+
+ /**
+ * Verifies that the given command is executable somewhere on the PATH.
+ */
+ private static void checkForCommandOnPath(String command) throws Exception {
+ Process runPythonProcess = new ProcessBuilder(Arrays.asList("which", command)).start();
+ BufferedReader processOutput = new BufferedReader(new InputStreamReader(runPythonProcess.getInputStream()));
+ BufferedReader processError = new BufferedReader(new InputStreamReader(runPythonProcess.getErrorStream()));
+ int exitCode = runPythonProcess.waitFor();
+ // dump stdout and stderr
+ while (processOutput.ready()) {
+ LOG.info(processOutput.readLine());
+ }
+ while (processError.ready()) {
+ LOG.error(processError.readLine());
+ }
+ Assume.assumeTrue("Could not find '" + command + "' on the PATH", exitCode == 0);
+ }
+
+ /**
+ * Sets up and starts Kerberos and HBase.
+ */
+ @BeforeClass
+ public static void setUp() throws Exception {
+ checkForCommandOnPath("python");
+ checkForCommandOnPath("virtualenv");
+ checkForCommandOnPath("kinit");
+
+ final Configuration conf = UTIL.getConfiguration();
+ // Ensure the dirs we need are created/empty
+ ensureIsEmptyDirectory(TEMP_DIR);
+ ensureIsEmptyDirectory(KEYTAB_DIR);
+ KEYTAB = new File(KEYTAB_DIR, "test.keytab");
+ // Start a MiniKDC
+ KDC = UTIL.setupMiniKdc(KEYTAB);
+ // Create a service principal and spnego principal in one keytab
+ // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
+ // use separate identities for HBase and HDFS results in a GSS initiate error. The quick
+ // solution is to just use a single "service" principal instead of "hbase" and "hdfs"
+ // (or "dn" and "nn") per usual.
+ KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
+ // Start ZK by hand
+ UTIL.startMiniZKCluster();
+
+ // Create a number of unprivileged users
+ createUsers(3);
+
+ // Set configuration for HBase
+ HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
+ HBaseKerberosUtils.setSecuredConfiguration(conf);
+ setHdfsSecuredConfiguration(conf);
+ UserGroupInformation.setConfiguration(conf);
+ conf.setInt(HConstants.MASTER_PORT, 0);
+ conf.setInt(HConstants.MASTER_INFO_PORT, 0);
+ conf.setInt(HConstants.REGIONSERVER_PORT, 0);
+ conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ TokenProvider.class.getName());
+
+ // Secure Phoenix setup
+ conf.set("phoenix.queryserver.kerberos.http.principal", SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set("phoenix.queryserver.http.keytab.file", KEYTAB.getAbsolutePath());
+ conf.set("phoenix.queryserver.kerberos.principal", PQS_PRINCIPAL + "@" + KDC.getRealm());
+ conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
+ conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
+ conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
+ // Required so that PQS can impersonate the end-users to HBase
+ conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
+ conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
+
+ // Clear the cached singletons so we can inject our own.
+ InstanceResolver.clearSingletons();
+ // Make sure the ConnectionInfo doesn't try to pull a default Configuration
+ InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
+ @Override
+ public Configuration getConfiguration() {
+ return conf;
+ }
+ @Override
+ public Configuration getConfiguration(Configuration confToClone) {
+ Configuration copy = new Configuration(conf);
+ copy.addResource(confToClone);
+ return copy;
+ }
+ });
+ updateDefaultRealm();
+
+ // Start HDFS
+ UTIL.startMiniDFSCluster(1);
+ // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong
+ // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
+ // classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
+ Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerPhoenixDBIT.class.getSimpleName());
+ FSUtils.setRootDir(conf, rootdir);
+ HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
+ HBASE_CLUSTER.startup();
+
+ // Then fork a thread with PQS in it.
+ startQueryServer();
+ }
+
+ private static void startQueryServer() throws Exception {
+ PQS = new QueryServer(new String[0], UTIL.getConfiguration());
+ // Log in the Kerberos identity for PQS to use
+ final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(PQS_PRINCIPAL, KEYTAB.getAbsolutePath());
+ PQS_EXECUTOR = Executors.newSingleThreadExecutor();
+ // Launch PQS, doing the Kerberos login ourselves instead of letting PQS do it itself (which would
+ // break the HBase/HDFS logins also running in the same test case).
+ PQS_EXECUTOR.submit(new Runnable() {
+ @Override public void run() {
+ ugi.doAs(new PrivilegedAction<Void>() {
+ @Override public Void run() {
+ PQS.run();
+ return null;
+ }
+ });
+ }
+ });
+ PQS.awaitRunning();
+ PQS_PORT = PQS.getPort();
+ PQS_URL = ThinClientUtil.getConnectionUrl("localhost", PQS_PORT) + ";authentication=SPNEGO";
+ }
+
+ @AfterClass
+ public static void stopKdc() throws Exception {
+ // Remove our custom ConfigurationFactory for future tests
+ InstanceResolver.clearSingletons();
+ if (PQS_EXECUTOR != null) {
+ PQS.stop();
+ PQS_EXECUTOR.shutdown();
+ if (!PQS_EXECUTOR.awaitTermination(5, TimeUnit.SECONDS)) {
+ LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
+ }
+ }
+ if (HBASE_CLUSTER != null) {
+ HBASE_CLUSTER.shutdown();
+ HBASE_CLUSTER.join();
+ }
+ if (UTIL != null) {
+ UTIL.shutdownMiniZKCluster();
+ }
+ if (KDC != null) {
+ KDC.stop();
+ }
+ }
+
+ @Test
+ public void testBasicReadWrite() throws Exception {
+ final Entry<String,File> user1 = getUser(1);
+ String currentDirectory;
+ File file = new File(".");
+ currentDirectory = file.getAbsolutePath();
+ LOG.debug("Current working directory: " + currentDirectory);
+ LOG.debug("PQS_PORT: " + PQS_PORT);
+ LOG.debug("PQS_URL: " + PQS_URL);
+ ArrayList<String> cmdList = new ArrayList<>();
+ // This assumes the test is being run from phoenix/phoenix-queryserver
+ cmdList.add(Paths.get(currentDirectory, "src", "it", "bin", "test_phoenixdb.sh").toString());
+ cmdList.add(Paths.get(currentDirectory, "..", "python").toString());
+ cmdList.add(user1.getKey() + "@" + KDC.getRealm());
+ cmdList.add(user1.getValue().getAbsolutePath());
+ final String osName = System.getProperty("os.name").toLowerCase();
+ final Kdc kdcType;
+ final String kdcImpl = System.getProperty("PHOENIXDB_KDC_IMPL", "");
+ if (kdcImpl.isEmpty()) {
+ if (osName.indexOf("mac") >= 0) {
+ kdcType = Kdc.HEIMDAL;
+ } else {
+ kdcType = Kdc.MIT;
+ }
+ } else if (kdcImpl.trim().equalsIgnoreCase(Kdc.HEIMDAL.name())) {
+ kdcType = Kdc.HEIMDAL;
+ } else {
+ kdcType = Kdc.MIT;
+ }
+ LOG.info("Generating krb5.conf for KDC type:'{}'. OS='{}', PHOENIXDB_KDC_IMPL='{}'", kdcType, osName, kdcImpl);
+ File krb5ConfFile = null;
+ switch (kdcType) {
+ // It appears that we cannot generate a krb5.conf that is compatible with both MIT Kerberos
+ // and Heimdal Kerberos that works with MiniKdc. MiniKdc forces a choice between either UDP
+ // or TCP for the KDC port. If we could have MiniKdc support both UDP and TCP, then we might be
+ // able to converge on a single krb5.conf for both MIT and Heimdal.
+ //
+ // With the below Heimdal configuration, MIT kerberos will fail on a DNS lookup to the hostname
+ // "tcp/localhost" instead of pulling off the "tcp/" prefix.
+ case HEIMDAL:
+ int kdcPort = KDC.getPort();
+ LOG.info("MINIKDC PORT " + kdcPort);
+ // Render a Heimdal compatible krb5.conf
+ // Currently kinit will only try tcp if the KDC is defined as
+ // kdc = tcp/hostname:port
+ StringBuilder krb5conf = new StringBuilder();
+ krb5conf.append("[libdefaults]\n");
+ krb5conf.append(" default_realm = EXAMPLE.COM\n");
+ krb5conf.append(" udp_preference_limit = 1\n");
+ krb5conf.append("\n");
+ krb5conf.append("[realms]\n");
+ krb5conf.append(" EXAMPLE.COM = {\n");
+ krb5conf.append(" kdc = localhost:");
+ krb5conf.append(kdcPort);
+ krb5conf.append("\n");
+ krb5conf.append(" kdc = tcp/localhost:");
+ krb5conf.append(kdcPort);
+ krb5conf.append("\n");
+ krb5conf.append(" }\n");
+
+ LOG.info("Writing Heimdal style krb5.conf");
+ LOG.info(krb5conf.toString());
+ krb5ConfFile = File.createTempFile("krb5.conf", null);
+ try (FileOutputStream fos = new FileOutputStream(krb5ConfFile)) {
+ fos.write(krb5conf.toString().getBytes());
+ }
+ LOG.info("krb5.conf written to " + krb5ConfFile.getAbsolutePath());
+ cmdList.add(krb5ConfFile.getAbsolutePath());
+ break;
+ case MIT:
+ cmdList.add(System.getProperty("java.security.krb5.conf"));
+ LOG.info("Using miniKDC provided krb5.conf " + KDC.getKrb5conf().getAbsolutePath());
+ break;
+ default:
+ throw new RuntimeException("Unhandled KDC type: " + kdcType);
+ }
+
+ cmdList.add(Integer.toString(PQS_PORT));
+ cmdList.add(Paths.get(currentDirectory, "src", "it", "bin", "test_phoenixdb.py").toString());
+
+ Process runPythonProcess = new ProcessBuilder(cmdList).start();
+ BufferedReader processOutput = new BufferedReader(new InputStreamReader(runPythonProcess.getInputStream()));
+ BufferedReader processError = new BufferedReader(new InputStreamReader(runPythonProcess.getErrorStream()));
+ int exitCode = runPythonProcess.waitFor();
+
+ // dump stdout and stderr
+ while (processOutput.ready()) {
+ LOG.info(processOutput.readLine());
+ }
+ while (processError.ready()) {
+ LOG.error(processError.readLine());
+ }
+
+ // Not managed by miniKDC so we have to clean up
+ if (krb5ConfFile != null) {
+ krb5ConfFile.delete();
+ }
+
+ assertEquals("Subprocess exited with errors", 0, exitCode);
+ }
+
+ byte[] copyBytes(byte[] src, int offset, int length) {
+ byte[] dest = new byte[length];
+ System.arraycopy(src, offset, dest, 0, length);
+ return dest;
+ }
+}
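
With an illustrative MiniKdc port of 60001, the Heimdal-style krb5.conf rendered by
the HEIMDAL branch above comes out roughly as:

    [libdefaults]
     default_realm = EXAMPLE.COM
     udp_preference_limit = 1

    [realms]
     EXAMPLE.COM = {
      kdc = localhost:60001
      kdc = tcp/localhost:60001
     }

Both kdc entries point at the same port; the tcp/ prefixed form is what makes
Heimdal's kinit use TCP, per the comment above.
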
diff --git a/queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java b/queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
new file mode 100644
index 0000000..db08908
--- /dev/null
+++ b/queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
+import org.apache.calcite.avatica.server.ServerCustomizer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.queryserver.server.ServerCustomizersFactory;
+import org.apache.phoenix.util.InstanceResolver;
+import org.eclipse.jetty.security.ConstraintMapping;
+import org.eclipse.jetty.security.ConstraintSecurityHandler;
+import org.eclipse.jetty.security.HashLoginService;
+import org.eclipse.jetty.security.authentication.BasicAuthenticator;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.util.security.Constraint;
+import org.eclipse.jetty.util.security.Credential;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ServerCustomizersIT extends BaseHBaseManagedTimeIT {
+ private static final Logger LOG = LoggerFactory.getLogger(ServerCustomizersIT.class);
+ private static final String USER_AUTHORIZED = "user3";
+ private static final String USER_NOT_AUTHORIZED = "user1";
+ private static final String USER_PW = "s3cr3t";
+
+ private static QueryServerTestUtil PQS_UTIL;
+
+ @Rule
+ public ExpectedException expected = ExpectedException.none();
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ Configuration conf = getTestClusterConfig();
+ conf.set(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED, "true");
+ PQS_UTIL = new QueryServerTestUtil(conf);
+ PQS_UTIL.startLocalHBaseCluster(ServerCustomizersIT.class);
+ // Register a test jetty server customizer
+ InstanceResolver.clearSingletons();
+ InstanceResolver.getSingleton(ServerCustomizersFactory.class, new ServerCustomizersFactory() {
+ @Override
+ public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
+ AvaticaServerConfiguration avaticaServerConfiguration) {
+ return Collections.<ServerCustomizer<Server>>singletonList(new TestServerCustomizer());
+ }
+ });
+ PQS_UTIL.startQueryServer();
+ }
+
+ @AfterClass
+ public static void teardown() throws Exception {
+ // Remove custom singletons for future tests
+ InstanceResolver.clearSingletons();
+ if (PQS_UTIL != null) {
+ PQS_UTIL.stopQueryServer();
+ PQS_UTIL.stopLocalHBaseCluster();
+ }
+ }
+
+ @Test
+ public void testUserAuthorized() throws Exception {
+ try (Connection conn = DriverManager.getConnection(PQS_UTIL.getUrl(
+ getBasicAuthParams(USER_AUTHORIZED)));
+ Statement stmt = conn.createStatement()) {
+ Assert.assertFalse("user3 should have access", stmt.execute(
+ "create table "+ServerCustomizersIT.class.getSimpleName()+" (pk integer not null primary key)"));
+ }
+ }
+
+ @Test
+ public void testUserNotAuthorized() throws Exception {
+ expected.expect(RuntimeException.class);
+ expected.expectMessage("HTTP/401");
+ try (Connection conn = DriverManager.getConnection(PQS_UTIL.getUrl(
+ getBasicAuthParams(USER_NOT_AUTHORIZED)));
+ Statement stmt = conn.createStatement()) {
+ Assert.assertFalse(stmt.execute(
+ "select access from database"));
+ }
+ }
+
+ private Map<String, String> getBasicAuthParams(String user) {
+ Map<String, String> params = new HashMap<>();
+ params.put("authentication", "BASIC");
+ params.put("avatica_user", user);
+ params.put("avatica_password", USER_PW);
+ return params;
+ }
+
+ /**
+ * Contrived customizer that enables BASIC auth for a single user
+ */
+ public static class TestServerCustomizer implements ServerCustomizer<Server> {
+ @Override
+ public void customize(Server server) {
+ LOG.debug("Customizing server to allow requests for {}", USER_AUTHORIZED);
+ HashLoginService login = new HashLoginService();
+ login.putUser(USER_AUTHORIZED, Credential.getCredential(USER_PW), new String[] {"users"});
+ login.setName("users");
+
+ Constraint constraint = new Constraint();
+ constraint.setName(Constraint.__BASIC_AUTH);
+ constraint.setRoles(new String[]{"users"});
+ constraint.setAuthenticate(true);
+
+ ConstraintMapping cm = new ConstraintMapping();
+ cm.setConstraint(constraint);
+ cm.setPathSpec("/*");
+
+ ConstraintSecurityHandler security = new ConstraintSecurityHandler();
+ security.setAuthenticator(new BasicAuthenticator());
+ security.setRealmName("users");
+ security.addConstraintMapping(cm);
+ security.setLoginService(login);
+
+ // chain the PQS handler to security
+ security.setHandler(server.getHandlers()[0]);
+ server.setHandler(security);
+ }
+ }
+}
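
Combining getBasicAuthParams() with QueryServerTestUtil.getUrl(), the connection
string the authorized-user test drives looks roughly like this (port illustrative,
and getUrl is assumed to render the standard thin-driver form):

    jdbc:phoenix:thin:url=http://localhost:8765;serialization=PROTOBUF;authentication=BASIC;avatica_user=user3;avatica_password=s3cr3t

The Avatica thin driver reads authentication/avatica_user/avatica_password from the
url, so the HTTP layer configured by TestServerCustomizer, not Phoenix itself,
enforces the login.
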
diff --git a/queryserver/src/it/resources/log4j.properties b/queryserver/src/it/resources/log4j.properties
new file mode 100644
index 0000000..f90cf16
--- /dev/null
+++ b/queryserver/src/it/resources/log4j.properties
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=DEBUG,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+
+log4j.logger.org.apache.zookeeper=ERROR
+
+# Suppresses junk from minikdc
+log4j.logger.org.mortbay.log=WARN
+log4j.logger.org.apache.directory=WARN
+log4j.logger.net.sf.ehcache=WARN
+# Suppress the "no group for user" spamming
+log4j.logger.org.apache.hadoop.security.UserGroupInformation=ERROR
diff --git a/queryserver/src/main/java/org/apache/phoenix/loadbalancer/service/LoadBalanceZookeeperConf.java b/queryserver/src/main/java/org/apache/phoenix/loadbalancer/service/LoadBalanceZookeeperConf.java
new file mode 100644
index 0000000..afce5be
--- /dev/null
+++ b/queryserver/src/main/java/org/apache/phoenix/loadbalancer/service/LoadBalanceZookeeperConf.java
@@ -0,0 +1,42 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.loadbalancer.service;
+
+import com.google.common.net.HostAndPort;
+import org.apache.zookeeper.data.ACL;
+
+import java.util.List;
+
+
+public interface LoadBalanceZookeeperConf {
+
+ String getQueryServerBasePath();
+
+ String getServiceName();
+
+ String getZkConnectString();
+
+ List<ACL> getAcls();
+
+ String getParentPath();
+
+ String getFullPathToNode(HostAndPort hostAndPort);
+
+
+}
diff --git a/queryserver/src/main/java/org/apache/phoenix/queryserver/register/Registry.java b/queryserver/src/main/java/org/apache/phoenix/queryserver/register/Registry.java
new file mode 100644
index 0000000..598fc5a
--- /dev/null
+++ b/queryserver/src/main/java/org/apache/phoenix/queryserver/register/Registry.java
@@ -0,0 +1,48 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.register;
+
+
+import org.apache.phoenix.loadbalancer.service.LoadBalanceZookeeperConf;
+
+/**
+ * Registry interface for registering and unregistering
+ * a query server with a service locator.
+ */
+public interface Registry {
+
+ /**
+ * Unregisters the server with ZooKeeper. All cleanup
+ * is done in this method.
+ * @throws Exception
+ */
+ void unRegisterServer() throws Exception;
+
+ /**
+ * Registers the server with the service locator (ZooKeeper).
+ * @param configuration - HBase configuration
+ * @param port - port for the PQS server
+ * @param connectString - ZooKeeper connect string
+ * @param pqsHost - host for the PQS server.
+ * @throws Exception
+ */
+ void registerServer(LoadBalanceZookeeperConf configuration, int port,
+ String connectString, String pqsHost) throws Exception;
+
+}
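
A hedged sketch of one possible implementation of this interface, using Apache
Curator; the class name, the ephemeral-znode approach, and the payload format are
assumptions for illustration, not part of this commit:

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.ExponentialBackoffRetry;
    import org.apache.phoenix.loadbalancer.service.LoadBalanceZookeeperConf;
    import org.apache.zookeeper.CreateMode;
    import com.google.common.net.HostAndPort;

    public class ZookeeperRegistrySketch implements Registry {
        private CuratorFramework client;

        @Override
        public void registerServer(LoadBalanceZookeeperConf configuration, int port,
                String connectString, String pqsHost) throws Exception {
            client = CuratorFrameworkFactory.newClient(connectString,
                new ExponentialBackoffRetry(1000, 3));
            client.start();
            String node = configuration.getFullPathToNode(HostAndPort.fromParts(pqsHost, port));
            // Ephemeral: the registration vanishes automatically if PQS dies.
            client.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL)
                .forPath(node, (pqsHost + ":" + port).getBytes());
        }

        @Override
        public void unRegisterServer() throws Exception {
            if (client != null) {
                client.close(); // closing the session removes the ephemeral node
            }
        }
    }
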
diff --git a/queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
new file mode 100644
index 0000000..33fd590
--- /dev/null
+++ b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+
+
+public interface AvaticaServerConfigurationFactory {
+
+ AvaticaServerConfiguration getAvaticaServerConfiguration(Configuration conf, UserGroupInformation ugi);
+
+ class AvaticaServerConfigurationFactoryImpl implements AvaticaServerConfigurationFactory {
+
+ @Override
+ public AvaticaServerConfiguration getAvaticaServerConfiguration(Configuration conf, UserGroupInformation ugi) {
+ return null;
+ }
+ }
+
+}
diff --git a/queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactory.java b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactory.java
new file mode 100644
index 0000000..02344a3
--- /dev/null
+++ b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactory.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import org.apache.calcite.avatica.Meta;
+import org.apache.hadoop.conf.Configurable;
+
+/**
+ * A {@link Meta.Factory} that can also respect Hadoop
+ * {@link org.apache.hadoop.conf.Configuration} objects.
+ */
+public interface PhoenixMetaFactory extends Meta.Factory, Configurable {
+}
diff --git a/queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactoryImpl.java b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactoryImpl.java
new file mode 100644
index 0000000..c74d2c9
--- /dev/null
+++ b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactoryImpl.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import com.google.common.base.Preconditions;
+import org.apache.calcite.avatica.Meta;
+import org.apache.calcite.avatica.jdbc.JdbcMeta;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.util.QueryUtil;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Bridge between Phoenix and Avatica.
+ */
+public class PhoenixMetaFactoryImpl extends Configured implements PhoenixMetaFactory {
+
+ // invoked via reflection
+ public PhoenixMetaFactoryImpl() {
+ super(HBaseConfiguration.create());
+ }
+
+ // invoked via reflection
+ public PhoenixMetaFactoryImpl(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ public Meta create(List<String> args) {
+ Configuration conf = Preconditions.checkNotNull(getConf(), "Configuration must not be null.");
+ Properties info = new Properties();
+ info.putAll(conf.getValByRegex("avatica.*"));
+ try {
+ final String url;
+ if (args.size() == 0) {
+ url = QueryUtil.getConnectionUrl(info, conf);
+ } else if (args.size() == 1) {
+ url = args.get(0);
+ } else {
+ throw new RuntimeException(
+ "0 or 1 argument expected. Received " + Arrays.toString(args.toArray()));
+ }
+ // TODO: what about -D configs passed in from cli? How do they get pushed down?
+ return new JdbcMeta(url, info);
+ } catch (SQLException | ClassNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ }
+}
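
A short usage sketch of the zero-argument path, where the JDBC url is derived from
the Configuration (the wiring into Avatica's LocalService mirrors what QueryServer
does below):

    Configuration conf = HBaseConfiguration.create();
    PhoenixMetaFactory factory = new PhoenixMetaFactoryImpl(conf);
    Meta meta = factory.create(Collections.<String>emptyList());
    Service service = new LocalService(meta);

LocalService and the Meta interface are the Avatica pieces that the HttpServer
builder in QueryServer.java ultimately consumes.
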
diff --git a/queryserver/src/main/java/org/apache/phoenix/queryserver/server/QueryServer.java b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/QueryServer.java
new file mode 100644
index 0000000..4766394
--- /dev/null
+++ b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/QueryServer.java
@@ -0,0 +1,606 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+import org.apache.calcite.avatica.Meta;
+import org.apache.calcite.avatica.remote.Driver;
+import org.apache.calcite.avatica.remote.LocalService;
+import org.apache.calcite.avatica.remote.Service;
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
+import org.apache.calcite.avatica.server.DoAsRemoteUserCallback;
+import org.apache.calcite.avatica.server.HttpServer;
+import org.apache.calcite.avatica.server.RemoteUserExtractor;
+import org.apache.calcite.avatica.server.RemoteUserExtractionException;
+import org.apache.calcite.avatica.server.HttpRequestRemoteUserExtractor;
+import org.apache.calcite.avatica.server.HttpQueryStringParameterRemoteUserExtractor;
+import org.apache.calcite.avatica.server.ServerCustomizer;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.loadbalancer.service.LoadBalanceZookeeperConf;
+import org.apache.phoenix.queryserver.register.Registry;
+import org.apache.phoenix.util.InstanceResolver;
+import org.eclipse.jetty.server.Server;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.net.InetAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.ServiceConfigurationError;
+import java.util.ServiceLoader;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import javax.servlet.http.HttpServletRequest;
+
+import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_QUERY_SERVER_CUSTOM_AUTH_ENABLED;
+
+/**
+ * A query server for Phoenix over Calcite's Avatica.
+ */
+public final class QueryServer extends Configured implements Tool, Runnable {
+
+ protected static final Log LOG = LogFactory.getLog(QueryServer.class);
+
+ private final String[] argv;
+ private final CountDownLatch runningLatch = new CountDownLatch(1);
+ private HttpServer server = null;
+ private int retCode = 0;
+ private Throwable t = null;
+ private Registry registry;
+
+ /**
+ * Log information about the currently running JVM.
+ */
+ public static void logJVMInfo() {
+ // Print out vm stats before starting up.
+ RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
+ if (runtime != null) {
+ LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +
+ runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
+ LOG.info("vmInputArguments=" + runtime.getInputArguments());
+ }
+ }
+
+ /**
+ * Logs information about the currently running JVM process including
+ * the environment variables. Logging of env vars can be disabled by
+ * setting {@code "phoenix.envvars.logging.disabled"} to {@code "true"}.
+ * <p>If enabled, you can also exclude environment variables containing
+ * certain substrings by setting {@code "phoenix.envvars.logging.skipwords"}
+ * to a comma-separated list of such substrings.
+ */
+ public static void logProcessInfo(Configuration conf) {
+ // log environment variables unless asked not to
+ if (conf == null || !conf.getBoolean(QueryServices.QUERY_SERVER_ENV_LOGGING_ATTRIB, false)) {
+ Set<String> skipWords = new HashSet<String>(
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_SKIP_WORDS);
+ if (conf != null) {
+ String[] confSkipWords = conf.getStrings(
+ QueryServices.QUERY_SERVER_ENV_LOGGING_SKIPWORDS_ATTRIB);
+ if (confSkipWords != null) {
+ skipWords.addAll(Arrays.asList(confSkipWords));
+ }
+ }
+
+ nextEnv:
+ for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
+ String key = entry.getKey().toLowerCase();
+ String value = entry.getValue().toLowerCase();
+ // exclude variables which may contain skip words
+ for(String skipWord : skipWords) {
+ if (key.contains(skipWord) || value.contains(skipWord))
+ continue nextEnv;
+ }
+ LOG.info("env:"+entry);
+ }
+ }
+ // and JVM info
+ logJVMInfo();
+ }
+
+ /** Constructor for use from {@link org.apache.hadoop.util.ToolRunner}. */
+ public QueryServer() {
+ this(null, null);
+ }
+
+ /** Constructor for use as {@link java.lang.Runnable}. */
+ public QueryServer(String[] argv, Configuration conf) {
+ this.argv = argv;
+ setConf(conf);
+ }
+
+ /**
+ * @return the port number this instance is bound to, or {@code -1} if the server is not running.
+ */
+ @VisibleForTesting
+ public int getPort() {
+ if (server == null) return -1;
+ return server.getPort();
+ }
+
+ /**
+ * @return the return code from running as a {@link Tool}.
+ */
+ @VisibleForTesting
+ public int getRetCode() {
+ return retCode;
+ }
+
+ /**
+ * @return the throwable from an unsuccessful run, or null otherwise.
+ */
+ @VisibleForTesting
+ public Throwable getThrowable() {
+ return t;
+ }
+
+ /** Calling thread waits until the server is running. */
+ public void awaitRunning() throws InterruptedException {
+ runningLatch.await();
+ }
+
+ /** Calling thread waits up to the given timeout for the server to be running. */
+ public void awaitRunning(long timeout, TimeUnit unit) throws InterruptedException {
+ runningLatch.await(timeout, unit);
+ }
+
+ @Override
+ public int run(String[] args) throws Exception {
+ logProcessInfo(getConf());
+ final boolean loadBalancerEnabled = getConf().getBoolean(QueryServices.PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED,
+ QueryServicesOptions.DEFAULT_PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED);
+ try {
+ final boolean isKerberos = "kerberos".equalsIgnoreCase(getConf().get(
+ QueryServices.QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB));
+ final boolean disableSpnego = getConf().getBoolean(QueryServices.QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED);
+ String hostname;
+ final boolean disableLogin = getConf().getBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN);
+
+ // handle secure cluster credentials
+ if (isKerberos && !disableLogin) {
+ hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
+ getConf().get(QueryServices.QUERY_SERVER_DNS_INTERFACE_ATTRIB, "default"),
+ getConf().get(QueryServices.QUERY_SERVER_DNS_NAMESERVER_ATTRIB, "default")));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Login to " + hostname + " using " + getConf().get(
+ QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB)
+ + " and principal " + getConf().get(
+ QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB) + ".");
+ }
+ SecurityUtil.login(getConf(), QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
+ QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB, hostname);
+ LOG.info("Login successful.");
+ } else {
+ hostname = InetAddress.getLocalHost().getHostName();
+ LOG.info(" Kerberos is off and hostname is : "+hostname);
+ }
+
+ int port = getConf().getInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_HTTP_PORT);
+ LOG.debug("Listening on port " + port);
+
+ // Update proxyuser configuration for impersonation
+ ProxyUsers.refreshSuperUserGroupsConfiguration(getConf());
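+ // Note: for the impersonation checks below to pass, the server user must be whitelisted
+ // in Hadoop's proxyuser settings, e.g. (illustrative values; "pqsuser" is a placeholder):
+ //   hadoop.proxyuser.pqsuser.groups = *
+ //   hadoop.proxyuser.pqsuser.hosts  = *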
+
+ // Start building the Avatica HttpServer
+ final HttpServer.Builder<Server> builder = HttpServer.Builder.<Server>newBuilder().withPort(port);
+
+ UserGroupInformation ugi = getUserGroupInformation();
+
+ AvaticaServerConfiguration avaticaServerConfiguration = null;
+
+ // RemoteUserCallbacks and RemoteUserExtractor are part of AvaticaServerConfiguration
+ // Hence they should be customizable when using QUERY_SERVER_CUSTOM_AUTH_ENABLED
+ // Handlers should be customized via ServerCustomizers
+ if (getConf().getBoolean(QueryServices.QUERY_SERVER_CUSTOM_AUTH_ENABLED,
+ DEFAULT_QUERY_SERVER_CUSTOM_AUTH_ENABLED)) {
+ avaticaServerConfiguration = enableCustomAuth(builder, getConf(), ugi);
+ } else {
+ if (isKerberos) {
+ // Enable client auth when using Kerberos auth for HBase
+ configureClientAuthentication(builder, disableSpnego, ugi);
+ }
+ setRemoteUserExtractorIfNecessary(builder, getConf());
+ setHandler(args, builder);
+ }
+
+ enableServerCustomizersIfNecessary(builder, getConf(), avaticaServerConfiguration);
+
+ // Build and start the HttpServer
+ server = builder.build();
+ server.start();
+ if (loadBalancerEnabled) {
+ registerToServiceProvider(hostname);
+ }
+ runningLatch.countDown();
+ server.join();
+ return 0;
+ } catch (Throwable t) {
+ LOG.fatal("Unrecoverable service error. Shutting down.", t);
+ this.t = t;
+ return -1;
+ } finally {
+ if (loadBalancerEnabled) {
+ unRegister();
+ }
+ }
+ }
+
+ @VisibleForTesting
+ void configureClientAuthentication(final HttpServer.Builder builder, boolean disableSpnego, UserGroupInformation ugi) throws IOException {
+
+ // Enable SPNEGO for client authentication unless it's explicitly disabled
+ if (!disableSpnego) {
+ configureSpnegoAuthentication(builder, ugi);
+ }
+ configureCallBack(builder, ugi);
+ }
+
+ @VisibleForTesting
+ void configureSpnegoAuthentication(HttpServer.Builder builder, UserGroupInformation ugi) {
+ String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
+ File keytab = new File(keytabPath);
+ String httpKeytabPath =
+ getConf().get(QueryServices.QUERY_SERVER_HTTP_KEYTAB_FILENAME_ATTRIB, null);
+ String httpPrincipal =
+ getConf().get(QueryServices.QUERY_SERVER_KERBEROS_HTTP_PRINCIPAL_ATTRIB, null);
+ // Backwards compat for a configuration key change
+ if (httpPrincipal == null) {
+ httpPrincipal =
+ getConf().get(QueryServices.QUERY_SERVER_KERBEROS_HTTP_PRINCIPAL_ATTRIB_LEGACY, null);
+ }
+ File httpKeytab = null;
+ if (null != httpKeytabPath) {
+ httpKeytab = new File(httpKeytabPath);
+ }
+
+ String realmsString = getConf().get(QueryServices.QUERY_SERVER_KERBEROS_ALLOWED_REALMS, null);
+ String[] additionalAllowedRealms = null;
+ if (null != realmsString) {
+ additionalAllowedRealms = StringUtils.split(realmsString, ',');
+ }
+ if (null != httpKeytabPath && null != httpPrincipal) {
+ builder.withSpnego(httpPrincipal, additionalAllowedRealms).withAutomaticLogin(httpKeytab);
+ } else {
+ builder.withSpnego(ugi.getUserName(), additionalAllowedRealms)
+ .withAutomaticLogin(keytab);
+ }
+ }
+
+ @VisibleForTesting
+ UserGroupInformation getUserGroupInformation() throws IOException {
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ LOG.debug("Current user is " + ugi);
+ if (!ugi.hasKerberosCredentials()) {
+ ugi = UserGroupInformation.getLoginUser();
+ LOG.debug("Current user does not have Kerberos credentials, using instead " + ugi);
+ }
+ return ugi;
+ }
+
+ @VisibleForTesting
+ void configureCallBack(HttpServer.Builder<Server> builder, UserGroupInformation ugi) {
+ builder.withImpersonation(new PhoenixDoAsCallback(ugi, getConf()));
+ }
+
+ private void setHandler(String[] args, HttpServer.Builder<Server> builder) throws Exception {
+ Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
+ QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB, PhoenixMetaFactoryImpl.class,
+ PhoenixMetaFactory.class);
+ PhoenixMetaFactory factory =
+ factoryClass.getDeclaredConstructor(Configuration.class).newInstance(getConf());
+ Meta meta = factory.create(Arrays.asList(args));
+ Service service = new LocalService(meta);
+ builder.withHandler(service, getSerialization(getConf()));
+ }
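+
+ // Illustrative override (MyMetaFactory is hypothetical): a different Avatica Meta
+ // implementation can be supplied through the factory key read above, e.g.
+ //   conf.setClass(QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB,
+ //       MyMetaFactory.class, PhoenixMetaFactory.class);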
+
+ public synchronized void stop() {
+ server.stop();
+ }
+
+ public boolean registerToServiceProvider(String hostName) {
+ boolean success = true;
+ try {
+ LoadBalanceZookeeperConf loadBalanceConfiguration = getLoadBalanceConfiguration();
+ Preconditions.checkNotNull(loadBalanceConfiguration);
+ this.registry = getRegistry();
+ Preconditions.checkNotNull(registry);
+ String zkConnectString = loadBalanceConfiguration.getZkConnectString();
+ this.registry.registerServer(loadBalanceConfiguration, getPort(), zkConnectString, hostName);
+ } catch (Throwable ex) {
+ LOG.debug("Caught an error trying to register with the load balancer", ex);
+ success = false;
+ }
+ return success;
+ }
+
+ public LoadBalanceZookeeperConf getLoadBalanceConfiguration() {
+ LoadBalanceZookeeperConf zookeeperConfig = null;
+ try {
+ // Use one iterator rather than creating a separate one for hasNext() and next().
+ java.util.Iterator<LoadBalanceZookeeperConf> providers = ServiceLoader.load(LoadBalanceZookeeperConf.class).iterator();
+ if (providers.hasNext()) {
+ zookeeperConfig = providers.next();
+ }
+ } catch (ServiceConfigurationError ex) {
+ LOG.debug("Unable to locate the service provider for load balancer configuration", ex);
+ }
+ return zookeeperConfig;
+ }
+
+ public Registry getRegistry() {
+ Registry registry = null;
+ try {
+ // Use one iterator rather than creating a separate one for hasNext() and next().
+ java.util.Iterator<Registry> providers = ServiceLoader.load(Registry.class).iterator();
+ if (providers.hasNext()) {
+ registry = providers.next();
+ }
+ } catch (ServiceConfigurationError ex) {
+ LOG.debug("Unable to locate the zookeeper registry for the load balancer", ex);
+ }
+ return registry;
+ }
+
+ public boolean unRegister() {
+ boolean success = true;
+ try {
+ registry.unRegisterServer();
+ } catch (Throwable ex) {
+ LOG.debug("Caught an error while de-registering the query server from the load balancer", ex);
+ success = false;
+ }
+ return success;
+ }
+
+ /**
+ * Parses the serialization method from the configuration.
+ *
+ * @param conf The configuration to parse
+ * @return The Serialization method
+ */
+ Driver.Serialization getSerialization(Configuration conf) {
+ String serializationName = conf.get(QueryServices.QUERY_SERVER_SERIALIZATION_ATTRIB,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_SERIALIZATION);
+
+ Driver.Serialization serialization;
+ // Resolve the configured name against Avatica's Driver.Serialization enum
+ try {
+ serialization = Driver.Serialization.valueOf(serializationName);
+ } catch (Exception e) {
+ LOG.error("Unknown message serialization type for " + serializationName);
+ throw e;
+ }
+
+ return serialization;
+ }
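+
+ // Illustrative: the configured value must name an Avatica Driver.Serialization constant
+ // (PROTOBUF or JSON in current Avatica releases), e.g. set the key referenced by
+ // QueryServices.QUERY_SERVER_SERIALIZATION_ATTRIB to "PROTOBUF" in hbase-site.xml.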
+
+ @Override
+ public void run() {
+ try {
+ retCode = run(argv);
+ } catch (Exception e) {
+ // already logged
+ }
+ }
+
+ // add remoteUserExtractor to builder if enabled
+ @VisibleForTesting
+ public void setRemoteUserExtractorIfNecessary(HttpServer.Builder builder, Configuration conf) {
+ if (conf.getBoolean(QueryServices.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR)) {
+ builder.withRemoteUserExtractor(createRemoteUserExtractor(conf));
+ }
+ }
+
+ @VisibleForTesting
+ public void enableServerCustomizersIfNecessary(HttpServer.Builder<Server> builder,
+ Configuration conf, AvaticaServerConfiguration avaticaServerConfiguration) {
+ if (conf.getBoolean(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED)) {
+ builder.withServerCustomizers(createServerCustomizers(conf, avaticaServerConfiguration), Server.class);
+ }
+ }
+
+ @VisibleForTesting
+ public AvaticaServerConfiguration enableCustomAuth(HttpServer.Builder<Server> builder,
+ Configuration conf, UserGroupInformation ugi) {
+ AvaticaServerConfiguration avaticaServerConfiguration = createAvaticaServerConfig(conf, ugi);
+ builder.withCustomAuthentication(avaticaServerConfiguration);
+ return avaticaServerConfiguration;
+ }
+
+ private static final RemoteUserExtractorFactory DEFAULT_USER_EXTRACTOR =
+ new RemoteUserExtractorFactory.RemoteUserExtractorFactoryImpl();
+
+ private static final ServerCustomizersFactory DEFAULT_SERVER_CUSTOMIZERS =
+ new ServerCustomizersFactory.ServerCustomizersFactoryImpl();
+
+ private static final AvaticaServerConfigurationFactory DEFAULT_SERVER_CONFIG =
+ new AvaticaServerConfigurationFactory.AvaticaServerConfigurationFactoryImpl();
+
+ @VisibleForTesting
+ RemoteUserExtractor createRemoteUserExtractor(Configuration conf) {
+ RemoteUserExtractorFactory factory =
+ InstanceResolver.getSingleton(RemoteUserExtractorFactory.class, DEFAULT_USER_EXTRACTOR);
+ return factory.createRemoteUserExtractor(conf);
+ }
+
+ @VisibleForTesting
+ List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf, AvaticaServerConfiguration avaticaServerConfiguration) {
+ ServerCustomizersFactory factory =
+ InstanceResolver.getSingleton(ServerCustomizersFactory.class, DEFAULT_SERVER_CUSTOMIZERS);
+ return factory.createServerCustomizers(conf, avaticaServerConfiguration);
+ }
+
+ @VisibleForTesting
+ AvaticaServerConfiguration createAvaticaServerConfig(Configuration conf, UserGroupInformation ugi) {
+ AvaticaServerConfigurationFactory factory =
+ InstanceResolver.getSingleton(AvaticaServerConfigurationFactory.class, DEFAULT_SERVER_CONFIG);
+ return factory.getAvaticaServerConfiguration(conf, ugi);
+ }
+
+ /**
+ * Extracts the end user from the request, preferring the impersonation parameter when present.
+ */
+ static class PhoenixRemoteUserExtractor implements RemoteUserExtractor {
+ private final HttpQueryStringParameterRemoteUserExtractor paramRemoteUserExtractor;
+ private final HttpRequestRemoteUserExtractor requestRemoteUserExtractor;
+ private final String userExtractParam;
+
+ public PhoenixRemoteUserExtractor(Configuration conf) {
+ this.requestRemoteUserExtractor = new HttpRequestRemoteUserExtractor();
+ this.userExtractParam = conf.get(QueryServices.QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM);
+ this.paramRemoteUserExtractor = new HttpQueryStringParameterRemoteUserExtractor(userExtractParam);
+ }
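+
+ // Illustrative request shape (host and port are placeholders): a trusted proxy
+ // authenticates as itself and names the end user via the extractor parameter,
+ // which defaults to "doAs":
+ //   GET http://pqs-host:8765/?doAs=alice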
+
+ @Override
+ public String extract(HttpServletRequest request) throws RemoteUserExtractionException {
+ if (request.getParameter(userExtractParam) != null) {
+ String extractedUser = paramRemoteUserExtractor.extract(request);
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(request.getRemoteUser());
+ UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(extractedUser, ugi);
+
+ // Check if this user is allowed to be impersonated.
+ // Will throw AuthorizationException if the impersonation as this user is not allowed
+ try {
+ ProxyUsers.authorize(proxyUser, request.getRemoteAddr());
+ return extractedUser;
+ } catch (AuthorizationException e) {
+ throw new RemoteUserExtractionException(e.getMessage(), e);
+ }
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("The parameter (" + userExtractParam + ") used to extract the remote user doesn't exist in the request.");
+ }
+ return requestRemoteUserExtractor.extract(request);
+ }
+
+ }
+ }
+
+ /**
+ * Callback to run the Avatica server action as the remote (proxy) user instead of the server.
+ */
+ public static class PhoenixDoAsCallback implements DoAsRemoteUserCallback {
+ private final UserGroupInformation serverUgi;
+ private final LoadingCache<String,UserGroupInformation> ugiCache;
+
+ public PhoenixDoAsCallback(UserGroupInformation serverUgi, Configuration conf) {
+ this.serverUgi = Objects.requireNonNull(serverUgi);
+ this.ugiCache = CacheBuilder.newBuilder()
+ .initialCapacity(conf.getInt(QueryServices.QUERY_SERVER_UGI_CACHE_INITIAL_SIZE,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_INITIAL_SIZE))
+ .concurrencyLevel(conf.getInt(QueryServices.QUERY_SERVER_UGI_CACHE_CONCURRENCY,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_CONCURRENCY))
+ .maximumSize(conf.getLong(QueryServices.QUERY_SERVER_UGI_CACHE_MAX_SIZE,
+ QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_MAX_SIZE))
+ .build(new UgiCacheLoader(this.serverUgi));
+ }
+
+ @Override
+ public <T> T doAsRemoteUser(String remoteUserName, String remoteAddress,
+ final Callable<T> action) throws Exception {
+ // We are guaranteed by Avatica that the `remoteUserName` is properly authenticated by the
+ // time this method is called. We don't have to verify the wire credentials; we can assume
+ // the user provided valid credentials for the identity it claimed.
+
+ // Proxy this user on top of the server's user (the real user). Get a cached instance; the
+ // LoadingCache will create a new one for us if it isn't already cached.
+ UserGroupInformation proxyUser = createProxyUser(remoteUserName);
+
+ // Execute the actual call as this proxy user
+ return proxyUser.doAs(new PrivilegedExceptionAction<T>() {
+ @Override
+ public T run() throws Exception {
+ return action.call();
+ }
+ });
+ }
+
+ @VisibleForTesting
+ UserGroupInformation createProxyUser(String remoteUserName) throws ExecutionException {
+ // PHOENIX-3164 UGI's hashCode and equals methods rely on reference checks, not
+ // value-based checks. We need to make sure we return the same UGI instance for a remote
+ // user, otherwise downstream code in Phoenix and HBase may not treat two of the same
+ // calls from one user as equivalent.
+ return ugiCache.get(remoteUserName);
+ }
+
+ @VisibleForTesting
+ LoadingCache<String,UserGroupInformation> getCache() {
+ return ugiCache;
+ }
+ }
+
+ /**
+ * CacheLoader implementation which creates a "proxy" UGI instance for the given user name.
+ */
+ static class UgiCacheLoader extends CacheLoader<String,UserGroupInformation> {
+ private final UserGroupInformation serverUgi;
+
+ public UgiCacheLoader(UserGroupInformation serverUgi) {
+ this.serverUgi = Objects.requireNonNull(serverUgi);
+ }
+
+ @Override
+ public UserGroupInformation load(String remoteUserName) throws Exception {
+ return UserGroupInformation.createProxyUser(remoteUserName, serverUgi);
+ }
+ }
+
+ public static void main(String[] argv) throws Exception {
+ int ret = ToolRunner.run(HBaseConfiguration.create(), new QueryServer(), argv);
+ System.exit(ret);
+ }
+}
diff --git a/queryserver/src/main/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactory.java b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactory.java
new file mode 100644
index 0000000..ff5e0d2
--- /dev/null
+++ b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactory.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import org.apache.calcite.avatica.server.RemoteUserExtractor;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Creates remote user extractors.
+ */
+public interface RemoteUserExtractorFactory {
+
+ RemoteUserExtractor createRemoteUserExtractor(Configuration conf);
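+
+ // Illustrative custom factory (hypothetical class and header name). QueryServer resolves
+ // this interface through Phoenix's InstanceResolver, falling back to the impl below:
+ //   public class HeaderUserExtractorFactory implements RemoteUserExtractorFactory {
+ //     @Override
+ //     public RemoteUserExtractor createRemoteUserExtractor(Configuration conf) {
+ //       return request -> request.getHeader("X-Remote-User");
+ //     }
+ //   }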
+
+ class RemoteUserExtractorFactoryImpl implements RemoteUserExtractorFactory {
+ @Override
+ public RemoteUserExtractor createRemoteUserExtractor(Configuration conf) {
+ return new QueryServer.PhoenixRemoteUserExtractor(conf);
+ }
+ }
+}
diff --git a/queryserver/src/main/java/org/apache/phoenix/queryserver/server/ServerCustomizersFactory.java b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/ServerCustomizersFactory.java
new file mode 100644
index 0000000..942660a
--- /dev/null
+++ b/queryserver/src/main/java/org/apache/phoenix/queryserver/server/ServerCustomizersFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
+import org.apache.calcite.avatica.server.ServerCustomizer;
+import org.apache.hadoop.conf.Configuration;
+import org.eclipse.jetty.server.Server;
+
+/**
+ * Creates customizers for the underlying Avatica HTTP server.
+ * Allows for fine grained control of authentication, etc.
+ */
+public interface ServerCustomizersFactory {
+ /**
+ * Creates a list of customizers that will customize the server.
+ * @param conf Configuration to use
+ * @param avaticaServerConfiguration to use in case custom-auth is enabled
+ * @return List of server customizers
+ */
+ List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf, AvaticaServerConfiguration avaticaServerConfiguration);
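+
+ // Illustrative wiring (mirrors ServerCustomizersTest below; myFactory is a placeholder):
+ // seed the singleton with a custom factory and enable it via configuration, e.g.
+ //   InstanceResolver.getSingleton(ServerCustomizersFactory.class, myFactory);
+ //   conf.set(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED, "true");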
+
+ /**
+ * Factory that creates an empty list of customizers.
+ */
+ class ServerCustomizersFactoryImpl implements ServerCustomizersFactory {
+ private static final List<ServerCustomizer<Server>> EMPTY_LIST = Collections.emptyList();
+ @Override
+ public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
+ AvaticaServerConfiguration avaticaServerConfiguration) {
+ return EMPTY_LIST;
+ }
+ }
+}
diff --git a/queryserver/src/test/java/org/apache/phoenix/DriverCohabitationTest.java b/queryserver/src/test/java/org/apache/phoenix/DriverCohabitationTest.java
new file mode 100644
index 0000000..1df6d2c
--- /dev/null
+++ b/queryserver/src/test/java/org/apache/phoenix/DriverCohabitationTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix;
+
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.junit.Test;
+
+import java.sql.Driver;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Collections;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Ensure the "thick" Phoenix driver and it's "thin" counterpart can coexist on
+ * the same classpath.
+ */
+public class DriverCohabitationTest {
+
+ @Test
+ public void testDriverCohabitation() throws SQLException {
+ Driver thickDriver = null;
+ Driver thinDriver = null;
+
+ for (Driver d : Collections.list(DriverManager.getDrivers())) {
+ if (d instanceof org.apache.phoenix.jdbc.PhoenixDriver) {
+ thickDriver = d;
+ } else if (d instanceof org.apache.phoenix.queryserver.client.Driver) {
+ thinDriver = d;
+ }
+ }
+ assertNotNull("Thick driver not registered with DriverManager.", thickDriver);
+ assertNotNull("Thin driver not registered with DriverManager.", thinDriver);
+
+ final String thickUrl = QueryUtil.getUrl("localhost");
+ final String thinUrl = ThinClientUtil.getConnectionUrl("localhost", 1234);
+ assertTrue("Thick driver should accept connections like " + thickUrl,
+ thickDriver.acceptsURL(thickUrl));
+ assertFalse("Thick driver should reject connections like " + thinUrl,
+ thickDriver.acceptsURL(thinUrl));
+ assertTrue("Thin driver should accept connections like " + thinUrl,
+ thinDriver.acceptsURL(thinUrl));
+ assertFalse("Thin driver should reject connections like " + thickUrl,
+ thinDriver.acceptsURL(thickUrl));
+ }
+}
diff --git a/queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
new file mode 100644
index 0000000..fb59e0d
--- /dev/null
+++ b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class CustomAvaticaServerConfigurationTest {
+ @Test
+ public void testDefaultFactory() throws IOException {
+ QueryServer queryServer = new QueryServer();
+ UserGroupInformation ugi = queryServer.getUserGroupInformation();
+ // the default factory returns a null configuration (no custom auth)
+ AvaticaServerConfiguration avaticaServerConfiguration = queryServer.createAvaticaServerConfig(new Configuration(), ugi);
+ Assert.assertNull(avaticaServerConfiguration);
+ }
+}
diff --git a/queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java
new file mode 100644
index 0000000..c016363
--- /dev/null
+++ b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.phoenix.queryserver.server.QueryServer.PhoenixDoAsCallback;
+import org.junit.Test;
+
+/**
+ * Tests for the authorization callback hook Avatica provides for Phoenix to implement.
+ */
+public class PhoenixDoAsCallbackTest {
+
+ @Test
+ public void ugiInstancesAreCached() throws Exception {
+ Configuration conf = new Configuration(false);
+ UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
+ PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);
+
+ UserGroupInformation ugi1 = callback.createProxyUser("user1");
+ assertEquals(1, callback.getCache().size());
+ assertTrue(ugi1.getRealUser() == serverUgi);
+ UserGroupInformation ugi2 = callback.createProxyUser("user2");
+ assertEquals(2, callback.getCache().size());
+ assertTrue(ugi2.getRealUser() == serverUgi);
+
+ UserGroupInformation ugi1Reference = callback.createProxyUser("user1");
+ assertTrue(ugi1 == ugi1Reference);
+ assertEquals(2, callback.getCache().size());
+ }
+
+ @Test
+ public void proxyingUsersAreCached() throws Exception {
+ Configuration conf = new Configuration(false);
+ // The user "server" can impersonate anyone
+ conf.set("hadoop.proxyuser.server.groups", "*");
+ conf.set("hadoop.proxyuser.server.hosts", "*");
+ // Trigger ProxyUsers to refresh itself with the above configuration
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+ UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
+ PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);
+
+ UserGroupInformation user1 = callback.doAsRemoteUser("user1", "localhost:1234", new Callable<UserGroupInformation>() {
+ public UserGroupInformation call() throws Exception {
+ return UserGroupInformation.getCurrentUser();
+ }
+ });
+
+ UserGroupInformation user2 = callback.doAsRemoteUser("user2", "localhost:1235", new Callable<UserGroupInformation>() {
+ public UserGroupInformation call() throws Exception {
+ return UserGroupInformation.getCurrentUser();
+ }
+ });
+
+ UserGroupInformation user1Reference = callback.doAsRemoteUser("user1", "localhost:1234", new Callable<UserGroupInformation>() {
+ public UserGroupInformation call() throws Exception {
+ return UserGroupInformation.getCurrentUser();
+ }
+ });
+
+ // The UserGroupInformation.getCurrentUser() actually returns a new UGI instance, but the internal
+ // subject is the same. We can verify things will work as expected that way.
+ assertNotEquals(user1.hashCode(), user2.hashCode());
+ assertEquals("These should be the same (cached) instance", user1.hashCode(), user1Reference.hashCode());
+ assertEquals("These should be the same (cached) instance", user1, user1Reference);
+ }
+}
diff --git a/queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixRemoteUserExtractorTest.java b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixRemoteUserExtractorTest.java
new file mode 100644
index 0000000..9351989
--- /dev/null
+++ b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixRemoteUserExtractorTest.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import org.apache.calcite.avatica.server.HttpServer;
+import org.apache.calcite.avatica.server.RemoteUserExtractionException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.phoenix.queryserver.server.QueryServer.PhoenixRemoteUserExtractor;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpServletRequest;
+
+/**
+ * Tests for the RemoteUserExtractor method that Avatica provides for Phoenix to implement.
+ */
+public class PhoenixRemoteUserExtractorTest {
+ private static final Logger LOG = LoggerFactory.getLogger(PhoenixRemoteUserExtractorTest.class);
+
+ @Test
+ public void testWithRemoteUserExtractorSuccess() {
+ HttpServletRequest request = mock(HttpServletRequest.class);
+ when(request.getRemoteUser()).thenReturn("proxyserver");
+ when(request.getParameter("doAs")).thenReturn("enduser");
+ when(request.getRemoteAddr()).thenReturn("localhost:1234");
+
+ Configuration conf = new Configuration(false);
+ conf.set("hadoop.proxyuser.proxyserver.groups", "*");
+ conf.set("hadoop.proxyuser.proxyserver.hosts", "*");
+ conf.set("phoenix.queryserver.withRemoteUserExtractor", "true");
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+
+ PhoenixRemoteUserExtractor extractor = new PhoenixRemoteUserExtractor(conf);
+ try {
+ assertEquals("enduser", extractor.extract(request));
+ } catch (RemoteUserExtractionException e) {
+ LOG.info(e.getMessage());
+ }
+ }
+
+ @Test
+ public void testNoRemoteUserExtractorParam() {
+ HttpServletRequest request = mock(HttpServletRequest.class);
+ when(request.getRemoteUser()).thenReturn("proxyserver");
+ when(request.getRemoteAddr()).thenReturn("localhost:1234");
+
+ Configuration conf = new Configuration(false);
+ conf.set("hadoop.proxyuser.proxyserver.groups", "*");
+ conf.set("hadoop.proxyuser.proxyserver.hosts", "*");
+ conf.set("phoenix.queryserver.withRemoteUserExtractor", "true");
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+
+ PhoenixRemoteUserExtractor extractor = new PhoenixRemoteUserExtractor(conf);
+ try {
+ assertEquals("proxyserver", extractor.extract(request));
+ } catch (RemoteUserExtractionException e) {
+ LOG.info(e.getMessage());
+ }
+ }
+
+ @Test
+ public void testDoNotUseRemoteUserExtractor() {
+
+ HttpServer.Builder builder = mock(HttpServer.Builder.class);
+ Configuration conf = new Configuration(false);
+ QueryServer queryServer = new QueryServer();
+ queryServer.setRemoteUserExtractorIfNecessary(builder, conf);
+ verify(builder, never()).withRemoteUserExtractor(any(PhoenixRemoteUserExtractor.class));
+ }
+
+ @Test
+ public void testUseRemoteUserExtractor() {
+
+ HttpServer.Builder builder = mock(HttpServer.Builder.class);
+ Configuration conf = new Configuration(false);
+ conf.set("phoenix.queryserver.withRemoteUserExtractor", "true");
+ QueryServer queryServer = new QueryServer();
+ queryServer.setRemoteUserExtractorIfNecessary(builder, conf);
+ verify(builder).withRemoteUserExtractor(any(PhoenixRemoteUserExtractor.class));
+ }
+
+}
diff --git a/queryserver/src/test/java/org/apache/phoenix/queryserver/server/QueryServerConfigurationTest.java b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/QueryServerConfigurationTest.java
new file mode 100644
index 0000000..d01d2ea
--- /dev/null
+++ b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/QueryServerConfigurationTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
+import org.apache.calcite.avatica.server.DoAsRemoteUserCallback;
+import org.apache.calcite.avatica.server.HttpServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.phoenix.query.QueryServices;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.mockito.Mockito.*;
+
+public class QueryServerConfigurationTest {
+ private static final Configuration CONF = HBaseConfiguration.create();
+
+ @Rule public TemporaryFolder testFolder = new TemporaryFolder();
+
+ private HttpServer.Builder builder;
+ private QueryServer queryServer;
+ private UserGroupInformation ugi;
+
+ @Before
+ public void setup() throws IOException {
+ builder = mock(HttpServer.Builder.class);
+ queryServer = new QueryServer(new String[0], CONF);
+ ugi = queryServer.getUserGroupInformation();
+ }
+
+ @Test
+ public void testSpnegoEnabled() throws IOException {
+ setupKeytabForSpnego();
+ // SPNEGO settings will be provided to the builder when enabled
+ doReturn(builder).when(builder).withSpnego(anyString(), any(String[].class));
+ configureAndVerifyImpersonation(builder, false);
+ // A keytab file will also be provided for automatic login
+ verify(builder).withAutomaticLogin(any(File.class));
+ verify(builder, never()).withCustomAuthentication(any(AvaticaServerConfiguration.class));
+ }
+
+ @Test
+ public void testSpnegoDisabled() throws IOException {
+ setupKeytabForSpnego();
+ configureAndVerifyImpersonation(builder, true);
+ verify(builder, never()).withSpnego(anyString(), any(String[].class));
+ verify(builder, never()).withAutomaticLogin(any(File.class));
+ verify(builder, never()).withCustomAuthentication(any(AvaticaServerConfiguration.class));
+ }
+
+ @Test
+ public void testCustomServerConfiguration() {
+ queryServer.enableCustomAuth(builder, CONF, ugi);
+ verify(builder).withCustomAuthentication(any(AvaticaServerConfiguration.class));
+ verify(builder, never()).withSpnego(anyString(), any(String[].class));
+ verify(builder, never()).withAutomaticLogin(any(File.class));
+ verify(builder, never()).withImpersonation(any(DoAsRemoteUserCallback.class));
+ }
+
+ private void setupKeytabForSpnego() throws IOException {
+ File keytabFile = testFolder.newFile("test.keytab");
+ CONF.set(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB, keytabFile.getAbsolutePath());
+ }
+
+ private void configureAndVerifyImpersonation(HttpServer.Builder builder, boolean disableSpnego)
+ throws IOException {
+ queryServer.configureClientAuthentication(builder, disableSpnego, ugi);
+ verify(builder).withImpersonation(any(DoAsRemoteUserCallback.class));
+ }
+}
diff --git a/queryserver/src/test/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactoryTest.java b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactoryTest.java
new file mode 100644
index 0000000..975ee26
--- /dev/null
+++ b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactoryTest.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import org.apache.calcite.avatica.server.RemoteUserExtractor;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class RemoteUserExtractorFactoryTest {
+
+ @Test
+ public void testProvidesDefaultFactory() {
+ QueryServer queryServer = new QueryServer();
+ RemoteUserExtractor extractor = queryServer.createRemoteUserExtractor(new Configuration());
+ Assert.assertTrue(
+ "Not an instance of PhoenixRemoteUserExtractor: " + extractor.getClass().getName(),
+ extractor instanceof QueryServer.PhoenixRemoteUserExtractor);
+ }
+}
diff --git a/queryserver/src/test/java/org/apache/phoenix/queryserver/server/ServerCustomizersTest.java b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/ServerCustomizersTest.java
new file mode 100644
index 0000000..93e1e37
--- /dev/null
+++ b/queryserver/src/test/java/org/apache/phoenix/queryserver/server/ServerCustomizersTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.calcite.avatica.server.HttpServer;
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
+import org.apache.calcite.avatica.server.ServerCustomizer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.InstanceResolver;
+import org.eclipse.jetty.server.Server;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+public class ServerCustomizersTest {
+ @Before @After
+ public void clearSingletons() {
+ // clean up singletons
+ InstanceResolver.clearSingletons();
+ }
+
+ @Test
+ public void testDefaultFactory() {
+ QueryServer queryServer = new QueryServer();
+ AvaticaServerConfiguration avaticaServerConfiguration = null;
+ // the default factory creates an empty list of server customizers
+ List<ServerCustomizer<Server>> customizers =
+ queryServer.createServerCustomizers(new Configuration(), avaticaServerConfiguration);
+ Assert.assertEquals(0, customizers.size());
+ }
+
+ @Test
+ public void testUseProvidedCustomizers() {
+ AvaticaServerConfiguration avaticaServerConfiguration = null;
+ final List<ServerCustomizer<Server>> expected =
+ Collections.<ServerCustomizer<Server>> singletonList(new ServerCustomizer<Server>() {
+ @Override
+ public void customize(Server server) {
+ // no-op customizer
+ }
+ });
+ // Register the server customizer list
+ InstanceResolver.getSingleton(ServerCustomizersFactory.class, new ServerCustomizersFactory() {
+ @Override
+ public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
+ AvaticaServerConfiguration avaticaServerConfiguration) {
+ return expected;
+ }
+ });
+ Configuration conf = new Configuration(false);
+ conf.set(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED, "true");
+ QueryServer queryServer = new QueryServer();
+ List<ServerCustomizer<Server>> actual = queryServer.createServerCustomizers(conf, avaticaServerConfiguration);
+ Assert.assertEquals("Customizers are different", expected, actual);
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testEnableCustomizers() {
+ AvaticaServerConfiguration avaticaServerConfiguration = null;
+ HttpServer.Builder builder = mock(HttpServer.Builder.class);
+ Configuration conf = new Configuration(false);
+ conf.set(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED, "true");
+ QueryServer queryServer = new QueryServer();
+ queryServer.enableServerCustomizersIfNecessary(builder, conf, avaticaServerConfiguration);
+ verify(builder).withServerCustomizers(anyList(), any(Class.class));
+ }
+}
\ No newline at end of file