Posted to commits@phoenix.apache.org by ka...@apache.org on 2019/03/05 22:33:40 UTC

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5063 Create a new repo for the phoenix query server (#422)

This is an automated email from the ASF dual-hosted git repository.

karanmehta93 pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
     new dae4077  PHOENIX-5063 Create a new repo for the phoenix query server (#422)
dae4077 is described below

commit dae40779bbc985bf2d1634c61ca37cfe98371f19
Author: karanmehta93 <ka...@gmail.com>
AuthorDate: Tue Mar 5 14:33:35 2019 -0800

    PHOENIX-5063 Create a new repo for the phoenix query server (#422)
    
    The corresponding PR in the phoenix-queryserver project has been committed:
    https://github.com/apache/phoenix-queryserver/pull/2
---
 phoenix-queryserver-client/pom.xml                 | 203 -------
 .../apache/phoenix/queryserver/client/Driver.java  |  49 --
 .../phoenix/queryserver/client/SqllineWrapper.java |  97 ----
 .../phoenix/queryserver/client/ThinClientUtil.java |  42 --
 .../resources/META-INF/services/java.sql.Driver    |   1 -
 .../org-apache-phoenix-remote-jdbc.properties      |  25 -
 phoenix-queryserver/pom.xml                        | 194 -------
 .../src/build/query-server-runnable.xml            |  52 --
 phoenix-queryserver/src/it/bin/test_phoenixdb.py   |  39 --
 phoenix-queryserver/src/it/bin/test_phoenixdb.sh   |  79 ---
 .../HttpParamImpersonationQueryServerIT.java       | 438 ---------------
 .../phoenix/end2end/QueryServerBasicsIT.java       | 346 ------------
 .../phoenix/end2end/QueryServerTestUtil.java       | 187 -------
 .../apache/phoenix/end2end/QueryServerThread.java  |  45 --
 .../phoenix/end2end/SecureQueryServerIT.java       | 323 -----------
 .../end2end/SecureQueryServerPhoenixDBIT.java      | 424 --------------
 .../phoenix/end2end/ServerCustomizersIT.java       | 149 -----
 .../src/it/resources/log4j.properties              |  68 ---
 .../service/LoadBalanceZookeeperConf.java          |  42 --
 .../phoenix/queryserver/register/Registry.java     |  48 --
 .../server/AvaticaServerConfigurationFactory.java  |  37 --
 .../queryserver/server/PhoenixMetaFactory.java     |  28 -
 .../queryserver/server/PhoenixMetaFactoryImpl.java |  76 ---
 .../phoenix/queryserver/server/QueryServer.java    | 606 ---------------------
 .../server/RemoteUserExtractorFactory.java         |  36 --
 .../server/ServerCustomizersFactory.java           |  52 --
 .../org/apache/phoenix/DriverCohabitationTest.java |  65 ---
 .../CustomAvaticaServerConfigurationTest.java      |  37 --
 .../server/PhoenixDoAsCallbackTest.java            |  89 ---
 .../server/PhoenixRemoteUserExtractorTest.java     | 108 ----
 .../server/QueryServerConfigurationTest.java       |  92 ----
 .../server/RemoteUserExtractorFactoryTest.java     |  35 --
 .../queryserver/server/ServerCustomizersTest.java  |  92 ----
 pom.xml                                            |   2 -
 34 files changed, 4206 deletions(-)

diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
deleted file mode 100644
index af5b91c..0000000
--- a/phoenix-queryserver-client/pom.xml
+++ /dev/null
@@ -1,203 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>4.15.0-HBase-1.4-SNAPSHOT</version>
-  </parent>
-  <artifactId>phoenix-queryserver-client</artifactId>
-  <name>Phoenix Query Server Client</name>
-  <description>A thin JDBC client for interacting with the query server</description>
-
-  <licenses>
-    <license>
-      <name>The Apache Software License, Version 2.0</name>
-      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
-      <distribution>repo</distribution>
-      <comments/>
-    </license>
-  </licenses>
-
-  <organization>
-    <name>Apache Software Foundation</name>
-    <url>http://www.apache.org</url>
-  </organization>
-
-  <properties>
-    <top.dir>${project.basedir}/..</top.dir>
-    <shaded.package>org.apache.phoenix.shaded</shaded.package>
-    <protobuf-java.version>3.1.0</protobuf-java.version>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <skipAssembly>true</skipAssembly>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>thin-client</id>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-            <configuration>
-              <finalName>phoenix-${project.version}-thin-client</finalName>
-
-              <transformers>
-                <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>README.md</resource>
-                  <file>${project.basedir}/../README.md</file>
-                </transformer>
-                <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>LICENSE.txt</resource>
-                  <file>${project.basedir}/../LICENSE</file>
-                </transformer>
-                <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>NOTICE</resource>
-                  <file>${project.basedir}/../NOTICE</file>
-                </transformer>
-                <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
-              </transformers>
-              <filters>
-                <filter>
-                  <artifact>*:*</artifact>
-                  <excludes>
-                    <exclude>META-INF/*.SF</exclude>
-                    <exclude>META-INF/*.DSA</exclude>
-                    <exclude>META-INF/*.RSA</exclude>
-                    <exclude>META-INF/license/*</exclude>
-                    <exclude>LICENSE.*</exclude>
-                    <exclude>NOTICE.*</exclude>
-                  </excludes>
-                </filter>
-              </filters>
-
-              <relocations>
-
-                <!-- COM relocation -->
-
-                <relocation>
-                  <pattern>com.fasterxml</pattern>
-                  <shadedPattern>${shaded.package}.com.fasterxml</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.google.collect</pattern>
-                  <shadedPattern>${shaded.package}.com.google.collect</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.google.protobuf</pattern>
-                  <shadedPattern>${shaded.package}.com.google.protobuf</shadedPattern>
-                </relocation>
-                <!-- ORG relocation -->
-                <relocation>
-                  <pattern>org.apache.calcite.avatica</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.calcite.avatica</shadedPattern>
-                  <!-- The protobuf messages can't be relocated due to a limitation
-                       in the Avatica protocol. -->
-                  <excludes>
-                    <exclude>org.apache.calcite.avatica.proto.*</exclude>
-                  </excludes>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.commons</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.commons</shadedPattern>
-                  <excludes>
-                    <exclude>org.apache.commons.logging.**</exclude>
-                  </excludes>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.http</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.http</shadedPattern>
-                </relocation>
-
-                <relocation>
-                  <pattern>org.fusesource</pattern>
-                  <shadedPattern>${shaded.package}.org.fusesource</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.hamcrest</pattern>
-                  <shadedPattern>${shaded.package}.org.hamcrest</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.junit</pattern>
-                  <shadedPattern>${shaded.package}.org.junit</shadedPattern>
-                </relocation>
-
-                <!-- MISC relocations -->
-
-                <relocation>
-                  <pattern>jline</pattern>
-                  <shadedPattern>${shaded.package}.jline</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>junit</pattern>
-                  <shadedPattern>${shaded.package}.junit</shadedPattern>
-                </relocation>
-
-              </relocations>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>src/main/resources/META-INF/services/java.sql.Driver</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.calcite.avatica</groupId>
-      <artifactId>avatica-core</artifactId>
-    </dependency>
-    <dependency>
-      <!-- a dependency for the thin-client uberjar -->
-      <groupId>sqlline</groupId>
-      <artifactId>sqlline</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/Driver.java b/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/Driver.java
deleted file mode 100644
index 5c8f119..0000000
--- a/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/Driver.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.client;
-
-import org.apache.calcite.avatica.DriverVersion;
-
-public class Driver extends org.apache.calcite.avatica.remote.Driver {
-
-  public static final String CONNECT_STRING_PREFIX = "jdbc:phoenix:thin:";
-
-  static {
-    new Driver().register();
-  }
-
-  public Driver() {
-    super();
-  }
-
-  @Override
-  protected DriverVersion createDriverVersion() {
-    return DriverVersion.load(
-        Driver.class,
-        "org-apache-phoenix-remote-jdbc.properties",
-        "Phoenix Remote JDBC Driver",
-        "unknown version",
-        "Apache Phoenix",
-        "unknown version");
-  }
-
-  @Override
-  protected String getConnectStringPrefix() {
-    return CONNECT_STRING_PREFIX;
-  }
-}
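
The Driver deleted above self-registers with java.sql.DriverManager from its
static initializer and claims the "jdbc:phoenix:thin:" connect-string prefix.
A minimal usage sketch, assuming a query server reachable at localhost:8765
(the endpoint and class name are placeholders, not values from this commit):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class ThinDriverSketch {
      public static void main(String[] args) throws Exception {
        // The client jar lists org.apache.phoenix.queryserver.client.Driver in
        // META-INF/services/java.sql.Driver, so no Class.forName call is needed.
        // localhost:8765 is a placeholder endpoint.
        String url = "jdbc:phoenix:thin:url=http://localhost:8765;serialization=PROTOBUF";
        try (Connection conn = DriverManager.getConnection(url)) {
          // Driver metadata comes from createDriverVersion() above.
          System.out.println(conn.getMetaData().getDriverName());
        }
      }
    }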
diff --git a/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/SqllineWrapper.java b/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/SqllineWrapper.java
deleted file mode 100644
index 7a22334..0000000
--- a/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/SqllineWrapper.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.client;
-
-import java.security.PrivilegedExceptionAction;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import sqlline.SqlLine;
-
-/**
- * Utility class which automatically performs a Kerberos login and then launches sqlline. Tries to
- * make a pre-populated ticket cache (via kinit before launching) transparently work.
- */
-public class SqllineWrapper {
-  public static final String HBASE_AUTHENTICATION_ATTR = "hbase.security.authentication";
-  public static final String QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB = "phoenix.queryserver.spnego.auth.disabled";
-  public static final boolean DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED = false;
-
-  static UserGroupInformation loginIfNecessary(Configuration conf) {
-    // Try to avoid HBase dependency too. Sadly, we have to bring in all of hadoop-common for this..
-    if ("kerberos".equalsIgnoreCase(conf.get(HBASE_AUTHENTICATION_ATTR))) {
-      // sun.security.krb5.principal is the property for setting the principal name, if that
-      // isn't set, fall back to user.name and hope for the best.
-      String principal = System.getProperty("sun.security.krb5.principal", System.getProperty("user.name"));
-      try {
-        // We got hadoop-auth via hadoop-common, so might as well use it.
-        return UserGroupInformation.getUGIFromTicketCache(null, principal);
-      } catch (Exception e) {
-        throw new RuntimeException("Kerberos login failed using ticket cache. Did you kinit?", e);
-      }
-    }
-    return null;
-  }
-
-  private static String[] updateArgsForKerberos(String[] origArgs) {
-    String[] newArgs = new String[origArgs.length];
-    for (int i = 0; i < origArgs.length; i++) {
-      String arg = origArgs[i];
-      newArgs[i] = arg;
-
-      if (arg.equals("-u")) {
-        // Get the JDBC url which is the next argument
-        i++;
-        arg = origArgs[i];
-        if (!arg.contains("authentication=")) {
-          arg = arg + ";authentication=SPNEGO";
-        }
-        newArgs[i] = arg;
-      }
-    }
-    return newArgs;
-  }
-
-  public static void main(String[] args) throws Exception {
-    final Configuration conf = new Configuration(false);
-    conf.addResource("hbase-site.xml");
-
-    // Check if the server config says SPNEGO auth is actually disabled.
-    final boolean disableSpnego = conf.getBoolean(QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB,
-        DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED);
-    if (disableSpnego) {
-      SqlLine.main(args);
-    }
-
-    UserGroupInformation ugi = loginIfNecessary(conf);
-
-    if (null != ugi) {
-      final String[] updatedArgs = updateArgsForKerberos(args);
-      ugi.doAs(new PrivilegedExceptionAction<Void>() {
-        @Override
-        public Void run() throws Exception {
-          SqlLine.main(updatedArgs);
-          return null;
-        }
-      });
-    } else {
-      SqlLine.main(args);
-    }
-  }
-
-}
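
SqllineWrapper's core pattern above is: load hbase-site.xml, pull a Kerberos
identity out of the ticket cache that an earlier kinit populated, then run the
real work inside UserGroupInformation.doAs. A condensed sketch of just that
pattern (the class name is a placeholder):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    public class TicketCacheDoAsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(false);
        conf.addResource("hbase-site.xml");
        // Same principal resolution as the wrapper: prefer the JVM's Kerberos
        // property, fall back to the OS user name.
        String principal = System.getProperty("sun.security.krb5.principal",
            System.getProperty("user.name"));
        // Reads the default ticket cache; fails if kinit was never run.
        UserGroupInformation ugi =
            UserGroupInformation.getUGIFromTicketCache(null, principal);
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override public Void run() throws Exception {
            // The Kerberos-authenticated work (sqlline, in the wrapper) runs here.
            System.out.println("Running as " + UserGroupInformation.getCurrentUser());
            return null;
          }
        });
      }
    }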
diff --git a/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/ThinClientUtil.java b/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/ThinClientUtil.java
deleted file mode 100644
index 59fe093..0000000
--- a/phoenix-queryserver-client/src/main/java/org/apache/phoenix/queryserver/client/ThinClientUtil.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.client;
-
-/**
- * Utilities for thin clients.
- */
-public final class ThinClientUtil {
-  // The default serialization is also defined in QueryServicesOptions. phoenix-queryserver-client
-  // currently doesn't depend on phoenix-core so we have to deal with the duplication.
-  private static final String DEFAULT_SERIALIZATION = "PROTOBUF";
-
-  private ThinClientUtil() {}
-
-  public static String getConnectionUrl(String hostname, int port) {
-    return getConnectionUrl("http", hostname, port);
-  }
-
-  public static String getConnectionUrl(String protocol, String hostname, int port) {
-    return getConnectionUrl(protocol, hostname, port, DEFAULT_SERIALIZATION);
-  }
-
-  public static String getConnectionUrl(String protocol, String hostname, int port, String serialization) {
-    String urlFmt = Driver.CONNECT_STRING_PREFIX + "url=%s://%s:%s;serialization=%s";
-    return String.format(urlFmt, protocol, hostname, port, serialization);
-  }
-}
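
The three getConnectionUrl overloads above differ only in how much they
default. A sketch of the URLs they produce (hostname and port are
placeholders):

    import org.apache.phoenix.queryserver.client.ThinClientUtil;

    public class UrlSketch {
      public static void main(String[] args) {
        // Hostname and port are placeholders, not values from this commit.
        System.out.println(ThinClientUtil.getConnectionUrl("pqs.example.com", 8765));
        // prints: jdbc:phoenix:thin:url=http://pqs.example.com:8765;serialization=PROTOBUF
        System.out.println(
            ThinClientUtil.getConnectionUrl("https", "pqs.example.com", 8765, "PROTOBUF"));
        // prints: jdbc:phoenix:thin:url=https://pqs.example.com:8765;serialization=PROTOBUF
      }
    }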
diff --git a/phoenix-queryserver-client/src/main/resources/META-INF/services/java.sql.Driver b/phoenix-queryserver-client/src/main/resources/META-INF/services/java.sql.Driver
deleted file mode 100644
index f94d657..0000000
--- a/phoenix-queryserver-client/src/main/resources/META-INF/services/java.sql.Driver
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.phoenix.queryserver.client.Driver
diff --git a/phoenix-queryserver-client/src/main/resources/version/org-apache-phoenix-remote-jdbc.properties b/phoenix-queryserver-client/src/main/resources/version/org-apache-phoenix-remote-jdbc.properties
deleted file mode 100644
index 7e8eb7e..0000000
--- a/phoenix-queryserver-client/src/main/resources/version/org-apache-phoenix-remote-jdbc.properties
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to you under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-driver.name=Apache Phoenix Remote JDBC Driver
-driver.version=${pom.version}
-product.name=Apache Phoenix
-product.version=${pom.version}
-jdbc.compliant=true
-driver.version.major=${version.major}
-driver.version.minor=${version.minor}
-database.version.major=${version.major}
-database.version.minor=${version.minor}
-build.timestamp=${build.timestamp}
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
deleted file mode 100644
index 48ed74d..0000000
--- a/phoenix-queryserver/pom.xml
+++ /dev/null
@@ -1,194 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>4.15.0-HBase-1.4-SNAPSHOT</version>
-  </parent>
-  <artifactId>phoenix-queryserver</artifactId>
-  <name>Phoenix Query Server</name>
-  <description>A query server for exposing Phoenix to thin clients</description>
-
-
-  <properties>
-    <top.dir>${project.basedir}/..</top.dir>
-    <shaded.package>org.apache.phoenix.shaded</shaded.package>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>query-server</id>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-            <configuration>
-              <finalName>phoenix-${project.version}-queryserver</finalName>
-              <shadedArtifactAttached>false</shadedArtifactAttached>
-              <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
-              <shadeTestJar>false</shadeTestJar>
-              <transformers>
-                <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>README.md</resource>
-                  <file>${project.basedir}/../README.md</file>
-                </transformer>
-                <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>LICENSE.txt</resource>
-                  <file>${project.basedir}/../LICENSE</file>
-                </transformer>
-                <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>NOTICE</resource>
-                  <file>${project.basedir}/../NOTICE</file>
-                </transformer>
-                <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
-              </transformers>
-              <artifactSet>
-                <includes>
-                  <include>org.apache.calcite.avatica:*</include>
-                  <include>org.eclipse.jetty:*</include>
-                  <include>javax.servlet:*</include>
-                </includes>
-              </artifactSet>
-              <filters>
-                <filter>
-                  <artifact>*:*</artifact>
-                  <excludes>
-                    <exclude>META-INF/*.SF</exclude>
-                    <exclude>META-INF/*.DSA</exclude>
-                    <exclude>META-INF/*.RSA</exclude>
-                    <exclude>META-INF/license/*</exclude>
-                    <exclude>LICENSE.*</exclude>
-                    <exclude>NOTICE.*</exclude>
-                  </excludes>
-                </filter>
-              </filters>
-              <relocations>
-                <relocation>
-                  <pattern>org.eclipse.jetty</pattern>
-                  <shadedPattern>${shaded.package}.org.eclipse.jetty</shadedPattern>
-                </relocation>
-                <!-- Calcite/Avatica is not relocated because the wire API (as of <=1.8.0) expects
-                     consistent class names on client and server. Relocating these would break
-                     backwards compatibility. -->
-              </relocations>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-queryserver-client</artifactId>
-      <exclusions>
-        <!-- Being pulled in via avatica to avoid pb2/pb3 issues.
-             When we use the "pre-shaded" avatica artifact, we don't
-             have to deal with the mess of multiple versions for protobuf.-->
-        <exclusion>
-          <groupId>org.apache.calcite.avatica</groupId>
-          <artifactId>avatica-core</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.calcite.avatica</groupId>
-      <artifactId>avatica</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.calcite.avatica</groupId>
-      <artifactId>avatica-server</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-    </dependency>
-    <!-- for tests -->
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-      <classifier>tests</classifier>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-it</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minicluster</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-testing-util</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.tephra</groupId>
-      <artifactId>tephra-core</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minikdc</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/phoenix-queryserver/src/build/query-server-runnable.xml b/phoenix-queryserver/src/build/query-server-runnable.xml
deleted file mode 100644
index d098b63..0000000
--- a/phoenix-queryserver/src/build/query-server-runnable.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
-  <id>runnable</id>
-  <formats>
-    <format>jar</format>
-  </formats>
-  <includeBaseDirectory>false</includeBaseDirectory>
-  <containerDescriptorHandlers>
-    <containerDescriptorHandler>
-      <!--
-          aggregate SPI's so that things like HDFS FileSystem works in uberjar
-          http://docs.oracle.com/javase/tutorial/sound/SPI-intro.html
-      -->
-      <handlerName>metaInf-services</handlerName>
-    </containerDescriptorHandler>
-  </containerDescriptorHandlers>
-  <dependencySets>
-    <dependencySet>
-      <unpack>true</unpack>
-      <scope>runtime</scope>
-      <outputDirectory>/</outputDirectory>
-      <includes>
-        <include>org.apache.phoenix:phoenix-queryserver</include>
-        <include>org.apache.phoenix:phoenix-queryserver-client</include>
-        <include>org.apache.calcite.avatica:*</include>
-      </includes>
-    </dependencySet>
-  </dependencySets>
-</assembly>
diff --git a/phoenix-queryserver/src/it/bin/test_phoenixdb.py b/phoenix-queryserver/src/it/bin/test_phoenixdb.py
deleted file mode 100644
index 0d5d0c6..0000000
--- a/phoenix-queryserver/src/it/bin/test_phoenixdb.py
+++ /dev/null
@@ -1,39 +0,0 @@
-############################################################################
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-############################################################################
-
-import phoenixdb
-import phoenixdb.cursor
-import sys
-
-
-if __name__ == '__main__':
-    pqs_port = sys.argv[1]
-    database_url = 'http://localhost:' + str(pqs_port) + '/'
-
-    print("CREATING PQS CONNECTION")
-    conn = phoenixdb.connect(database_url, autocommit=True, auth="SPNEGO")
-    cursor = conn.cursor()
-
-    cursor.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username VARCHAR)")
-    cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
-    cursor.execute("UPSERT INTO users VALUES (?, ?)", (2, 'user'))
-    cursor.execute("SELECT * FROM users")
-    print("RESULTS")
-    print(cursor.fetchall())
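
The deleted Python test drives PQS through the phoenixdb DB-API package. The
same round trip, expressed against the thin JDBC driver for consistency with
the other sketches here (the port is a placeholder; the integration test
passes the real one as argv[1]):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class UsersTableSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; substitute the actual PQS port.
        String url = "jdbc:phoenix:thin:url=http://localhost:8765;serialization=PROTOBUF";
        try (Connection conn = DriverManager.getConnection(url)) {
          conn.setAutoCommit(true);
          try (Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username VARCHAR)");
          }
          try (PreparedStatement ps =
                   conn.prepareStatement("UPSERT INTO users VALUES (?, ?)")) {
            ps.setInt(1, 1); ps.setString(2, "admin"); ps.executeUpdate();
            ps.setInt(1, 2); ps.setString(2, "user");  ps.executeUpdate();
          }
          try (Statement stmt = conn.createStatement();
               ResultSet rs = stmt.executeQuery("SELECT * FROM users")) {
            while (rs.next()) {
              System.out.println(rs.getInt(1) + " " + rs.getString(2));
            }
          }
        }
      }
    }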
diff --git a/phoenix-queryserver/src/it/bin/test_phoenixdb.sh b/phoenix-queryserver/src/it/bin/test_phoenixdb.sh
deleted file mode 100755
index 7309dbe..0000000
--- a/phoenix-queryserver/src/it/bin/test_phoenixdb.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env bash
-#
-############################################################################
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-############################################################################
-
-set -u
-set -x
-set -e
-
-function cleanup {
-    # Capture last command status
-    RCODE=$?
-    set +e
-    set +u
-    kdestroy
-    rm -rf $PY_ENV_PATH
-    exit $RCODE
-}
-
-trap cleanup EXIT
-
-echo "LAUNCHING SCRIPT"
-
-LOCAL_PY=$1
-PRINC=$2
-KEYTAB_LOC=$3
-KRB5_CFG_FILE=$4
-PQS_PORT=$5
-PYTHON_SCRIPT=$6
-
-PY_ENV_PATH=$( mktemp -d )
-
-virtualenv $PY_ENV_PATH
-
-pushd ${PY_ENV_PATH}/bin
-
-# conda activate does stuff with unbound variables :(
-set +u
-. activate ""
-
-popd
-
-set -u
-echo "INSTALLING COMPONENTS"
-pip install -e file:///${LOCAL_PY}/requests-kerberos
-pip install -e file:///${LOCAL_PY}/phoenixdb
-
-export KRB5_CONFIG=$KRB5_CFG_FILE
-cat $KRB5_CONFIG
-export KRB5_TRACE=/dev/stdout
-
-echo "RUNNING KINIT"
-kinit -kt $KEYTAB_LOC $PRINC
-klist
-
-unset http_proxy
-unset https_proxy
-
-echo "Working Directory is ${PWD}"
-
-echo "RUN PYTHON TEST on port $PQS_PORT"
-python $PYTHON_SCRIPT $PQS_PORT
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/HttpParamImpersonationQueryServerIT.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/HttpParamImpersonationQueryServerIT.java
deleted file mode 100644
index db27b9f..0000000
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/HttpParamImpersonationQueryServerIT.java
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedExceptionAction;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
-import org.apache.hadoop.hbase.security.access.AccessControlClient;
-import org.apache.hadoop.hbase.security.access.AccessController;
-import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.security.token.TokenProvider;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.query.ConfigurationFactory;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.queryserver.client.Driver;
-import org.apache.phoenix.queryserver.client.ThinClientUtil;
-import org.apache.phoenix.queryserver.server.QueryServer;
-import org.apache.phoenix.util.InstanceResolver;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class HttpParamImpersonationQueryServerIT {
-    private static final Log LOG = LogFactory.getLog(HttpParamImpersonationQueryServerIT.class);
-
-    private static final List<TableName> SYSTEM_TABLE_NAMES = Arrays.asList(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME,
-        PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME,
-        PhoenixDatabaseMetaData.SYSTEM_FUNCTION_HBASE_TABLE_NAME,
-        PhoenixDatabaseMetaData.SYSTEM_SCHEMA_HBASE_TABLE_NAME,
-        PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_HBASE_TABLE_NAME,
-        PhoenixDatabaseMetaData.SYSTEM_STATS_HBASE_TABLE_NAME);
-
-    private static final File TEMP_DIR = new File(getTempDirForClass());
-    private static final File KEYTAB_DIR = new File(TEMP_DIR, "keytabs");
-    private static final List<File> USER_KEYTAB_FILES = new ArrayList<>();
-
-    private static final String SPNEGO_PRINCIPAL = "HTTP/localhost";
-    private static final String PQS_PRINCIPAL = "phoenixqs/localhost";
-    private static final String SERVICE_PRINCIPAL = "securecluster/localhost";
-    private static File KEYTAB;
-
-    private static MiniKdc KDC;
-    private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-    private static LocalHBaseCluster HBASE_CLUSTER;
-    private static int NUM_CREATED_USERS;
-
-    private static ExecutorService PQS_EXECUTOR;
-    private static QueryServer PQS;
-    private static int PQS_PORT;
-    private static String PQS_URL;
-
-    private static String getTempDirForClass() {
-        StringBuilder sb = new StringBuilder(32);
-        sb.append(System.getProperty("user.dir")).append(File.separator);
-        sb.append("target").append(File.separator);
-        sb.append(HttpParamImpersonationQueryServerIT.class.getSimpleName());
-        return sb.toString();
-    }
-
-    private static void updateDefaultRealm() throws Exception {
-        // (at least) one other phoenix test triggers the caching of this field before the KDC is up
-        // which causes principal parsing to fail.
-        Field f = KerberosName.class.getDeclaredField("defaultRealm");
-        f.setAccessible(true);
-        // Default realm for MiniKDC
-        f.set(null, "EXAMPLE.COM");
-    }
-
-    private static void createUsers(int numUsers) throws Exception {
-        assertNotNull("KDC is null, was setup method called?", KDC);
-        NUM_CREATED_USERS = numUsers;
-        for (int i = 1; i <= numUsers; i++) {
-            String principal = "user" + i;
-            File keytabFile = new File(KEYTAB_DIR, principal + ".keytab");
-            KDC.createPrincipal(keytabFile, principal);
-            USER_KEYTAB_FILES.add(keytabFile);
-        }
-    }
-
-    private static Entry<String,File> getUser(int offset) {
-        Preconditions.checkArgument(offset > 0 && offset <= NUM_CREATED_USERS);
-        return Maps.immutableEntry("user" + offset, USER_KEYTAB_FILES.get(offset - 1));
-    }
-
-    /**
-     * Setup the security configuration for hdfs.
-     */
-    private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception {
-        // Set principal+keytab configuration for HDFS
-        conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
-        conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
-        conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
-        // Enable token access for HDFS blocks
-        conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
-        // Only use HTTPS (required because we aren't using "secure" ports)
-        conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
-        // Bind on localhost for spnego to have a chance at working
-        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
-        conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
-
-        // Generate SSL certs
-        File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath());
-        keystoresDir.mkdirs();
-        String sslConfDir = KeyStoreTestUtil.getClasspathDir(HttpParamImpersonationQueryServerIT.class);
-        KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false);
-
-        // Magic flag to tell hdfs to not fail on using ports above 1024
-        conf.setBoolean("ignore.secure.ports.for.testing", true);
-    }
-
-    private static void ensureIsEmptyDirectory(File f) throws IOException {
-        if (f.exists()) {
-            if (f.isDirectory()) {
-                FileUtils.deleteDirectory(f);
-            } else {
-                assertTrue("Failed to delete keytab directory", f.delete());
-            }
-        }
-        assertTrue("Failed to create keytab directory", f.mkdirs());
-    }
-
-    /**
-     * Setup and start kerberos, hbase
-     */
-    @BeforeClass
-    public static void setUp() throws Exception {
-        final Configuration conf = UTIL.getConfiguration();
-        // Ensure the dirs we need are created/empty
-        ensureIsEmptyDirectory(TEMP_DIR);
-        ensureIsEmptyDirectory(KEYTAB_DIR);
-        KEYTAB = new File(KEYTAB_DIR, "test.keytab");
-        // Start a MiniKDC
-        KDC = UTIL.setupMiniKdc(KEYTAB);
-        // Create a service principal and spnego principal in one keytab
-        // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
-        //     use separate identities for HBase and HDFS results in a GSS initiate error. The quick
-        //     solution is to just use a single "service" principal instead of "hbase" and "hdfs"
-        //     (or "dn" and "nn") per usual.
-        KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
-        // Start ZK by hand
-        UTIL.startMiniZKCluster();
-
-        // Create a number of unprivileged users
-        createUsers(2);
-
-        // Set configuration for HBase
-        HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        HBaseKerberosUtils.setSecuredConfiguration(conf);
-        setHdfsSecuredConfiguration(conf);
-        UserGroupInformation.setConfiguration(conf);
-        conf.setInt(HConstants.MASTER_PORT, 0);
-        conf.setInt(HConstants.MASTER_INFO_PORT, 0);
-        conf.setInt(HConstants.REGIONSERVER_PORT, 0);
-        conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
-        conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
-        conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
-        conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName(), TokenProvider.class.getName());
-
-        // Secure Phoenix setup
-        conf.set("phoenix.queryserver.kerberos.http.principal", SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set("phoenix.queryserver.http.keytab.file", KEYTAB.getAbsolutePath());
-        conf.set("phoenix.queryserver.kerberos.principal", PQS_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
-        conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
-        conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
-        // Required so that PQS can impersonate the end-users to HBase
-        conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
-        conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
-        // user1 is allowed to impersonate others, user2 is not
-        conf.set("hadoop.proxyuser.user1.groups", "*");
-        conf.set("hadoop.proxyuser.user1.hosts", "*");
-        conf.setBoolean(QueryServices.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB, true);
-
-        // Clear the cached singletons so we can inject our own.
-        InstanceResolver.clearSingletons();
-        // Make sure the ConnectionInfo doesn't try to pull a default Configuration
-        InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
-            @Override
-            public Configuration getConfiguration() {
-                return conf;
-            }
-            @Override
-            public Configuration getConfiguration(Configuration confToClone) {
-                Configuration copy = new Configuration(conf);
-                copy.addResource(confToClone);
-                return copy;
-            }
-        });
-        updateDefaultRealm();
-
-        // Start HDFS
-        UTIL.startMiniDFSCluster(1);
-        // Use LocalHBaseCluster to avoid HBaseTestingUtility from doing something wrong
-        // NB. I'm not actually sure what HTU does incorrect, but this was pulled from some test
-        //     classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
-        Path rootdir = UTIL.getDataTestDirOnTestFS(HttpParamImpersonationQueryServerIT.class.getSimpleName());
-        FSUtils.setRootDir(conf, rootdir);
-        HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
-        HBASE_CLUSTER.startup();
-
-        // Then fork a thread with PQS in it.
-        startQueryServer();
-    }
-
-    private static void startQueryServer() throws Exception {
-        PQS = new QueryServer(new String[0], UTIL.getConfiguration());
-        // Get the PQS ident for PQS to use
-        final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(PQS_PRINCIPAL, KEYTAB.getAbsolutePath());
-        PQS_EXECUTOR = Executors.newSingleThreadExecutor();
-        // Launch PQS, doing the Kerberos login instead of letting PQS do it itself (which would
-        // break the HBase/HDFS logins also running in the same test case).
-        PQS_EXECUTOR.submit(new Runnable() {
-            @Override public void run() {
-                ugi.doAs(new PrivilegedAction<Void>() {
-                    @Override public Void run() {
-                        PQS.run();
-                        return null;
-                    }
-                });
-            }
-        });
-        PQS.awaitRunning();
-        PQS_PORT = PQS.getPort();
-        PQS_URL = ThinClientUtil.getConnectionUrl("localhost", PQS_PORT) + ";authentication=SPNEGO";
-    }
-
-    @AfterClass
-    public static void stopKdc() throws Exception {
-        // Remove our custom ConfigurationFactory for future tests
-        InstanceResolver.clearSingletons();
-        if (PQS_EXECUTOR != null) {
-            PQS.stop();
-            PQS_EXECUTOR.shutdown();
-            if (!PQS_EXECUTOR.awaitTermination(5, TimeUnit.SECONDS)) {
-                LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
-            }
-        }
-        if (HBASE_CLUSTER != null) {
-            HBASE_CLUSTER.shutdown();
-            HBASE_CLUSTER.join();
-        }
-        if (UTIL != null) {
-            UTIL.shutdownMiniZKCluster();
-        }
-        if (KDC != null) {
-            KDC.stop();
-        }
-    }
-
-    @Test
-    public void testSuccessfulImpersonation() throws Exception {
-        final Entry<String,File> user1 = getUser(1);
-        final Entry<String,File> user2 = getUser(2);
-        // Build the JDBC URL by hand with the doAs
-        final String doAsUrlTemplate = Driver.CONNECT_STRING_PREFIX + "url=http://localhost:" + PQS_PORT + "?"
-            + QueryServicesOptions.DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM + "=%s;authentication=SPNEGO;serialization=PROTOBUF";
-        final String tableName = "POSITIVE_IMPERSONATION";
-        final int numRows = 5;
-        final UserGroupInformation serviceUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, KEYTAB.getAbsolutePath());
-        serviceUgi.doAs(new PrivilegedExceptionAction<Void>() {
-            @Override public Void run() throws Exception {
-                createTable(tableName, numRows);
-                grantUsersToPhoenixSystemTables(Arrays.asList(user1.getKey(), user2.getKey()));
-                return null;
-            }
-        });
-        UserGroupInformation user1Ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user1.getKey(), user1.getValue().getAbsolutePath());
-        user1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
-            @Override public Void run() throws Exception {
-                // This user should not be able to read the table
-                readAndExpectPermissionError(PQS_URL, tableName, numRows);
-                // Run the same query with the same credentials, but with a doAs. We should be permitted since the user we're impersonating can run the query
-                final String doAsUrl = String.format(doAsUrlTemplate, serviceUgi.getShortUserName());
-                try (Connection conn = DriverManager.getConnection(doAsUrl);
-                        Statement stmt = conn.createStatement()) {
-                    conn.setAutoCommit(true);
-                    readRows(stmt, tableName, numRows);
-                }
-                return null;
-            }
-        });
-    }
-
-    @Test
-    public void testDisallowedImpersonation() throws Exception {
-        final Entry<String,File> user2 = getUser(2);
-        // Build the JDBC URL by hand with the doAs
-        final String doAsUrlTemplate = Driver.CONNECT_STRING_PREFIX + "url=http://localhost:" + PQS_PORT + "?"
-            + QueryServicesOptions.DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM + "=%s;authentication=SPNEGO;serialization=PROTOBUF";
-        final String tableName = "DISALLOWED_IMPERSONATION";
-        final int numRows = 5;
-        final UserGroupInformation serviceUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, KEYTAB.getAbsolutePath());
-        serviceUgi.doAs(new PrivilegedExceptionAction<Void>() {
-            @Override public Void run() throws Exception {
-                createTable(tableName, numRows);
-                grantUsersToPhoenixSystemTables(Arrays.asList(user2.getKey()));
-                return null;
-            }
-        });
-        UserGroupInformation user2Ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user2.getKey(), user2.getValue().getAbsolutePath());
-        user2Ugi.doAs(new PrivilegedExceptionAction<Void>() {
-            @Override public Void run() throws Exception {
-                // This user is disallowed to read this table
-                readAndExpectPermissionError(PQS_URL, tableName, numRows);
-                // This user is also not allowed to impersonate
-                final String doAsUrl = String.format(doAsUrlTemplate, serviceUgi.getShortUserName());
-                try (Connection conn = DriverManager.getConnection(doAsUrl);
-                        Statement stmt = conn.createStatement()) {
-                    conn.setAutoCommit(true);
-                    readRows(stmt, tableName, numRows);
-                    fail("user2 should not be allowed to impersonate the service user");
-                } catch (Exception e) {
-                    LOG.info("Caught expected exception", e);
-                }
-                return null;
-            }
-        });
-    }
-
-    void createTable(String tableName, int numRows) throws Exception {
-        try (Connection conn = DriverManager.getConnection(PQS_URL);
-            Statement stmt = conn.createStatement()) {
-            conn.setAutoCommit(true);
-            assertFalse(stmt.execute("CREATE TABLE " + tableName + "(pk integer not null primary key)"));
-            for (int i = 0; i < numRows; i++) {
-                assertEquals(1, stmt.executeUpdate("UPSERT INTO " + tableName + " values(" + i + ")"));
-            }
-            readRows(stmt, tableName, numRows);
-        }
-    }
-
-    void grantUsersToPhoenixSystemTables(List<String> usersToGrant) throws Exception {
-        // Grant permission to the user to access the system tables
-        try {
-            for (String user : usersToGrant) {
-                for (TableName tn : SYSTEM_TABLE_NAMES) {
-                    AccessControlClient.grant(UTIL.getConnection(), tn, user, null, null, Action.READ, Action.EXEC);
-                }
-            }
-        } catch (Throwable e) {
-            throw new Exception(e);
-        }
-    }
-
-    void readAndExpectPermissionError(String jdbcUrl, String tableName, int numRows) {
-        try (Connection conn = DriverManager.getConnection(jdbcUrl);
-            Statement stmt = conn.createStatement()) {
-            conn.setAutoCommit(true);
-            readRows(stmt, tableName, numRows);
-            fail("Expected an exception reading another user's table");
-        } catch (Exception e) {
-            LOG.debug("Caught expected exception", e);
-            // Avatica doesn't re-create new exceptions across the wire. Need to just look at the contents of the message.
-            String errorMessage = e.getMessage();
-            assertTrue("Expected the error message to contain an HBase AccessDeniedException", errorMessage.contains("org.apache.hadoop.hbase.security.AccessDeniedException"));
-            // Expecting an error message like: "Insufficient permissions for user 'user1' (table=POSITIVE_IMPERSONATION, action=READ)"
-            // Being overly cautious to make sure we don't inadvertently pass the test due to permission errors on phoenix system tables.
-            assertTrue("Expected message to contain " + tableName + " and READ", errorMessage.contains(tableName) && errorMessage.contains("READ"));
-        }
-    }
-
-    void readRows(Statement stmt, String tableName, int numRows) throws SQLException {
-        try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
-            for (int i = 0; i < numRows; i++) {
-                assertTrue(rs.next());
-                assertEquals(i, rs.getInt(1));
-            }
-            assertFalse(rs.next());
-        }
-    }
-
-    byte[] copyBytes(byte[] src, int offset, int length) {
-        byte[] dest = new byte[length];
-        System.arraycopy(src, offset, dest, 0, length);
-        return dest;
-    }
-}
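
The impersonation flow removed above hinges on Avatica's "doAs" HTTP parameter: the client appends it to the thin-client URL and PQS, when configured for impersonation, runs the query as that user. Below is a minimal sketch of that client-side pattern, not the removed test's exact code; the host, port, table name, and user are placeholder assumptions, and the actual doAsUrlTemplate is defined earlier in the removed file.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class DoAsSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical endpoint and user; SPNEGO credentials must already be
            // available (for example via kinit) for the handshake to succeed.
            String url = "jdbc:phoenix:thin:url=http://localhost:8765?doAs=alice"
                + ";authentication=SPNEGO;serialization=PROTOBUF";
            try (Connection conn = DriverManager.getConnection(url);
                    Statement stmt = conn.createStatement();
                    // SOME_TABLE is a placeholder table name
                    ResultSet rs = stmt.executeQuery("SELECT * FROM SOME_TABLE")) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1));
                }
            }
        }
    }
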
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerBasicsIT.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerBasicsIT.java
deleted file mode 100644
index ceb0a78..0000000
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerBasicsIT.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static java.lang.String.format;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_CAT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_CATALOG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
-import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME;
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.Array;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.Statement;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.queryserver.client.ThinClientUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-
-/**
- * Smoke test for query server.
- */
-public class QueryServerBasicsIT extends BaseHBaseManagedTimeIT {
-
-  private static final Log LOG = LogFactory.getLog(QueryServerBasicsIT.class);
-
-  private static QueryServerThread AVATICA_SERVER;
-  private static Configuration CONF;
-  private static String CONN_STRING;
-
-  @Rule
-  public TestName name = new TestName();
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    CONF = getTestClusterConfig();
-    CONF.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
-    String url = getUrl();
-    AVATICA_SERVER = new QueryServerThread(new String[] { url }, CONF,
-            QueryServerBasicsIT.class.getName());
-    AVATICA_SERVER.start();
-    AVATICA_SERVER.getQueryServer().awaitRunning();
-    final int port = AVATICA_SERVER.getQueryServer().getPort();
-    LOG.info("Avatica server started on port " + port);
-    CONN_STRING = ThinClientUtil.getConnectionUrl("localhost", port);
-    LOG.info("JDBC connection string is " + CONN_STRING);
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
-    if (AVATICA_SERVER != null) {
-      AVATICA_SERVER.join(TimeUnit.MINUTES.toMillis(1));
-      Throwable t = AVATICA_SERVER.getQueryServer().getThrowable();
-      if (t != null) {
-        fail("query server threw. " + t.getMessage());
-      }
-      assertEquals("query server didn't exit cleanly", 0, AVATICA_SERVER.getQueryServer()
-        .getRetCode());
-    }
-  }
-
-  @Test
-  public void testCatalogs() throws Exception {
-    try (final Connection connection = DriverManager.getConnection(CONN_STRING)) {
-      assertThat(connection.isClosed(), is(false));
-      try (final ResultSet resultSet = connection.getMetaData().getCatalogs()) {
-        final ResultSetMetaData metaData = resultSet.getMetaData();
-        assertFalse("unexpected populated resultSet", resultSet.next());
-        assertEquals(1, metaData.getColumnCount());
-        assertEquals(TABLE_CAT, metaData.getColumnName(1));
-      }
-    }
-  }
-
-  @Test
-  public void testSchemas() throws Exception {
-    Properties props = new Properties();
-    props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
-    try (final Connection connection = DriverManager.getConnection(CONN_STRING, props)) {
-      connection.createStatement().executeUpdate("CREATE SCHEMA IF NOT EXISTS " + SYSTEM_SCHEMA_NAME);
-      assertThat(connection.isClosed(), is(false));
-      try (final ResultSet resultSet = connection.getMetaData().getSchemas()) {
-        final ResultSetMetaData metaData = resultSet.getMetaData();
-        assertTrue("unexpected empty resultset", resultSet.next());
-        assertEquals(2, metaData.getColumnCount());
-        assertEquals(TABLE_SCHEM, metaData.getColumnName(1));
-        assertEquals(TABLE_CATALOG, metaData.getColumnName(2));
-        boolean containsSystem = false;
-        do {
-          if (resultSet.getString(1).equalsIgnoreCase(SYSTEM_SCHEMA_NAME)) containsSystem = true;
-        } while (resultSet.next());
-        assertTrue(format("should contain at least %s schema.", SYSTEM_SCHEMA_NAME), containsSystem);
-      }
-    }
-  }
-
-  @Test
-  public void smokeTest() throws Exception {
-    final String tableName = name.getMethodName();
-    try (final Connection connection = DriverManager.getConnection(CONN_STRING)) {
-      assertThat(connection.isClosed(), is(false));
-      connection.setAutoCommit(true);
-      try (final Statement stmt = connection.createStatement()) {
-        assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
-        assertFalse(stmt.execute("CREATE TABLE " + tableName + "("
-            + "id INTEGER NOT NULL, "
-            + "pk varchar(3) NOT NULL "
-            + "CONSTRAINT PK_CONSTRAINT PRIMARY KEY (id, pk))"));
-        assertEquals(0, stmt.getUpdateCount());
-        assertEquals(1, stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES(1, 'foo')"));
-        assertEquals(1, stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES(2, 'bar')"));
-        assertTrue(stmt.execute("SELECT * FROM " + tableName));
-        try (final ResultSet resultSet = stmt.getResultSet()) {
-          assertTrue(resultSet.next());
-          assertEquals(1, resultSet.getInt(1));
-          assertEquals("foo", resultSet.getString(2));
-          assertTrue(resultSet.next());
-          assertEquals(2, resultSet.getInt(1));
-          assertEquals("bar", resultSet.getString(2));
-        }
-      }
-      final String sql = "SELECT * FROM " + tableName + " WHERE id = ?";
-      try (final PreparedStatement stmt = connection.prepareStatement(sql)) {
-        stmt.setInt(1, 1);
-        try (ResultSet resultSet = stmt.executeQuery()) {
-          assertTrue(resultSet.next());
-          assertEquals(1, resultSet.getInt(1));
-          assertEquals("foo", resultSet.getString(2));
-        }
-        stmt.clearParameters();
-        stmt.setInt(1, 5);
-        try (final ResultSet resultSet = stmt.executeQuery()) {
-          assertFalse(resultSet.next());
-        }
-      }
-    }
-  }
-
-  @Test
-  public void arrayTest() throws Exception {
-      final String tableName = name.getMethodName();
-      try (Connection conn = DriverManager.getConnection(CONN_STRING);
-              Statement stmt = conn.createStatement()) {
-          conn.setAutoCommit(false);
-          assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
-          assertFalse(stmt.execute("CREATE TABLE " + tableName + " ("
-              + "pk VARCHAR NOT NULL PRIMARY KEY, "
-              + "histogram INTEGER[])")
-              );
-          conn.commit();
-          int numRows = 10;
-          int numEvenElements = 4;
-          int numOddElements = 6;
-          for (int i = 0; i < numRows; i++) {
-              int arrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
-              StringBuilder sb = new StringBuilder();
-              for (int arrayOffset = 0; arrayOffset < arrayLength; arrayOffset++) {
-                  if (sb.length() > 0) {
-                      sb.append(", ");
-                  }
-                  sb.append(getArrayValueForOffset(arrayOffset));
-              }
-              String updateSql = "UPSERT INTO " + tableName + " values('" + i + "', " + "ARRAY[" + sb.toString() + "])";
-              assertEquals(1, stmt.executeUpdate(updateSql));
-          }
-          conn.commit();
-          try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
-              for (int i = 0; i < numRows; i++) {
-                  assertTrue(rs.next());
-                  assertEquals(i, Integer.parseInt(rs.getString(1)));
-                  Array array = rs.getArray(2);
-                  Object untypedArrayData = array.getArray();
-                  assertTrue("Expected array data to be an int array, but was " + untypedArrayData.getClass(), untypedArrayData instanceof Object[]);
-                  Object[] arrayData = (Object[]) untypedArrayData;
-                  int expectedArrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
-                  assertEquals(expectedArrayLength, arrayData.length);
-                  for (int arrayOffset = 0; arrayOffset < expectedArrayLength; arrayOffset++) {
-                      assertEquals(getArrayValueForOffset(arrayOffset), arrayData[arrayOffset]);
-                  }
-              }
-              assertFalse(rs.next());
-          }
-      }
-  }
-
-  @Test
-  public void preparedStatementArrayTest() throws Exception {
-      final String tableName = name.getMethodName();
-      try (Connection conn = DriverManager.getConnection(CONN_STRING);
-              Statement stmt = conn.createStatement()) {
-          conn.setAutoCommit(false);
-          assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
-          assertFalse(stmt.execute("CREATE TABLE " + tableName + " ("
-              + "pk VARCHAR NOT NULL PRIMARY KEY, "
-              + "histogram INTEGER[])")
-              );
-          conn.commit();
-          int numRows = 10;
-          int numEvenElements = 4;
-          int numOddElements = 6;
-          try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO " + tableName + " values(?, ?)")) {
-              for (int i = 0; i < numRows; i++) {
-                pstmt.setString(1, Integer.toString(i));
-                int arrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
-                Object[] arrayData = new Object[arrayLength];
-                for (int arrayOffset = 0; arrayOffset < arrayLength; arrayOffset++) {
-                  arrayData[arrayOffset] = getArrayValueForOffset(arrayOffset);
-                }
-                pstmt.setArray(2, conn.createArrayOf("INTEGER", arrayData));
-                assertEquals(1, pstmt.executeUpdate());
-              }
-              conn.commit();
-          }
-          conn.commit();
-          try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
-              for (int i = 0; i < numRows; i++) {
-                  assertTrue(rs.next());
-                  assertEquals(i, Integer.parseInt(rs.getString(1)));
-                  Array array = rs.getArray(2);
-                  Object untypedArrayData = array.getArray();
-                  assertTrue("Expected array data to be an int array, but was " + untypedArrayData.getClass(), untypedArrayData instanceof Object[]);
-                  Object[] arrayData = (Object[]) untypedArrayData;
-                  int expectedArrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
-                  assertEquals(expectedArrayLength, arrayData.length);
-                  for (int arrayOffset = 0; arrayOffset < expectedArrayLength; arrayOffset++) {
-                      assertEquals(getArrayValueForOffset(arrayOffset), arrayData[arrayOffset]);
-                  }
-              }
-              assertFalse(rs.next());
-          }
-      }
-  }
-
-  @Test
-  public void preparedStatementVarcharArrayTest() throws Exception {
-      final String tableName = name.getMethodName();
-      try (Connection conn = DriverManager.getConnection(CONN_STRING);
-              Statement stmt = conn.createStatement()) {
-          conn.setAutoCommit(false);
-          assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
-          assertFalse(stmt.execute("CREATE TABLE " + tableName + " ("
-              + "pk VARCHAR NOT NULL PRIMARY KEY, "
-              + "histogram VARCHAR[])")
-              );
-          conn.commit();
-          int numRows = 10;
-          int numEvenElements = 4;
-          int numOddElements = 6;
-          try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO " + tableName + " values(?, ?)")) {
-              for (int i = 0; i < numRows; i++) {
-                pstmt.setString(1, Integer.toString(i));
-                int arrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
-                Object[] arrayData = new Object[arrayLength];
-                for (int arrayOffset = 0; arrayOffset < arrayLength; arrayOffset++) {
-                  arrayData[arrayOffset] = Integer.toString(getArrayValueForOffset(arrayOffset));
-                }
-                pstmt.setArray(2, conn.createArrayOf("VARCHAR", arrayData));
-                assertEquals(1, pstmt.executeUpdate());
-              }
-              conn.commit();
-          }
-          conn.commit();
-          try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
-              for (int i = 0; i < numRows; i++) {
-                  assertTrue(rs.next());
-                  assertEquals(i, Integer.parseInt(rs.getString(1)));
-                  Array array = rs.getArray(2);
-                  Object untypedArrayData = array.getArray();
-                  assertTrue("Expected array data to be an int array, but was " + untypedArrayData.getClass(), untypedArrayData instanceof Object[]);
-                  Object[] arrayData = (Object[]) untypedArrayData;
-                  int expectedArrayLength = i % 2 == 0 ? numEvenElements : numOddElements;
-                  assertEquals(expectedArrayLength, arrayData.length);
-                  for (int arrayOffset = 0; arrayOffset < expectedArrayLength; arrayOffset++) {
-                      assertEquals(Integer.toString(getArrayValueForOffset(arrayOffset)), arrayData[arrayOffset]);
-                  }
-              }
-              assertFalse(rs.next());
-          }
-      }
-  }
-
-  private int getArrayValueForOffset(int arrayOffset) {
-      return arrayOffset * 2 + 1;
-  }
-
-  @Test
-  public void testParameterizedLikeExpression() throws Exception {
-    final Connection conn = DriverManager.getConnection(CONN_STRING);
-    final String tableName = generateUniqueName();
-    conn.createStatement().execute(
-            "CREATE TABLE " + tableName + " (k VARCHAR NOT NULL PRIMARY KEY, i INTEGER)");
-    conn.commit();
-
-    final PreparedStatement upsert = conn.prepareStatement(
-            "UPSERT INTO " + tableName + " VALUES (?, ?)");
-    upsert.setString(1, "123n7-app-2-");
-    upsert.setInt(2, 1);
-    upsert.executeUpdate();
-    conn.commit();
-
-    final PreparedStatement select = conn.prepareStatement(
-            "select k from " + tableName + " where k like ?");
-    select.setString(1, "12%");
-    ResultSet rs = select.executeQuery();
-    assertTrue(rs.next());
-    assertEquals("123n7-app-2-", rs.getString(1));
-    assertFalse(rs.next());
-
-    select.setString(1, null);
-    rs = select.executeQuery();
-    assertFalse(rs.next());
-  }
-}
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
deleted file mode 100644
index 01f73ae..0000000
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import java.io.File;
-import java.security.PrivilegedAction;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.phoenix.query.ConfigurationFactory;
-import org.apache.phoenix.queryserver.client.ThinClientUtil;
-import org.apache.phoenix.queryserver.server.QueryServer;
-import org.apache.phoenix.util.InstanceResolver;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Strings;
-
-public class QueryServerTestUtil {
-    private static final Logger LOG = LoggerFactory.getLogger(QueryServerTestUtil.class);
-
-    private final Configuration conf;
-    private final HBaseTestingUtility util;
-    private LocalHBaseCluster hbase;
-
-    private final QueryServer pqs;
-    private int port;
-    private String url;
-
-    private String principal;
-    private File keytab;
-
-    private ExecutorService executor;
-
-    public QueryServerTestUtil(Configuration conf) {
-        this.conf = Objects.requireNonNull(conf);
-        this.util = new HBaseTestingUtility(conf);
-        this.pqs = new QueryServer(new String[0], conf);
-    }
-
-    public QueryServerTestUtil(Configuration conf, String principal, File keytab) {
-        this.conf = Objects.requireNonNull(conf);
-        this.principal = principal;
-        this.keytab = keytab;
-        this.util = new HBaseTestingUtility(conf);
-        this.pqs = new QueryServer(new String[0], conf);
-    }
-
-    public void startLocalHBaseCluster(Class testClass) throws Exception {
-        startLocalHBaseCluster(testClass.getCanonicalName());
-    }
-
-    public void startLocalHBaseCluster(String uniqueName) throws Exception {
-        LOG.debug("Starting local HBase cluster for '{}'", uniqueName);
-        // Start ZK
-        util.startMiniZKCluster();
-        // Start HDFS
-        util.startMiniDFSCluster(1);
-        // Start HBase
-        Path rootdir = util.getDataTestDirOnTestFS(uniqueName);
-        FSUtils.setRootDir(conf, rootdir);
-        hbase = new LocalHBaseCluster(conf, 1);
-        hbase.startup();
-    }
-
-    public void stopLocalHBaseCluster() throws Exception {
-        LOG.debug("Stopping local HBase cluster");
-        if (hbase != null) {
-            hbase.shutdown();
-            hbase.join();
-        }
-        if (util != null) {
-            util.shutdownMiniDFSCluster();
-            util.shutdownMiniZKCluster();
-        }
-    }
-
-    public void startQueryServer() throws Exception {
-        setupQueryServerConfiguration(conf);
-        executor = Executors.newSingleThreadExecutor();
-        if (!Strings.isNullOrEmpty(principal) && null != keytab) {
-            // Get the PQS ident for PQS to use
-            final UserGroupInformation ugi = UserGroupInformation
-                    .loginUserFromKeytabAndReturnUGI(principal, keytab.getAbsolutePath());
-            // Launch PQS inside the Kerberos login instead of letting PQS do it itself (which would
-            // break the HBase/HDFS logins also running in the same test case).
-            executor.submit(new Runnable() {
-                @Override
-                public void run() {
-                    ugi.doAs(new PrivilegedAction<Void>() {
-                        @Override
-                        public Void run() {
-                            pqs.run();
-                            return null;
-                        }
-                    });
-                }
-            });
-        } else {
-            // Launch PQS without a login
-            executor.submit(new Runnable() {
-                @Override
-                public void run() {
-                    pqs.run();
-                }
-            });
-        }
-        pqs.awaitRunning();
-        port = pqs.getPort();
-        url = ThinClientUtil.getConnectionUrl("localhost", port);
-    }
-
-    public void stopQueryServer() throws Exception {
-        if (pqs != null) {
-            pqs.stop();
-        }
-        if (executor != null) {
-            executor.shutdown();
-            if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
-                LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
-            }
-        }
-    }
-
-    public static void setupQueryServerConfiguration(final Configuration conf) {
-        // Make sure the ConnectionInfo doesn't try to pull a default Configuration
-        InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
-            @Override
-            public Configuration getConfiguration() {
-                return conf;
-            }
-            @Override
-            public Configuration getConfiguration(Configuration confToClone) {
-                Configuration copy = new Configuration(conf);
-                copy.addResource(confToClone);
-                return copy;
-            }
-        });
-    }
-
-    public int getPort() {
-        return port;
-    }
-
-    public String getUrl() {
-        return url;
-    }
-
-    /**
-     * Returns the query server URL with the specified URL params
-     * @param params URL params
-     * @return URL with params
-     */
-    public String getUrl(Map<String, String> params) {
-        if (params == null || params.size() == 0) {
-            return url;
-        }
-        StringBuilder urlParams = new StringBuilder();
-        for (Map.Entry<String, String> param : params.entrySet()) {
-            urlParams.append(";").append(param.getKey()).append("=").append(param.getValue());
-        }
-        return url + urlParams;
-    }
-}
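
QueryServerTestUtil, deleted above, bundled the whole mini-cluster lifecycle behind a handful of calls. The following is a hedged usage sketch built only from the methods shown in the removed class, assuming it is still on the classpath; the driving class name and the URL parameter are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MiniPqsSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            QueryServerTestUtil util = new QueryServerTestUtil(conf);
            // Spins up ZK, HDFS, and a single-node HBase, then PQS on an ephemeral port
            util.startLocalHBaseCluster(MiniPqsSketch.class);
            util.startQueryServer();
            // getUrl(Map) appends each entry as ";key=value" to the base thin-client URL
            String url = util.getUrl(
                java.util.Collections.singletonMap("authentication", "SPNEGO"));
            System.out.println("PQS available at " + url);
            util.stopQueryServer();
            util.stopLocalHBaseCluster();
        }
    }
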
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerThread.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerThread.java
deleted file mode 100644
index 0010656..0000000
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerThread.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.queryserver.server.QueryServer;
-
-/** Wraps up the query server for tests. */
-public class QueryServerThread extends Thread {
-
-  private final QueryServer main;
-
-  public QueryServerThread(String[] argv, Configuration conf) {
-    this(argv, conf, null);
-  }
-
-  public QueryServerThread(String[] argv, Configuration conf, String name) {
-    this(new QueryServer(argv, conf), name);
-  }
-
-  private QueryServerThread(QueryServer m, String name) {
-    super(m, "query server" + (name == null ? "" : (" - " + name)));
-    this.main = m;
-    setDaemon(true);
-  }
-
-  public QueryServer getQueryServer() {
-    return main;
-  }
-}
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerIT.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerIT.java
deleted file mode 100644
index c3ff885..0000000
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerIT.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedExceptionAction;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
-import org.apache.hadoop.hbase.security.token.TokenProvider;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.phoenix.query.ConfigurationFactory;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.queryserver.client.ThinClientUtil;
-import org.apache.phoenix.queryserver.server.QueryServer;
-import org.apache.phoenix.util.InstanceResolver;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class SecureQueryServerIT {
-    private static final Log LOG = LogFactory.getLog(SecureQueryServerIT.class);
-
-    private static final File TEMP_DIR = new File(getTempDirForClass());
-    private static final File KEYTAB_DIR = new File(TEMP_DIR, "keytabs");
-    private static final List<File> USER_KEYTAB_FILES = new ArrayList<>();
-
-    private static final String SPNEGO_PRINCIPAL = "HTTP/localhost";
-    private static final String PQS_PRINCIPAL = "phoenixqs/localhost";
-    private static final String SERVICE_PRINCIPAL = "securecluster/localhost";
-    private static File KEYTAB;
-
-    private static MiniKdc KDC;
-    private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-    private static LocalHBaseCluster HBASE_CLUSTER;
-    private static int NUM_CREATED_USERS;
-
-    private static ExecutorService PQS_EXECUTOR;
-    private static QueryServer PQS;
-    private static int PQS_PORT;
-    private static String PQS_URL;
-
-    private static String getTempDirForClass() {
-        StringBuilder sb = new StringBuilder(32);
-        sb.append(System.getProperty("user.dir")).append(File.separator);
-        sb.append("target").append(File.separator);
-        sb.append(SecureQueryServerIT.class.getSimpleName());
-        return sb.toString();
-    }
-
-    private static void updateDefaultRealm() throws Exception {
-        // (at least) one other phoenix test triggers the caching of this field before the KDC is up
-        // which causes principal parsing to fail.
-        Field f = KerberosName.class.getDeclaredField("defaultRealm");
-        f.setAccessible(true);
-        // Default realm for MiniKDC
-        f.set(null, "EXAMPLE.COM");
-    }
-
-    private static void createUsers(int numUsers) throws Exception {
-        assertNotNull("KDC is null, was setup method called?", KDC);
-        NUM_CREATED_USERS = numUsers;
-        for (int i = 1; i <= numUsers; i++) {
-            String principal = "user" + i;
-            File keytabFile = new File(KEYTAB_DIR, principal + ".keytab");
-            KDC.createPrincipal(keytabFile, principal);
-            USER_KEYTAB_FILES.add(keytabFile);
-        }
-    }
-
-    private static Entry<String,File> getUser(int offset) {
-        Preconditions.checkArgument(offset > 0 && offset <= NUM_CREATED_USERS);
-        return Maps.immutableEntry("user" + offset, USER_KEYTAB_FILES.get(offset - 1));
-    }
-
-    /**
-     * Sets up the security configuration for HDFS.
-     */
-    private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception {
-        // Set principal+keytab configuration for HDFS
-        conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
-        conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
-        conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
-        // Enable token access for HDFS blocks
-        conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
-        // Only use HTTPS (required because we aren't using "secure" ports)
-        conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
-        // Bind on localhost for spnego to have a chance at working
-        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
-        conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
-
-        // Generate SSL certs
-        File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath());
-        keystoresDir.mkdirs();
-        String sslConfDir = KeyStoreTestUtil.getClasspathDir(SecureQueryServerIT.class);
-        KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false);
-
-        // Magic flag to tell hdfs to not fail on using ports above 1024
-        conf.setBoolean("ignore.secure.ports.for.testing", true);
-    }
-
-    private static void ensureIsEmptyDirectory(File f) throws IOException {
-        if (f.exists()) {
-            if (f.isDirectory()) {
-                FileUtils.deleteDirectory(f);
-            } else {
-                assertTrue("Failed to delete keytab directory", f.delete());
-            }
-        }
-        assertTrue("Failed to create keytab directory", f.mkdirs());
-    }
-
-    /**
-     * Sets up and starts Kerberos and HBase.
-     */
-    @BeforeClass
-    public static void setUp() throws Exception {
-        final Configuration conf = UTIL.getConfiguration();
-        // Ensure the dirs we need are created/empty
-        ensureIsEmptyDirectory(TEMP_DIR);
-        ensureIsEmptyDirectory(KEYTAB_DIR);
-        KEYTAB = new File(KEYTAB_DIR, "test.keytab");
-        // Start a MiniKDC
-        KDC = UTIL.setupMiniKdc(KEYTAB);
-        // Create a service principal and spnego principal in one keytab
-        // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
-        //     use separate identities for HBase and HDFS results in a GSS initiate error. The quick
-        //     solution is to just use a single "service" principal instead of "hbase" and "hdfs"
-        //     (or "dn" and "nn") per usual.
-        KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
-        // Start ZK by hand
-        UTIL.startMiniZKCluster();
-
-        // Create a number of unprivileged users
-        createUsers(3);
-
-        // Set configuration for HBase
-        HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        HBaseKerberosUtils.setSecuredConfiguration(conf);
-        setHdfsSecuredConfiguration(conf);
-        UserGroupInformation.setConfiguration(conf);
-        conf.setInt(HConstants.MASTER_PORT, 0);
-        conf.setInt(HConstants.MASTER_INFO_PORT, 0);
-        conf.setInt(HConstants.REGIONSERVER_PORT, 0);
-        conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
-        conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
-            TokenProvider.class.getName());
-
-        // Secure Phoenix setup
-        conf.set("phoenix.queryserver.kerberos.http.principal", SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set("phoenix.queryserver.http.keytab.file", KEYTAB.getAbsolutePath());
-        conf.set("phoenix.queryserver.kerberos.principal", PQS_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
-        conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
-        conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
-        // Required so that PQS can impersonate the end-users to HBase
-        conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
-        conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
-
-        // Clear the cached singletons so we can inject our own.
-        InstanceResolver.clearSingletons();
-        // Make sure the ConnectionInfo doesn't try to pull a default Configuration
-        InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
-            @Override
-            public Configuration getConfiguration() {
-                return conf;
-            }
-            @Override
-            public Configuration getConfiguration(Configuration confToClone) {
-                Configuration copy = new Configuration(conf);
-                copy.addResource(confToClone);
-                return copy;
-            }
-        });
-        updateDefaultRealm();
-
-        // Start HDFS
-        UTIL.startMiniDFSCluster(1);
-        // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong
-        // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
-        //     classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
-        Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerIT.class.getSimpleName());
-        FSUtils.setRootDir(conf, rootdir);
-        HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
-        HBASE_CLUSTER.startup();
-
-        // Then fork a thread with PQS in it.
-        startQueryServer();
-    }
-
-    private static void startQueryServer() throws Exception {
-        PQS = new QueryServer(new String[0], UTIL.getConfiguration());
-        // Get the PQS ident for PQS to use
-        final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(PQS_PRINCIPAL, KEYTAB.getAbsolutePath());
-        PQS_EXECUTOR = Executors.newSingleThreadExecutor();
-        // Launch PQS inside the Kerberos login instead of letting PQS do it itself (which would
-        // break the HBase/HDFS logins also running in the same test case).
-        PQS_EXECUTOR.submit(new Runnable() {
-            @Override public void run() {
-                ugi.doAs(new PrivilegedAction<Void>() {
-                    @Override public Void run() {
-                        PQS.run();
-                        return null;
-                    }
-                });
-            }
-        });
-        PQS.awaitRunning();
-        PQS_PORT = PQS.getPort();
-        PQS_URL = ThinClientUtil.getConnectionUrl("localhost", PQS_PORT) + ";authentication=SPNEGO";
-    }
-
-    @AfterClass
-    public static void stopKdc() throws Exception {
-        // Remove our custom ConfigurationFactory for future tests
-        InstanceResolver.clearSingletons();
-        if (PQS_EXECUTOR != null) {
-            PQS.stop();
-            PQS_EXECUTOR.shutdown();
-            if (!PQS_EXECUTOR.awaitTermination(5, TimeUnit.SECONDS)) {
-                LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
-            }
-        }
-        if (HBASE_CLUSTER != null) {
-            HBASE_CLUSTER.shutdown();
-            HBASE_CLUSTER.join();
-        }
-        if (UTIL != null) {
-            UTIL.shutdownMiniZKCluster();
-        }
-        if (KDC != null) {
-            KDC.stop();
-        }
-    }
-
-    @Test
-    public void testBasicReadWrite() throws Exception {
-        final Entry<String,File> user1 = getUser(1);
-        UserGroupInformation user1Ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user1.getKey(), user1.getValue().getAbsolutePath());
-        user1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
-            @Override public Void run() throws Exception {
-                // Phoenix
-                final String tableName = "phx_table1";
-                try (java.sql.Connection conn = DriverManager.getConnection(PQS_URL);
-                        Statement stmt = conn.createStatement()) {
-                    conn.setAutoCommit(true);
-                    assertFalse(stmt.execute("CREATE TABLE " + tableName + "(pk integer not null primary key)"));
-                    final int numRows = 5;
-                    for (int i = 0; i < numRows; i++) {
-                      assertEquals(1, stmt.executeUpdate("UPSERT INTO " + tableName + " values(" + i + ")"));
-                    }
-
-                    try (ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName)) {
-                        for (int i = 0; i < numRows; i++) {
-                            assertTrue(rs.next());
-                            assertEquals(i, rs.getInt(1));
-                        }
-                        assertFalse(rs.next());
-                    }
-                }
-                return null;
-            }
-        });
-    }
-
-    byte[] copyBytes(byte[] src, int offset, int length) {
-        byte[] dest = new byte[length];
-        System.arraycopy(src, offset, dest, 0, length);
-        return dest;
-    }
-}
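
The client half of the SPNEGO handshake in the removed test generalizes beyond tests: log in from a keytab, then open the thin-client connection inside a doAs block so the Kerberos credentials are visible to the HTTP layer. A minimal sketch follows, with placeholder principal, keytab path, and port; in the test these come from MiniKdc.

    import java.security.PrivilegedExceptionAction;
    import java.sql.Connection;
    import java.sql.DriverManager;

    import org.apache.hadoop.security.UserGroupInformation;

    public class SpnegoClientSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder principal and keytab path
            final UserGroupInformation ugi = UserGroupInformation
                .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM", "/tmp/user1.keytab");
            ugi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override public Void run() throws Exception {
                    String url = "jdbc:phoenix:thin:url=http://localhost:8765"
                        + ";authentication=SPNEGO;serialization=PROTOBUF";
                    try (Connection conn = DriverManager.getConnection(url)) {
                        System.out.println("Connected as "
                            + UserGroupInformation.getCurrentUser().getShortUserName());
                    }
                    return null;
                }
            });
        }
    }
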
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerPhoenixDBIT.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerPhoenixDBIT.java
deleted file mode 100644
index 205a831..0000000
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/SecureQueryServerPhoenixDBIT.java
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.lang.reflect.Field;
-import java.nio.file.Paths;
-import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
-import org.apache.hadoop.hbase.security.token.TokenProvider;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.phoenix.query.ConfigurationFactory;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.queryserver.client.ThinClientUtil;
-import org.apache.phoenix.queryserver.server.QueryServer;
-import org.apache.phoenix.util.InstanceResolver;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-/**
- * This integration test stands up a secured PQS and runs Python code against it. See supporting
- * files in phoenix-queryserver/src/it/bin.
- */
-@Category(NeedsOwnMiniClusterTest.class)
-public class SecureQueryServerPhoenixDBIT {
-    private static enum Kdc {
-      MIT,
-      HEIMDAL;
-    }
-    private static final Logger LOG = LoggerFactory.getLogger(SecureQueryServerPhoenixDBIT.class);
-
-    private static final File TEMP_DIR = new File(getTempDirForClass());
-    private static final File KEYTAB_DIR = new File(TEMP_DIR, "keytabs");
-    private static final List<File> USER_KEYTAB_FILES = new ArrayList<>();
-
-    private static final String SPNEGO_PRINCIPAL = "HTTP/localhost";
-    private static final String PQS_PRINCIPAL = "phoenixqs/localhost";
-    private static final String SERVICE_PRINCIPAL = "securecluster/localhost";
-    private static File KEYTAB;
-
-    private static MiniKdc KDC;
-    private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-    private static LocalHBaseCluster HBASE_CLUSTER;
-    private static int NUM_CREATED_USERS;
-
-    private static ExecutorService PQS_EXECUTOR;
-    private static QueryServer PQS;
-    private static int PQS_PORT;
-    private static String PQS_URL;
-
-    private static String getTempDirForClass() {
-        StringBuilder sb = new StringBuilder(32);
-        sb.append(System.getProperty("user.dir")).append(File.separator);
-        sb.append("target").append(File.separator);
-        sb.append(SecureQueryServerPhoenixDBIT.class.getSimpleName());
-        return sb.toString();
-    }
-
-    private static void updateDefaultRealm() throws Exception {
-        // (at least) one other phoenix test triggers the caching of this field before the KDC is up
-        // which causes principal parsing to fail.
-        Field f = KerberosName.class.getDeclaredField("defaultRealm");
-        f.setAccessible(true);
-        // Default realm for MiniKDC
-        f.set(null, "EXAMPLE.COM");
-    }
-
-    private static void createUsers(int numUsers) throws Exception {
-        assertNotNull("KDC is null, was setup method called?", KDC);
-        NUM_CREATED_USERS = numUsers;
-        for (int i = 1; i <= numUsers; i++) {
-            String principal = "user" + i;
-            File keytabFile = new File(KEYTAB_DIR, principal + ".keytab");
-            KDC.createPrincipal(keytabFile, principal);
-            USER_KEYTAB_FILES.add(keytabFile);
-        }
-    }
-
-    private static Entry<String,File> getUser(int offset) {
-        Preconditions.checkArgument(offset > 0 && offset <= NUM_CREATED_USERS);
-        return Maps.immutableEntry("user" + offset, USER_KEYTAB_FILES.get(offset - 1));
-    }
-
-    /**
-     * Sets up the security configuration for HDFS.
-     */
-    private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception {
-        // Set principal+keytab configuration for HDFS
-        conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
-        conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath());
-        conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
-        // Enable token access for HDFS blocks
-        conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
-        // Only use HTTPS (required because we aren't using "secure" ports)
-        conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
-        // Bind on localhost for spnego to have a chance at working
-        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
-        conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
-
-        // Generate SSL certs
-        File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath());
-        keystoresDir.mkdirs();
-        String sslConfDir = KeyStoreTestUtil.getClasspathDir(SecureQueryServerPhoenixDBIT.class);
-        KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false);
-
-        // Magic flag to tell hdfs to not fail on using ports above 1024
-        conf.setBoolean("ignore.secure.ports.for.testing", true);
-    }
-
-    private static void ensureIsEmptyDirectory(File f) throws IOException {
-        if (f.exists()) {
-            if (f.isDirectory()) {
-                FileUtils.deleteDirectory(f);
-            } else {
-                assertTrue("Failed to delete keytab directory", f.delete());
-            }
-        }
-        assertTrue("Failed to create keytab directory", f.mkdirs());
-    }
-
-    /**
-     * Verifies that the given command is present on the PATH.
-     */
-    private static void checkForCommandOnPath(String command) throws Exception {
-        Process runPythonProcess = new ProcessBuilder(Arrays.asList("which", command)).start();
-        BufferedReader processOutput = new BufferedReader(new InputStreamReader(runPythonProcess.getInputStream()));
-        BufferedReader processError = new BufferedReader(new InputStreamReader(runPythonProcess.getErrorStream()));
-        int exitCode = runPythonProcess.waitFor();
-        // dump stdout and stderr
-        while (processOutput.ready()) {
-            LOG.info(processOutput.readLine());
-        }
-        while (processError.ready()) {
-            LOG.error(processError.readLine());
-        }
-        Assume.assumeTrue("Could not find '" + command + "' on the PATH", exitCode == 0);
-    }
-
-    /**
-     * Sets up and starts Kerberos and HBase.
-     */
-    @BeforeClass
-    public static void setUp() throws Exception {
-        checkForCommandOnPath("python");
-        checkForCommandOnPath("virtualenv");
-        checkForCommandOnPath("kinit");
-
-        final Configuration conf = UTIL.getConfiguration();
-        // Ensure the dirs we need are created/empty
-        ensureIsEmptyDirectory(TEMP_DIR);
-        ensureIsEmptyDirectory(KEYTAB_DIR);
-        KEYTAB = new File(KEYTAB_DIR, "test.keytab");
-        // Start a MiniKDC
-        KDC = UTIL.setupMiniKdc(KEYTAB);
-        // Create a service principal and spnego principal in one keytab
-        // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
-        //     use separate identities for HBase and HDFS results in a GSS initiate error. The quick
-        //     solution is to just use a single "service" principal instead of "hbase" and "hdfs"
-        //     (or "dn" and "nn") per usual.
-        KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
-        // Start ZK by hand
-        UTIL.startMiniZKCluster();
-
-        // Create a number of unprivileged users
-        createUsers(3);
-
-        // Set configuration for HBase
-        HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
-        HBaseKerberosUtils.setSecuredConfiguration(conf);
-        setHdfsSecuredConfiguration(conf);
-        UserGroupInformation.setConfiguration(conf);
-        conf.setInt(HConstants.MASTER_PORT, 0);
-        conf.setInt(HConstants.MASTER_INFO_PORT, 0);
-        conf.setInt(HConstants.REGIONSERVER_PORT, 0);
-        conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
-        conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
-            TokenProvider.class.getName());
-
-        // Secure Phoenix setup
-        conf.set("phoenix.queryserver.kerberos.http.principal", SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set("phoenix.queryserver.http.keytab.file", KEYTAB.getAbsolutePath());
-        conf.set("phoenix.queryserver.kerberos.principal", PQS_PRINCIPAL + "@" + KDC.getRealm());
-        conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
-        conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
-        conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
-        // Required so that PQS can impersonate the end-users to HBase
-        conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
-        conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
-
-        // Clear the cached singletons so we can inject our own.
-        InstanceResolver.clearSingletons();
-        // Make sure the ConnectionInfo doesn't try to pull a default Configuration
-        InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
-            @Override
-            public Configuration getConfiguration() {
-                return conf;
-            }
-            @Override
-            public Configuration getConfiguration(Configuration confToClone) {
-                Configuration copy = new Configuration(conf);
-                copy.addResource(confToClone);
-                return copy;
-            }
-        });
-        updateDefaultRealm();
-
-        // Start HDFS
-        UTIL.startMiniDFSCluster(1);
-        // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong
-        // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
-        //     classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
-        Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerPhoenixDBIT.class.getSimpleName());
-        FSUtils.setRootDir(conf, rootdir);
-        HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
-        HBASE_CLUSTER.startup();
-
-        // Then fork a thread with PQS in it.
-        startQueryServer();
-    }
-
-    private static void startQueryServer() throws Exception {
-        PQS = new QueryServer(new String[0], UTIL.getConfiguration());
-        // Get the PQS ident for PQS to use
-        final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(PQS_PRINCIPAL, KEYTAB.getAbsolutePath());
-        PQS_EXECUTOR = Executors.newSingleThreadExecutor();
-        // Launch PQS inside the Kerberos login instead of letting PQS do it itself (which would
-        // break the HBase/HDFS logins also running in the same test case).
-        PQS_EXECUTOR.submit(new Runnable() {
-            @Override public void run() {
-                ugi.doAs(new PrivilegedAction<Void>() {
-                    @Override public Void run() {
-                        PQS.run();
-                        return null;
-                    }
-                });
-            }
-        });
-        PQS.awaitRunning();
-        PQS_PORT = PQS.getPort();
-        PQS_URL = ThinClientUtil.getConnectionUrl("localhost", PQS_PORT) + ";authentication=SPNEGO";
-    }
-
-    @AfterClass
-    public static void stopKdc() throws Exception {
-        // Remove our custom ConfigurationFactory for future tests
-        InstanceResolver.clearSingletons();
-        if (PQS_EXECUTOR != null) {
-            PQS.stop();
-            PQS_EXECUTOR.shutdown();
-            if (!PQS_EXECUTOR.awaitTermination(5, TimeUnit.SECONDS)) {
-                LOG.info("PQS didn't exit in 5 seconds, proceeding anyways.");
-            }
-        }
-        if (HBASE_CLUSTER != null) {
-            HBASE_CLUSTER.shutdown();
-            HBASE_CLUSTER.join();
-        }
-        if (UTIL != null) {
-            UTIL.shutdownMiniZKCluster();
-        }
-        if (KDC != null) {
-            KDC.stop();
-        }
-    }
-
-    @Test
-    public void testBasicReadWrite() throws Exception {
-        final Entry<String,File> user1 = getUser(1);
-        final String currentDirectory = new File(".").getAbsolutePath();
-        LOG.debug("Current working directory: " + currentDirectory);
-        LOG.debug("PQS_PORT: " + PQS_PORT);
-        LOG.debug("PQS_URL: " + PQS_URL);
-        ArrayList<String> cmdList = new ArrayList<>();
-        // This assumes the test is being run from phoenix/phoenix-queryserver
-        cmdList.add(Paths.get(currentDirectory, "src", "it", "bin", "test_phoenixdb.sh").toString());
-        cmdList.add(Paths.get(currentDirectory, "..", "python").toString());
-        cmdList.add(user1.getKey() + "@" + KDC.getRealm());
-        cmdList.add(user1.getValue().getAbsolutePath());
-        final String osName = System.getProperty("os.name").toLowerCase();
-        final Kdc kdcType;
-        final String kdcImpl = System.getProperty("PHOENIXDB_KDC_IMPL", "");
-        if (kdcImpl.isEmpty()) {
-          if (osName.indexOf("mac") >= 0) {
-            kdcType = Kdc.HEIMDAL;
-          } else {
-            kdcType = Kdc.MIT;
-          }
-        } else if (kdcImpl.trim().equalsIgnoreCase(Kdc.HEIMDAL.name())) {
-          kdcType = Kdc.HEIMDAL;
-        } else {
-          kdcType = Kdc.MIT;
-        }
-        LOG.info("Generating krb5.conf for KDC type:'{}'. OS='{}', PHOENIXDB_KDC_IMPL='{}'", kdcType, osName, kdcImpl);
-        File krb5ConfFile = null;
-        switch (kdcType) {
-            // It appears that we cannot generate a krb5.conf that is compatible with both MIT Kerberos
-            // and Heimdal Kerberos and that works with MiniKdc. MiniKdc forces a choice between either
-            // UDP or TCP for the KDC port. If we could have MiniKdc support both UDP and TCP, then we
-            // might be able to converge on a single krb5.conf for both MIT and Heimdal.
-            //
-            // With the below Heimdal configuration, MIT Kerberos will fail on a DNS lookup of the
-            // hostname "tcp/localhost" instead of stripping off the "tcp/" prefix.
-            case HEIMDAL:
-                int kdcPort = KDC.getPort();
-                LOG.info("MINIKDC PORT " + kdcPort);
-                // Render a Heimdal compatible krb5.conf
-                // Currently kinit will only try tcp if the KDC is defined as
-                // kdc = tcp/hostname:port
-                StringBuilder krb5conf = new StringBuilder();
-                krb5conf.append("[libdefaults]\n");
-                krb5conf.append("     default_realm = EXAMPLE.COM\n");
-                krb5conf.append("     udp_preference_limit = 1\n");
-                krb5conf.append("\n");
-                krb5conf.append("[realms]\n");
-                krb5conf.append("    EXAMPLE.COM = {\n");
-                krb5conf.append("       kdc = localhost:");
-                krb5conf.append(kdcPort);
-                krb5conf.append("\n");
-                krb5conf.append("       kdc = tcp/localhost:");
-                krb5conf.append(kdcPort);
-                krb5conf.append("\n");
-                krb5conf.append("    }\n");
-
-                LOG.info("Writing Heimdal style krb5.conf");
-                LOG.info(krb5conf.toString());
-                krb5ConfFile = File.createTempFile("krb5.conf", null);
-                FileOutputStream fos = new FileOutputStream(krb5ConfFile);
-                fos.write(krb5conf.toString().getBytes());
-                fos.close();
-                LOG.info("krb5.conf written to " + krb5ConfFile.getAbsolutePath());
-                cmdList.add(krb5ConfFile.getAbsolutePath());
-                break;
-            case MIT:
-                cmdList.add(System.getProperty("java.security.krb5.conf"));
-                LOG.info("Using miniKDC provided krb5.conf  " + KDC.getKrb5conf().getAbsolutePath());
-                break;
-            default:
-                throw new RuntimeException("Unhandled KDC type: " + kdcType);
-        }
-
-        cmdList.add(Integer.toString(PQS_PORT));
-        cmdList.add(Paths.get(currentDirectory, "src", "it", "bin", "test_phoenixdb.py").toString());
-
-        Process runPythonProcess = new ProcessBuilder(cmdList).start();
-        BufferedReader processOutput = new BufferedReader(new InputStreamReader(runPythonProcess.getInputStream()));
-        BufferedReader processError = new BufferedReader(new InputStreamReader(runPythonProcess.getErrorStream()));
-        int exitCode = runPythonProcess.waitFor();
-
-        // dump stdout and stderr
-        while (processOutput.ready()) {
-            LOG.info(processOutput.readLine());
-        }
-        while (processError.ready()) {
-            LOG.error(processError.readLine());
-        }
-
-        // Not managed by miniKDC so we have to clean up
-        if (krb5ConfFile != null) {
-            krb5ConfFile.delete();
-        }
-
-        assertEquals("Subprocess exited with errors", 0, exitCode);
-    }
-
-    byte[] copyBytes(byte[] src, int offset, int length) {
-        byte[] dest = new byte[length];
-        System.arraycopy(src, offset, dest, 0, length);
-        return dest;
-    }
-}
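
For reference, the Heimdal-style krb5.conf rendered by the test above comes out as
follows (EXAMPLE.COM is the MiniKdc default realm; 60123 stands in for the randomly
assigned KDC port):

    [libdefaults]
         default_realm = EXAMPLE.COM
         udp_preference_limit = 1

    [realms]
        EXAMPLE.COM = {
           kdc = localhost:60123
           kdc = tcp/localhost:60123
        }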
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
deleted file mode 100644
index db08908..0000000
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.Statement;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
-import org.apache.calcite.avatica.server.ServerCustomizer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.queryserver.server.ServerCustomizersFactory;
-import org.apache.phoenix.util.InstanceResolver;
-import org.eclipse.jetty.security.ConstraintMapping;
-import org.eclipse.jetty.security.ConstraintSecurityHandler;
-import org.eclipse.jetty.security.HashLoginService;
-import org.eclipse.jetty.security.authentication.BasicAuthenticator;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.util.security.Constraint;
-import org.eclipse.jetty.util.security.Credential;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ServerCustomizersIT extends BaseHBaseManagedTimeIT {
-    private static final Logger LOG = LoggerFactory.getLogger(ServerCustomizersIT.class);
-    private static final String USER_AUTHORIZED = "user3";
-    private static final String USER_NOT_AUTHORIZED = "user1";
-    private static final String USER_PW = "s3cr3t";
-
-    private static QueryServerTestUtil PQS_UTIL;
-
-    @Rule
-    public ExpectedException expected = ExpectedException.none();
-
-    @BeforeClass
-    public static void setup() throws Exception {
-        Configuration conf = getTestClusterConfig();
-        conf.set(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED, "true");
-        PQS_UTIL = new QueryServerTestUtil(conf);
-        PQS_UTIL.startLocalHBaseCluster(ServerCustomizersIT.class);
-        // Register a test jetty server customizer
-        InstanceResolver.clearSingletons();
-        InstanceResolver.getSingleton(ServerCustomizersFactory.class, new ServerCustomizersFactory() {
-            @Override
-            public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
-                                                                          AvaticaServerConfiguration avaticaServerConfiguration) {
-                return Collections.<ServerCustomizer<Server>>singletonList(new TestServerCustomizer());
-            }
-        });
-        PQS_UTIL.startQueryServer();
-    }
-
-    @AfterClass
-    public static void teardown() throws Exception {
-        // Remove custom singletons for future tests
-        InstanceResolver.clearSingletons();
-        if (PQS_UTIL != null) {
-            PQS_UTIL.stopQueryServer();
-            PQS_UTIL.stopLocalHBaseCluster();
-        }
-    }
-
-    @Test
-    public void testUserAuthorized() throws Exception {
-        try (Connection conn = DriverManager.getConnection(PQS_UTIL.getUrl(
-                getBasicAuthParams(USER_AUTHORIZED)));
-                Statement stmt = conn.createStatement()) {
-            Assert.assertFalse("user3 should have access", stmt.execute(
-                "create table "+ServerCustomizersIT.class.getSimpleName()+" (pk integer not null primary key)"));
-        }
-    }
-
-    @Test
-    public void testUserNotAuthorized() throws Exception {
-        expected.expect(RuntimeException.class);
-        expected.expectMessage("HTTP/401");
-        try (Connection conn = DriverManager.getConnection(PQS_UTIL.getUrl(
-                getBasicAuthParams(USER_NOT_AUTHORIZED)));
-                Statement stmt = conn.createStatement()) {
-            Assert.assertFalse(stmt.execute(
-                    "select access from database"));
-        }
-    }
-
-    private Map<String, String> getBasicAuthParams(String user) {
-        Map<String, String> params = new HashMap<>();
-        params.put("authentication", "BASIC");
-        params.put("avatica_user", user);
-        params.put("avatica_password", USER_PW);
-        return params;
-    }
-
-    /**
-     * Contrived customizer that enables BASIC auth for a single user
-     */
-    public static class TestServerCustomizer implements ServerCustomizer<Server> {
-        @Override
-        public void customize(Server server) {
-            LOG.debug("Customizing server to allow requests for {}", USER_AUTHORIZED);
-            HashLoginService login = new HashLoginService();
-            login.putUser(USER_AUTHORIZED, Credential.getCredential(USER_PW), new String[] {"users"});
-            login.setName("users");
-
-            Constraint constraint = new Constraint();
-            constraint.setName(Constraint.__BASIC_AUTH);
-            constraint.setRoles(new String[]{"users"});
-            constraint.setAuthenticate(true);
-
-            ConstraintMapping cm = new ConstraintMapping();
-            cm.setConstraint(constraint);
-            cm.setPathSpec("/*");
-
-            ConstraintSecurityHandler security = new ConstraintSecurityHandler();
-            security.setAuthenticator(new BasicAuthenticator());
-            security.setRealmName("users");
-            security.addConstraintMapping(cm);
-            security.setLoginService(login);
-
-            // chain the PQS handler to security
-            security.setHandler(server.getHandlers()[0]);
-            server.setHandler(security);
-        }
-    }
-}
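
As a rough usage sketch (not part of the test itself), a thin client reaches the
BASIC-auth-protected server above with a URL carrying the same parameters that
getBasicAuthParams() builds; the host and port here are hypothetical:

    // Minimal sketch, assuming PQS is listening on localhost:8765 with
    // TestServerCustomizer installed.
    String url = ThinClientUtil.getConnectionUrl("localhost", 8765)
        + ";authentication=BASIC;avatica_user=user3;avatica_password=s3cr3t";
    try (Connection conn = DriverManager.getConnection(url);
         Statement stmt = conn.createStatement()) {
        stmt.execute("create table t (pk integer not null primary key)");
    }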
diff --git a/phoenix-queryserver/src/it/resources/log4j.properties b/phoenix-queryserver/src/it/resources/log4j.properties
deleted file mode 100644
index f90cf16..0000000
--- a/phoenix-queryserver/src/it/resources/log4j.properties
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hbase.root.logger=DEBUG,console
-hbase.log.dir=.
-hbase.log.file=hbase.log
-
-# Define the root logger to the system property "hbase.root.logger".
-log4j.rootLogger=${hbase.root.logger}
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
-
-# Roll over at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n
-
-# Custom Logging levels
-
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-
-log4j.logger.org.apache.zookeeper=ERROR
-
-# Suppresses junk from minikdc
-log4j.logger.org.mortbay.log=WARN
-log4j.logger.org.apache.directory=WARN
-log4j.logger.net.sf.ehcache=WARN
-# Suppress the "no group for user" spamming
-log4j.logger.org.apache.hadoop.security.UserGroupInformation=ERROR
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/loadbalancer/service/LoadBalanceZookeeperConf.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/loadbalancer/service/LoadBalanceZookeeperConf.java
deleted file mode 100644
index afce5be..0000000
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/loadbalancer/service/LoadBalanceZookeeperConf.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.loadbalancer.service;
-
-import com.google.common.net.HostAndPort;
-import org.apache.zookeeper.data.ACL;
-
-import java.util.List;
-
-
-public interface LoadBalanceZookeeperConf {
-
-    String getQueryServerBasePath();
-
-    String getServiceName();
-
-    String getZkConnectString();
-
-    List<ACL> getAcls();
-
-    String getParentPath();
-
-    String getFullPathToNode(HostAndPort hostAndPort);
-
-
-}
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/register/Registry.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/register/Registry.java
deleted file mode 100644
index 598fc5a..0000000
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/register/Registry.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.register;
-
-
-import org.apache.phoenix.loadbalancer.service.LoadBalanceZookeeperConf;
-
-/**
- * Registry interface for registering and un-registering
- * a query server instance with a service locator.
- */
-public interface Registry {
-
-    /**
-     * Unregister the server from ZooKeeper. All cleanup
-     * is done in this method.
-     * @throws Exception
-     */
-    void unRegisterServer() throws Exception;
-
-    /**
-     * Registers the server with the service locator (ZooKeeper).
-     * @param configuration - load balancer ZooKeeper configuration
-     * @param port - port for the PQS server
-     * @param connectString - ZooKeeper connect string
-     * @param pqsHost - host for the PQS server
-     * @throws Exception
-     */
-    void registerServer(LoadBalanceZookeeperConf configuration, int port,
-            String connectString, String pqsHost) throws Exception;
-
-}
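
Registry implementations, like LoadBalanceZookeeperConf providers, are discovered at
runtime via java.util.ServiceLoader; see getRegistry() and
getLoadBalanceConfiguration() in QueryServer below. A minimal, hypothetical no-op
provider would look like:

    // Hypothetical provider; a real one would manage an ephemeral znode.
    public class NoOpRegistry implements Registry {
        @Override
        public void registerServer(LoadBalanceZookeeperConf configuration, int port,
                String connectString, String pqsHost) {
            // no-op: a real implementation creates a znode for pqsHost:port
        }

        @Override
        public void unRegisterServer() {
            // no-op: a real implementation deletes the znode and closes its client
        }
    }

and would be advertised by listing its fully qualified class name in
META-INF/services/org.apache.phoenix.queryserver.register.Registry.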
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
deleted file mode 100644
index 33fd590..0000000
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-
-
-public interface AvaticaServerConfigurationFactory {
-
-    AvaticaServerConfiguration getAvaticaServerConfiguration(Configuration conf, UserGroupInformation ugi);
-
-    class AvaticaServerConfigurationFactoryImpl implements AvaticaServerConfigurationFactory {
-
-        @Override
-        public AvaticaServerConfiguration getAvaticaServerConfiguration(Configuration conf, UserGroupInformation ugi) {
-            return null;
-        }
-    }
-
-}
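
The default factory above intentionally returns null, in which case PQS falls back to
its SPNEGO/Kerberos wiring. A custom factory is picked up through InstanceResolver,
the same hook the integration tests use; a hypothetical override looks like:

    // Hypothetical: register a custom factory before QueryServer is built.
    // QueryServer.createAvaticaServerConfig() resolves it via InstanceResolver.
    InstanceResolver.getSingleton(AvaticaServerConfigurationFactory.class,
        new AvaticaServerConfigurationFactory() {
            @Override
            public AvaticaServerConfiguration getAvaticaServerConfiguration(
                    Configuration conf, UserGroupInformation ugi) {
                return myCustomConfiguration; // hypothetical instance
            }
        });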
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactory.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactory.java
deleted file mode 100644
index 02344a3..0000000
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactory.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import org.apache.calcite.avatica.Meta;
-import org.apache.hadoop.conf.Configurable;
-
-/**
- * A {@link Meta.Factory} that can also respect Hadoop
- * {@link org.apache.hadoop.conf.Configuration} objects.
- */
-public interface PhoenixMetaFactory extends Meta.Factory, Configurable {
-}
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactoryImpl.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactoryImpl.java
deleted file mode 100644
index c74d2c9..0000000
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/PhoenixMetaFactoryImpl.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import com.google.common.base.Preconditions;
-import org.apache.calcite.avatica.Meta;
-import org.apache.calcite.avatica.jdbc.JdbcMeta;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.util.QueryUtil;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-/**
- * Bridge between Phoenix and Avatica.
- */
-public class PhoenixMetaFactoryImpl extends Configured implements PhoenixMetaFactory {
-
-  // invoked via reflection
-  public PhoenixMetaFactoryImpl() {
-    super(HBaseConfiguration.create());
-  }
-
-  // invoked via reflection
-  public PhoenixMetaFactoryImpl(Configuration conf) {
-    super(conf);
-  }
-
-  @Override
-  public Meta create(List<String> args) {
-    Configuration conf = Preconditions.checkNotNull(getConf(), "Configuration must not be null.");
-    Properties info = new Properties();
-    info.putAll(conf.getValByRegex("avatica.*"));
-    try {
-      final String url;
-      if (args.size() == 0) {
-        url = QueryUtil.getConnectionUrl(info, conf);
-      } else if (args.size() == 1) {
-        url = args.get(0);
-      } else {
-        throw new RuntimeException(
-            "0 or 1 argument expected. Received " + Arrays.toString(args.toArray()));
-      }
-      // TODO: what about -D configs passed in from cli? How do they get pushed down?
-      return new JdbcMeta(url, info);
-    } catch (SQLException | ClassNotFoundException e) {
-      throw new RuntimeException(e);
-    }
-  }
-}
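
This mirrors the path QueryServer.setHandler() takes when it builds the Avatica
handler. A standalone sketch, assuming an HBase quorum reachable through the default
configuration:

    Configuration conf = HBaseConfiguration.create();
    PhoenixMetaFactory factory = new PhoenixMetaFactoryImpl(conf);
    // With no args, the JDBC URL is derived from conf via QueryUtil.getConnectionUrl()
    Meta meta = factory.create(Collections.<String>emptyList());
    Service service = new LocalService(meta); // handed to the Avatica HttpServer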
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/QueryServer.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/QueryServer.java
deleted file mode 100644
index 4766394..0000000
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/QueryServer.java
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-
-import org.apache.calcite.avatica.Meta;
-import org.apache.calcite.avatica.remote.Driver;
-import org.apache.calcite.avatica.remote.LocalService;
-import org.apache.calcite.avatica.remote.Service;
-import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
-import org.apache.calcite.avatica.server.DoAsRemoteUserCallback;
-import org.apache.calcite.avatica.server.HttpServer;
-import org.apache.calcite.avatica.server.RemoteUserExtractor;
-import org.apache.calcite.avatica.server.RemoteUserExtractionException;
-import org.apache.calcite.avatica.server.HttpRequestRemoteUserExtractor;
-import org.apache.calcite.avatica.server.HttpQueryStringParameterRemoteUserExtractor;
-import org.apache.calcite.avatica.server.ServerCustomizer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.loadbalancer.service.LoadBalanceZookeeperConf;
-import org.apache.phoenix.queryserver.register.Registry;
-import org.apache.phoenix.util.InstanceResolver;
-import org.eclipse.jetty.server.Server;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.RuntimeMXBean;
-import java.net.InetAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.ServiceConfigurationError;
-import java.util.ServiceLoader;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-
-import javax.servlet.http.HttpServletRequest;
-
-import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_QUERY_SERVER_CUSTOM_AUTH_ENABLED;
-
-/**
- * A query server for Phoenix over Calcite's Avatica.
- */
-public final class QueryServer extends Configured implements Tool, Runnable {
-
-  protected static final Log LOG = LogFactory.getLog(QueryServer.class);
-
-  private final String[] argv;
-  private final CountDownLatch runningLatch = new CountDownLatch(1);
-  private HttpServer server = null;
-  private int retCode = 0;
-  private Throwable t = null;
-  private Registry registry;
-
-  /**
-   * Log information about the currently running JVM.
-   */
-  public static void logJVMInfo() {
-    // Print out vm stats before starting up.
-    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
-    if (runtime != null) {
-      LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +
-              runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
-      LOG.info("vmInputArguments=" + runtime.getInputArguments());
-    }
-  }
-
-  /**
-   * Logs information about the currently running JVM process, including
-   * its environment variables. Logging of env vars can be disabled by
-   * setting {@code "phoenix.envvars.logging.disabled"} to {@code "true"}.
-   * <p>If enabled, you can also exclude environment variables containing
-   * certain substrings by setting {@code "phoenix.envvars.logging.skipwords"}
-   * to a comma-separated list of such substrings.
-   */
-  public static void logProcessInfo(Configuration conf) {
-    // log environment variables unless asked not to
-    if (conf == null || !conf.getBoolean(QueryServices.QUERY_SERVER_ENV_LOGGING_ATTRIB, false)) {
-      Set<String> skipWords = new HashSet<String>(
-          QueryServicesOptions.DEFAULT_QUERY_SERVER_SKIP_WORDS);
-      if (conf != null) {
-        String[] confSkipWords = conf.getStrings(
-            QueryServices.QUERY_SERVER_ENV_LOGGING_SKIPWORDS_ATTRIB);
-        if (confSkipWords != null) {
-          skipWords.addAll(Arrays.asList(confSkipWords));
-        }
-      }
-
-      nextEnv:
-      for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
-        String key = entry.getKey().toLowerCase();
-        String value = entry.getValue().toLowerCase();
-        // exclude variables which may contain skip words
-        for(String skipWord : skipWords) {
-          if (key.contains(skipWord) || value.contains(skipWord))
-            continue nextEnv;
-        }
-        LOG.info("env:"+entry);
-      }
-    }
-    // and JVM info
-    logJVMInfo();
-  }
-
-  /** Constructor for use from {@link org.apache.hadoop.util.ToolRunner}. */
-  public QueryServer() {
-    this(null, null);
-  }
-
-  /** Constructor for use as {@link java.lang.Runnable}. */
-  public QueryServer(String[] argv, Configuration conf) {
-    this.argv = argv;
-    setConf(conf);
-  }
-
-  /**
-   * @return the port number this instance is bound to, or {@code -1} if the server is not running.
-   */
-  @VisibleForTesting
-  public int getPort() {
-    if (server == null) return -1;
-    return server.getPort();
-  }
-
-  /**
-   * @return the return code from running as a {@link Tool}.
-   */
-  @VisibleForTesting
-  public int getRetCode() {
-    return retCode;
-  }
-
-  /**
-   * @return the throwable from an unsuccessful run, or null otherwise.
-   */
-  @VisibleForTesting
-  public Throwable getThrowable() {
-    return t;
-  }
-
-  /** Calling thread waits until the server is running. */
-  public void awaitRunning() throws InterruptedException {
-    runningLatch.await();
-  }
-
-  /** Calling thread waits until the server is running, or until the timeout elapses. */
-  public void awaitRunning(long timeout, TimeUnit unit) throws InterruptedException {
-    runningLatch.await(timeout, unit);
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    logProcessInfo(getConf());
-    final boolean loadBalancerEnabled = getConf().getBoolean(QueryServices.PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED,
-            QueryServicesOptions.DEFAULT_PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED);
-    try {
-      final boolean isKerberos = "kerberos".equalsIgnoreCase(getConf().get(
-          QueryServices.QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB));
-      final boolean disableSpnego = getConf().getBoolean(QueryServices.QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB,
-              QueryServicesOptions.DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED);
-      String hostname;
-      final boolean disableLogin = getConf().getBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN,
-              QueryServicesOptions.DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN);
-
-      // handle secure cluster credentials
-      if (isKerberos && !disableLogin) {
-        hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
-            getConf().get(QueryServices.QUERY_SERVER_DNS_INTERFACE_ATTRIB, "default"),
-            getConf().get(QueryServices.QUERY_SERVER_DNS_NAMESERVER_ATTRIB, "default")));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Login to " + hostname + " using " + getConf().get(
-              QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB)
-              + " and principal " + getConf().get(
-                  QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB) + ".");
-        }
-        SecurityUtil.login(getConf(), QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
-            QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB, hostname);
-        LOG.info("Login successful.");
-      } else {
-        hostname = InetAddress.getLocalHost().getHostName();
-        LOG.info(" Kerberos is off and hostname is : "+hostname);
-      }
-
-      int port = getConf().getInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB,
-          QueryServicesOptions.DEFAULT_QUERY_SERVER_HTTP_PORT);
-      LOG.debug("Listening on port " + port);
-
-      // Update proxyuser configuration for impersonation
-      ProxyUsers.refreshSuperUserGroupsConfiguration(getConf());
-
-      // Start building the Avatica HttpServer
-      final HttpServer.Builder<Server>
-              builder =
-              HttpServer.Builder.<Server>newBuilder().withPort(port);
-
-      UserGroupInformation ugi = getUserGroupInformation();
-
-      AvaticaServerConfiguration avaticaServerConfiguration = null;
-
-      // RemoteUserCallbacks and RemoteUserExtractor are part of AvaticaServerConfiguration
-      // Hence they should be customizable when using QUERY_SERVER_CUSTOM_AUTH_ENABLED
-      // Handlers should be customized via ServerCustomizers
-      if (getConf().getBoolean(QueryServices.QUERY_SERVER_CUSTOM_AUTH_ENABLED,
-              DEFAULT_QUERY_SERVER_CUSTOM_AUTH_ENABLED)) {
-        avaticaServerConfiguration = enableCustomAuth(builder, getConf(), ugi);
-      } else {
-        if (isKerberos) {
-          // Enable client auth when using Kerberos auth for HBase
-          configureClientAuthentication(builder, disableSpnego, ugi);
-        }
-        setRemoteUserExtractorIfNecessary(builder, getConf());
-        setHandler(args, builder);
-      }
-
-      enableServerCustomizersIfNecessary(builder, getConf(), avaticaServerConfiguration);
-
-      // Build and start the HttpServer
-      server = builder.build();
-      server.start();
-      if (loadBalancerEnabled) {
-        registerToServiceProvider(hostname);
-      }
-      runningLatch.countDown();
-      server.join();
-      return 0;
-    } catch (Throwable t) {
-      LOG.fatal("Unrecoverable service error. Shutting down.", t);
-      this.t = t;
-      return -1;
-    } finally {
-      if (loadBalancerEnabled) {
-        unRegister();
-      }
-    }
-  }
-
-  @VisibleForTesting
-  void configureClientAuthentication(final HttpServer.Builder builder, boolean disableSpnego, UserGroupInformation ugi) throws IOException {
-
-    // Enable SPNEGO for client authentication unless it's explicitly disabled
-    if (!disableSpnego) {
-      configureSpnegoAuthentication(builder, ugi);
-    }
-    configureCallBack(builder, ugi);
-  }
-
-  @VisibleForTesting
-  void configureSpnegoAuthentication(HttpServer.Builder builder, UserGroupInformation ugi) {
-    String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
-    File keytab = new File(keytabPath);
-    String httpKeytabPath =
-            getConf().get(QueryServices.QUERY_SERVER_HTTP_KEYTAB_FILENAME_ATTRIB, null);
-    String httpPrincipal =
-            getConf().get(QueryServices.QUERY_SERVER_KERBEROS_HTTP_PRINCIPAL_ATTRIB, null);
-    // Backwards compat for a configuration key change
-    if (httpPrincipal == null) {
-      httpPrincipal =
-              getConf().get(QueryServices.QUERY_SERVER_KERBEROS_HTTP_PRINCIPAL_ATTRIB_LEGACY, null);
-    }
-    File httpKeytab = null;
-    if (null != httpKeytabPath) {
-        httpKeytab = new File(httpKeytabPath);
-    }
-
-    String realmsString = getConf().get(QueryServices.QUERY_SERVER_KERBEROS_ALLOWED_REALMS, null);
-    String[] additionalAllowedRealms = null;
-    if (null != realmsString) {
-      additionalAllowedRealms = StringUtils.split(realmsString, ',');
-    }
-    if (null != httpKeytabPath && null != httpPrincipal) {
-      builder.withSpnego(httpPrincipal, additionalAllowedRealms).withAutomaticLogin(httpKeytab);
-    } else {
-      builder.withSpnego(ugi.getUserName(), additionalAllowedRealms)
-              .withAutomaticLogin(keytab);
-    }
-  }
-
-  @VisibleForTesting
-  UserGroupInformation getUserGroupInformation() throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    LOG.debug("Current user is " + ugi);
-    if (!ugi.hasKerberosCredentials()) {
-      ugi = UserGroupInformation.getLoginUser();
-      LOG.debug("Current user does not have Kerberos credentials, using instead " + ugi);
-    }
-    return ugi;
-  }
-
-  @VisibleForTesting
-  void configureCallBack(HttpServer.Builder<Server> builder, UserGroupInformation ugi) {
-    builder.withImpersonation(new PhoenixDoAsCallback(ugi, getConf()));
-  }
-
-  private void setHandler(String[] args, HttpServer.Builder<Server> builder) throws Exception {
-    Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
-            QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB, PhoenixMetaFactoryImpl.class,
-            PhoenixMetaFactory.class);
-    PhoenixMetaFactory factory =
-            factoryClass.getDeclaredConstructor(Configuration.class).newInstance(getConf());
-    Meta meta = factory.create(Arrays.asList(args));
-    Service service = new LocalService(meta);
-    builder.withHandler(service, getSerialization(getConf()));
-  }
-
-  public synchronized void stop() {
-    server.stop();
-  }
-
-  public boolean registerToServiceProvider(String hostName)  {
-
-    boolean success = true;
-    try {
-      LoadBalanceZookeeperConf loadBalanceConfiguration = getLoadBalanceConfiguration();
-      Preconditions.checkNotNull(loadBalanceConfiguration);
-      this.registry = getRegistry();
-      Preconditions.checkNotNull(registry);
-      String zkConnectString = loadBalanceConfiguration.getZkConnectString();
-      this.registry.registerServer(loadBalanceConfiguration, getPort(), zkConnectString, hostName);
-    } catch(Throwable ex){
-      LOG.debug("Caught an error trying to register with the load balancer", ex);
-      success = false;
-    } finally {
-      return success;
-    }
-  }
-
-
-  public LoadBalanceZookeeperConf getLoadBalanceConfiguration()  {
-    ServiceLoader<LoadBalanceZookeeperConf> serviceLocator= ServiceLoader.load(LoadBalanceZookeeperConf.class);
-    LoadBalanceZookeeperConf zookeeperConfig = null;
-    try {
-      if (serviceLocator.iterator().hasNext())
-        zookeeperConfig = serviceLocator.iterator().next();
-    } catch(ServiceConfigurationError ex) {
-      LOG.debug("Unable to locate the service provider for load balancer configuration", ex);
-    } finally {
-      return zookeeperConfig;
-    }
-  }
-
-  public Registry getRegistry()  {
-    ServiceLoader<Registry> serviceLocator= ServiceLoader.load(Registry.class);
-    Registry registry = null;
-    try {
-      if (serviceLocator.iterator().hasNext())
-        registry = serviceLocator.iterator().next();
-    } catch(ServiceConfigurationError ex) {
-      LOG.debug("Unable to locate the zookeeper registry for the load balancer", ex);
-    } finally {
-      return registry;
-    }
-  }
-
-  public boolean unRegister()  {
-    boolean success = true;
-    try {
-      registry.unRegisterServer();
-    }catch(Throwable ex) {
-      LOG.debug("Caught an error while de-registering the query server from the load balancer",ex);
-      success = false;
-    } finally {
-      return success;
-    }
-  }
-  /**
-   * Parses the serialization method from the configuration.
-   *
-   * @param conf The configuration to parse
-   * @return The Serialization method
-   */
-  Driver.Serialization getSerialization(Configuration conf) {
-    String serializationName = conf.get(QueryServices.QUERY_SERVER_SERIALIZATION_ATTRIB,
-        QueryServicesOptions.DEFAULT_QUERY_SERVER_SERIALIZATION);
-
-    Driver.Serialization serialization;
-    // Use the serialization method provided in the configuration
-    try {
-      serialization = Driver.Serialization.valueOf(serializationName);
-    } catch (Exception e) {
-      LOG.error("Unknown message serialization type for " + serializationName);
-      throw e;
-    }
-
-    return serialization;
-  }
-
-  @Override public void run() {
-    try {
-      retCode = run(argv);
-    } catch (Exception e) {
-      // already logged
-    }
-  }
-
-  // add remoteUserExtractor to builder if enabled
-  @VisibleForTesting
-  public void setRemoteUserExtractorIfNecessary(HttpServer.Builder builder, Configuration conf) {
-    if (conf.getBoolean(QueryServices.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB,
-            QueryServicesOptions.DEFAULT_QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR)) {
-      builder.withRemoteUserExtractor(createRemoteUserExtractor(conf));
-    }
-  }
-
-  @VisibleForTesting
-  public void enableServerCustomizersIfNecessary(HttpServer.Builder<Server> builder,
-                                                 Configuration conf, AvaticaServerConfiguration avaticaServerConfiguration) {
-    if (conf.getBoolean(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED,
-            QueryServicesOptions.DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED)) {
-      builder.withServerCustomizers(createServerCustomizers(conf, avaticaServerConfiguration), Server.class);
-    }
-  }
-
-  @VisibleForTesting
-  public AvaticaServerConfiguration enableCustomAuth(HttpServer.Builder<Server> builder,
-                                                     Configuration conf, UserGroupInformation ugi) {
-    AvaticaServerConfiguration avaticaServerConfiguration = createAvaticaServerConfig(conf, ugi);
-    builder.withCustomAuthentication(avaticaServerConfiguration);
-    return avaticaServerConfiguration;
-  }
-
-  private static final RemoteUserExtractorFactory DEFAULT_USER_EXTRACTOR =
-    new RemoteUserExtractorFactory.RemoteUserExtractorFactoryImpl();
-
-  private static final ServerCustomizersFactory DEFAULT_SERVER_CUSTOMIZERS =
-    new ServerCustomizersFactory.ServerCustomizersFactoryImpl();
-
-  private static final AvaticaServerConfigurationFactory DEFAULT_SERVER_CONFIG =
-    new AvaticaServerConfigurationFactory.AvaticaServerConfigurationFactoryImpl();
-
-  @VisibleForTesting
-  RemoteUserExtractor createRemoteUserExtractor(Configuration conf) {
-    RemoteUserExtractorFactory factory =
-        InstanceResolver.getSingleton(RemoteUserExtractorFactory.class, DEFAULT_USER_EXTRACTOR);
-    return factory.createRemoteUserExtractor(conf);
-  }
-
-  @VisibleForTesting
-  List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf, AvaticaServerConfiguration avaticaServerConfiguration) {
-    ServerCustomizersFactory factory =
-      InstanceResolver.getSingleton(ServerCustomizersFactory.class, DEFAULT_SERVER_CUSTOMIZERS);
-    return factory.createServerCustomizers(conf, avaticaServerConfiguration);
-  }
-
-  @VisibleForTesting
-  AvaticaServerConfiguration createAvaticaServerConfig(Configuration conf, UserGroupInformation ugi) {
-    AvaticaServerConfigurationFactory factory =
-            InstanceResolver.getSingleton(AvaticaServerConfigurationFactory.class, DEFAULT_SERVER_CONFIG);
-    return factory.getAvaticaServerConfiguration(conf, ugi);
-  }
-
-  /**
-   * Extracts the end user, preferring the configured query string parameter
-   * and falling back to the HTTP request's authenticated remote user.
-   */
-  static class PhoenixRemoteUserExtractor implements RemoteUserExtractor {
-    private final HttpQueryStringParameterRemoteUserExtractor paramRemoteUserExtractor;
-    private final HttpRequestRemoteUserExtractor requestRemoteUserExtractor;
-    private final String userExtractParam;
-
-    public PhoenixRemoteUserExtractor(Configuration conf) {
-      this.requestRemoteUserExtractor = new HttpRequestRemoteUserExtractor();
-      this.userExtractParam = conf.get(QueryServices.QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM,
-              QueryServicesOptions.DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM);
-      this.paramRemoteUserExtractor = new HttpQueryStringParameterRemoteUserExtractor(userExtractParam);
-    }
-
-    @Override
-    public String extract(HttpServletRequest request) throws RemoteUserExtractionException {
-      if (request.getParameter(userExtractParam) != null) {
-        String extractedUser = paramRemoteUserExtractor.extract(request);
-        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(request.getRemoteUser());
-        UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(extractedUser, ugi);
-
-        // Check if this user is allowed to be impersonated.
-        // Will throw AuthorizationException if the impersonation as this user is not allowed
-        try {
-          ProxyUsers.authorize(proxyUser, request.getRemoteAddr());
-          return extractedUser;
-        } catch (AuthorizationException e) {
-          throw new RemoteUserExtractionException(e.getMessage(), e);
-        }
-      } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("The parameter (" + userExtractParam + ") used to extract the remote user doesn't exist in the request.");
-        }
-        return requestRemoteUserExtractor.extract(request);
-      }
-
-    }
-  }
-
-  /**
-   * Callback to run the Avatica server action as the remote (proxy) user instead of the server.
-   */
-  public static class PhoenixDoAsCallback implements DoAsRemoteUserCallback {
-    private final UserGroupInformation serverUgi;
-    private final LoadingCache<String,UserGroupInformation> ugiCache;
-
-    public PhoenixDoAsCallback(UserGroupInformation serverUgi, Configuration conf) {
-      this.serverUgi = Objects.requireNonNull(serverUgi);
-      this.ugiCache = CacheBuilder.newBuilder()
-          .initialCapacity(conf.getInt(QueryServices.QUERY_SERVER_UGI_CACHE_INITIAL_SIZE,
-                  QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_INITIAL_SIZE))
-          .concurrencyLevel(conf.getInt(QueryServices.QUERY_SERVER_UGI_CACHE_CONCURRENCY,
-                  QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_CONCURRENCY))
-          .maximumSize(conf.getLong(QueryServices.QUERY_SERVER_UGI_CACHE_MAX_SIZE,
-                  QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_MAX_SIZE))
-          .build(new UgiCacheLoader(this.serverUgi));
-    }
-
-    @Override
-    public <T> T doAsRemoteUser(String remoteUserName, String remoteAddress,
-        final Callable<T> action) throws Exception {
-      // We are guaranteed by Avatica that the `remoteUserName` is properly authenticated by the
-      // time this method is called. We don't have to verify the wire credentials; we can assume
-      // the user provided valid credentials for the identity it claimed.
-
-      // Proxy this user on top of the server's user (the real user). Get a cached instance, the
-      // LoadingCache will create a new instance for us if one isn't cached.
-      UserGroupInformation proxyUser = createProxyUser(remoteUserName);
-
-      // Execute the actual call as this proxy user
-      return proxyUser.doAs(new PrivilegedExceptionAction<T>() {
-        @Override
-        public T run() throws Exception {
-          return action.call();
-        }
-      });
-    }
-
-      @VisibleForTesting
-      UserGroupInformation createProxyUser(String remoteUserName) throws ExecutionException {
-          // PHOENIX-3164 UGI's hashCode and equals methods rely on reference checks, not
-          // value-based checks. We need to make sure we return the same UGI instance for a remote
-          // user, otherwise downstream code in Phoenix and HBase may not treat two of the same
-          // calls from one user as equivalent.
-          return ugiCache.get(remoteUserName);
-      }
-
-      @VisibleForTesting
-      LoadingCache<String,UserGroupInformation> getCache() {
-          return ugiCache;
-      }
-  }
-
-  /**
-   * CacheLoader implementation which creates a "proxy" UGI instance for the given user name.
-   */
-  static class UgiCacheLoader extends CacheLoader<String,UserGroupInformation> {
-      private final UserGroupInformation serverUgi;
-
-      public UgiCacheLoader(UserGroupInformation serverUgi) {
-          this.serverUgi = Objects.requireNonNull(serverUgi);
-      }
-
-      @Override
-      public UserGroupInformation load(String remoteUserName) throws Exception {
-          return UserGroupInformation.createProxyUser(remoteUserName, serverUgi);
-      }
-  }
-
-  public static void main(String[] argv) throws Exception {
-    int ret = ToolRunner.run(HBaseConfiguration.create(), new QueryServer(), argv);
-    System.exit(ret);
-  }
-}
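
Putting the pieces together, a hedged launch sketch that enables the
PhoenixRemoteUserExtractor path plus Hadoop's standard impersonation rules (the
"pqsuser" principal and the wildcard host/group values are hypothetical):

    Configuration conf = HBaseConfiguration.create();
    // Honor the remote-user query parameter (PhoenixRemoteUserExtractor)
    conf.setBoolean(QueryServices.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB, true);
    // Standard Hadoop proxyuser rules, enforced via ProxyUsers.authorize()
    conf.set("hadoop.proxyuser.pqsuser.hosts", "*");
    conf.set("hadoop.proxyuser.pqsuser.groups", "*");
    System.exit(ToolRunner.run(conf, new QueryServer(), new String[0]));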
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactory.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactory.java
deleted file mode 100644
index ff5e0d2..0000000
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactory.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import org.apache.calcite.avatica.server.RemoteUserExtractor;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Creates remote user extractors.
- */
-public interface RemoteUserExtractorFactory {
-
-  RemoteUserExtractor createRemoteUserExtractor(Configuration conf);
-
-  class RemoteUserExtractorFactoryImpl implements RemoteUserExtractorFactory {
-    @Override
-    public RemoteUserExtractor createRemoteUserExtractor(Configuration conf) {
-      return new QueryServer.PhoenixRemoteUserExtractor(conf);
-    }
-  }
-}
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/ServerCustomizersFactory.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/ServerCustomizersFactory.java
deleted file mode 100644
index 942660a..0000000
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/ServerCustomizersFactory.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
-import org.apache.calcite.avatica.server.ServerCustomizer;
-import org.apache.hadoop.conf.Configuration;
-import org.eclipse.jetty.server.Server;
-
-/**
- * Creates customizers for the underlying Avatica HTTP server.
- * Allows for fine-grained control of authentication, etc.
- */
-public interface ServerCustomizersFactory {
-    /**
-     * Creates a list of customizers that will customize the server.
-     * @param conf Configuration to use
-     * @param avaticaServerConfiguration configuration to use when custom auth is enabled
-     * @return List of server customizers
-     */
-    List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf, AvaticaServerConfiguration avaticaServerConfiguration);
-
-    /**
-     * Factory that creates an empty list of customizers.
-     */
-    class ServerCustomizersFactoryImpl implements ServerCustomizersFactory {
-        private static final List<ServerCustomizer<Server>> EMPTY_LIST = Collections.emptyList();
-        @Override
-        public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
-                                                                      AvaticaServerConfiguration avaticaServerConfiguration) {
-            return EMPTY_LIST;
-        }
-    }
-}
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/DriverCohabitationTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/DriverCohabitationTest.java
deleted file mode 100644
index 1df6d2c..0000000
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/DriverCohabitationTest.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix;
-
-import org.apache.phoenix.queryserver.client.ThinClientUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.junit.Test;
-
-import java.sql.Driver;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.Collections;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Ensure the "thick" Phoenix driver and its "thin" counterpart can coexist on
- * the same classpath.
- */
-public class DriverCohabitationTest {
-
-  @Test
-  public void testDriverCohabitation() throws SQLException {
-    Driver thickDriver = null;
-    Driver thinDriver = null;
-
-    for (Driver d : Collections.list(DriverManager.getDrivers())) {
-      if (d instanceof org.apache.phoenix.jdbc.PhoenixDriver) {
-        thickDriver = d;
-      } else if (d instanceof org.apache.phoenix.queryserver.client.Driver) {
-        thinDriver = d;
-      }
-    }
-    assertNotNull("Thick driver not registered with DriverManager.", thickDriver);
-    assertNotNull("Thin driver not registered with DriverManager.", thinDriver);
-
-    final String thickUrl = QueryUtil.getUrl("localhost");
-    final String thinUrl = ThinClientUtil.getConnectionUrl("localhost", 1234);
-    assertTrue("Thick driver should accept connections like " + thickUrl,
-        thickDriver.acceptsURL(thickUrl));
-    assertFalse("Thick driver should reject connections like " + thinUrl,
-        thickDriver.acceptsURL(thinUrl));
-    assertTrue("Thin driver should accept connections like " + thinUrl,
-        thinDriver.acceptsURL(thinUrl));
-    assertFalse("Thin driver should reject connections like " + thickUrl,
-        thinDriver.acceptsURL(thickUrl));
-  }
-}
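
For readers unfamiliar with the two URL shapes the assertions above rely on, a minimal sketch; port 1234 mirrors the test and is illustrative, not a default:

    import java.sql.Connection;
    import java.sql.DriverManager;

    import org.apache.phoenix.queryserver.client.ThinClientUtil;
    import org.apache.phoenix.util.QueryUtil;

    public class UrlShapes {
        public static void main(String[] args) throws Exception {
            // Thick driver URL: speaks directly to HBase via the ZooKeeper quorum
            String thickUrl = QueryUtil.getUrl("localhost");
            // Thin driver URL: speaks HTTP/Avatica to the Phoenix Query Server
            String thinUrl = ThinClientUtil.getConnectionUrl("localhost", 1234);
            // Because each driver rejects the other's URL shape, DriverManager
            // can dispatch unambiguously even with both drivers registered.
            try (Connection conn = DriverManager.getConnection(thinUrl)) {
                System.out.println("connected through the thin driver");
            }
        }
    }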
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
deleted file mode 100644
index fb59e0d..0000000
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-
-public class CustomAvaticaServerConfigurationTest {
-    @Test
-    public void testDefaultFactory() throws IOException {
-        QueryServer queryServer = new QueryServer();
-        UserGroupInformation ugi = queryServer.getUserGroupInformation();
-        // the default factory returns a null configuration
-        AvaticaServerConfiguration avaticaServerConfiguration = queryServer.createAvaticaServerConfig(new Configuration(), ugi);
-        Assert.assertNull(avaticaServerConfiguration);
-    }
-}
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java
deleted file mode 100644
index c016363..0000000
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.phoenix.queryserver.server.QueryServer.PhoenixDoAsCallback;
-import org.junit.Test;
-
-/**
- * Tests for the authorization callback hook Avatica provides for Phoenix to implement.
- */
-public class PhoenixDoAsCallbackTest {
-
-    @Test
-    public void ugiInstancesAreCached() throws Exception {
-        Configuration conf = new Configuration(false);
-        UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
-        PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);
-
-        UserGroupInformation ugi1 = callback.createProxyUser("user1");
-        assertEquals(1, callback.getCache().size());
-        assertTrue(ugi1.getRealUser() == serverUgi);
-        UserGroupInformation ugi2 = callback.createProxyUser("user2");
-        assertEquals(2, callback.getCache().size());
-        assertTrue(ugi2.getRealUser() == serverUgi);
-
-        UserGroupInformation ugi1Reference = callback.createProxyUser("user1");
-        assertTrue(ugi1 == ugi1Reference);
-        assertEquals(2, callback.getCache().size());
-    }
-
-    @Test
-    public void proxyingUsersAreCached() throws Exception {
-      Configuration conf = new Configuration(false);
-      // The user "server" can impersonate anyone
-      conf.set("hadoop.proxyuser.server.groups", "*");
-      conf.set("hadoop.proxyuser.server.hosts", "*");
-      // Trigger ProxyUsers to refresh itself with the above configuration
-      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
-      UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
-      PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);
-
-      UserGroupInformation user1 = callback.doAsRemoteUser("user1", "localhost:1234", new Callable<UserGroupInformation>() {
-          public UserGroupInformation call() throws Exception {
-            return UserGroupInformation.getCurrentUser();
-          }
-      });
-
-      UserGroupInformation user2 = callback.doAsRemoteUser("user2", "localhost:1235", new Callable<UserGroupInformation>() {
-          public UserGroupInformation call() throws Exception {
-            return UserGroupInformation.getCurrentUser();
-          }
-      });
-
-      UserGroupInformation user1Reference = callback.doAsRemoteUser("user1", "localhost:1234", new Callable<UserGroupInformation>() {
-          public UserGroupInformation call() throws Exception {
-            return UserGroupInformation.getCurrentUser();
-          }
-      });
-
-      // UserGroupInformation.getCurrentUser() returns a new UGI wrapper on each call, but the shared
-      // underlying Subject means UGI equality and hashCode reveal whether the cached user was reused.
-      assertNotEquals(user1.hashCode(), user2.hashCode());
-      assertEquals("These should be the same (cached) instance", user1.hashCode(), user1Reference.hashCode());
-      assertEquals("These should be the same (cached) instance", user1, user1Reference);
-    }
-}
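
The impersonation exercised above is gated by stock Hadoop proxy-user rules rather than anything Phoenix-specific. A minimal standalone sketch of the server-side setup, using the same keys the test sets; the wildcard values are test conveniences that should be narrowed in production:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    public class ProxyUserSetup {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            // Let the "server" user impersonate members of any group...
            conf.set("hadoop.proxyuser.server.groups", "*");
            // ...when connecting from any host.
            conf.set("hadoop.proxyuser.server.hosts", "*");
            // ProxyUsers caches these rules, so a refresh is required
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        }
    }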
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixRemoteUserExtractorTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixRemoteUserExtractorTest.java
deleted file mode 100644
index 9351989..0000000
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixRemoteUserExtractorTest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import org.apache.calcite.avatica.server.HttpServer;
-import org.apache.calcite.avatica.server.RemoteUserExtractionException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.phoenix.queryserver.server.QueryServer.PhoenixRemoteUserExtractor;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.http.HttpServletRequest;
-
-/**
- * Tests for the RemoteUserExtractor method Avatica provides for Phoenix to implement.
- */
-public class PhoenixRemoteUserExtractorTest {
-  private static final Logger LOG = LoggerFactory.getLogger(PhoenixRemoteUserExtractorTest.class);
-
-  @Test
-  public void testWithRemoteUserExtractorSuccess() {
-    HttpServletRequest request = mock(HttpServletRequest.class);
-    when(request.getRemoteUser()).thenReturn("proxyserver");
-    when(request.getParameter("doAs")).thenReturn("enduser");
-    when(request.getRemoteAddr()).thenReturn("localhost:1234");
-
-    Configuration conf = new Configuration(false);
-    conf.set("hadoop.proxyuser.proxyserver.groups", "*");
-    conf.set("hadoop.proxyuser.proxyserver.hosts", "*");
-    conf.set("phoenix.queryserver.withRemoteUserExtractor", "true");
-    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
-
-    PhoenixRemoteUserExtractor extractor = new PhoenixRemoteUserExtractor(conf);
-    try {
-      assertEquals("enduser", extractor.extract(request));
-    } catch (RemoteUserExtractionException e) {
-      LOG.info(e.getMessage());
-    }
-  }
-
-  @Test
-  public void testNoRemoteUserExtractorParam() {
-    HttpServletRequest request = mock(HttpServletRequest.class);
-    when(request.getRemoteUser()).thenReturn("proxyserver");
-    when(request.getRemoteAddr()).thenReturn("localhost:1234");
-
-    Configuration conf = new Configuration(false);
-    conf.set("hadoop.proxyuser.proxyserver.groups", "*");
-    conf.set("hadoop.proxyuser.proxyserver.hosts", "*");
-    conf.set("phoenix.queryserver.withRemoteUserExtractor", "true");
-    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
-
-    PhoenixRemoteUserExtractor extractor = new PhoenixRemoteUserExtractor(conf);
-    try {
-      assertEquals("proxyserver", extractor.extract(request));
-    } catch (RemoteUserExtractionException e) {
-      LOG.info(e.getMessage());
-    }
-  }
-
-  @Test
-  public void testDoNotUseRemoteUserExtractor() {
-
-    HttpServer.Builder builder = mock(HttpServer.Builder.class);
-    Configuration conf = new Configuration(false);
-    QueryServer queryServer = new QueryServer();
-    queryServer.setRemoteUserExtractorIfNecessary(builder, conf);
-    verify(builder, never()).withRemoteUserExtractor(any(PhoenixRemoteUserExtractor.class));
-  }
-
-  @Test
-  public void testUseRemoteUserExtractor() {
-
-    HttpServer.Builder builder = mock(HttpServer.Builder.class);
-    Configuration conf = new Configuration(false);
-    conf.set("phoenix.queryserver.withRemoteUserExtractor", "true");
-    QueryServer queryServer = new QueryServer();
-    queryServer.setRemoteUserExtractorIfNecessary(builder, conf);
-    verify(builder).withRemoteUserExtractor(any(PhoenixRemoteUserExtractor.class));
-  }
-
-}
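
Taken together, the tests above describe the intended flow: a trusted intermediary authenticates to the query server as itself and names the effective end user in a doAs request parameter. A minimal sketch of the opt-in, reusing the key the tests set; the example URL is illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class RemoteUserExtractionSetup {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            // Opt in to deriving the effective user from the request
            conf.set("phoenix.queryserver.withRemoteUserExtractor", "true");
            // A proxying client would then send something like
            //   http://pqs-host:1234/?doAs=enduser
            // subject to the hadoop.proxyuser.* rules shown earlier.
        }
    }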
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/QueryServerConfigurationTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/QueryServerConfigurationTest.java
deleted file mode 100644
index d01d2ea..0000000
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/QueryServerConfigurationTest.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
-import org.apache.calcite.avatica.server.DoAsRemoteUserCallback;
-import org.apache.calcite.avatica.server.HttpServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.phoenix.query.QueryServices;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.mockito.Mockito.*;
-
-public class QueryServerConfigurationTest {
-  private static final Configuration CONF = HBaseConfiguration.create();
-
-  @Rule public TemporaryFolder testFolder = new TemporaryFolder();
-
-  private HttpServer.Builder builder;
-  private QueryServer queryServer;
-  private UserGroupInformation ugi;
-
-  @Before
-  public void setup() throws IOException {
-    builder = mock(HttpServer.Builder.class);
-    queryServer = new QueryServer(new String[0], CONF);
-    ugi = queryServer.getUserGroupInformation();
-  }
-
-  @Test
-  public void testSpnegoEnabled() throws IOException {
-    setupKeytabForSpnego();
-    // SPNEGO settings will be provided to the builder when enabled
-    doReturn(builder).when(builder).withSpnego(anyString(), any(String[].class));
-    configureAndVerifyImpersonation(builder, false);
-    // A keytab file will also be provided for automatic login
-    verify(builder).withAutomaticLogin(any(File.class));
-    verify(builder, never()).withCustomAuthentication(any(AvaticaServerConfiguration.class));
-  }
-
-  @Test
-  public void testSpnegoDisabled() throws IOException {
-    setupKeytabForSpnego();
-    configureAndVerifyImpersonation(builder, true);
-    verify(builder, never()).withSpnego(anyString(), any(String[].class));
-    verify(builder, never()).withAutomaticLogin(any(File.class));
-    verify(builder, never()).withCustomAuthentication(any(AvaticaServerConfiguration.class));
-  }
-
-  @Test
-  public void testCustomServerConfiguration() {
-    queryServer.enableCustomAuth(builder, CONF, ugi);
-    verify(builder).withCustomAuthentication(any(AvaticaServerConfiguration.class));
-    verify(builder, never()).withSpnego(anyString(), any(String[].class));
-    verify(builder, never()).withAutomaticLogin(any(File.class));
-    verify(builder, never()).withImpersonation(any(DoAsRemoteUserCallback.class));
-  }
-
-  private void setupKeytabForSpnego() throws IOException {
-    File keytabFile = testFolder.newFile("test.keytab");
-    CONF.set(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB, keytabFile.getAbsolutePath());
-  }
-
-  private void configureAndVerifyImpersonation(HttpServer.Builder builder, boolean disableSpnego)
-      throws IOException {
-    queryServer.configureClientAuthentication(builder, disableSpnego, ugi);
-    verify(builder).withImpersonation(any(DoAsRemoteUserCallback.class));
-  }
-}
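
The SPNEGO-enabled path above hinges on the query server being able to log itself in from a keytab. A minimal sketch of that one setting, with an illustrative keytab path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.query.QueryServices;

    public class KeytabSetup {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Keytab consumed by the builder's automatic login when SPNEGO is on
            conf.set(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
                "/etc/security/keytabs/phoenix.keytab");
        }
    }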
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactoryTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactoryTest.java
deleted file mode 100644
index 975ee26..0000000
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/RemoteUserExtractorFactoryTest.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import org.apache.calcite.avatica.server.RemoteUserExtractor;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class RemoteUserExtractorFactoryTest {
-
-  @Test
-  public void testProvidesDefaultFactory() {
-    QueryServer queryServer = new QueryServer();
-    RemoteUserExtractor extractor = queryServer.createRemoteUserExtractor(new Configuration());
-    Assert.assertTrue(
-      "Not an instance of PhoenixRemoteUserExtractor: " + extractor.getClass().getName(),
-      extractor instanceof QueryServer.PhoenixRemoteUserExtractor);
-  }
-}
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/ServerCustomizersTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/ServerCustomizersTest.java
deleted file mode 100644
index 93e1e37..0000000
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/ServerCustomizersTest.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.queryserver.server;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.calcite.avatica.server.HttpServer;
-import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
-import org.apache.calcite.avatica.server.ServerCustomizer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.InstanceResolver;
-import org.eclipse.jetty.server.Server;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.mockito.Matchers.*;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-public class ServerCustomizersTest {
-    @Before @After
-    public void clearSingletons() {
-        // clean up singletons
-        InstanceResolver.clearSingletons();
-    }
-
-    @Test
-    public void testDefaultFactory() {
-        QueryServer queryServer = new QueryServer();
-        AvaticaServerConfiguration avaticaServerConfiguration = null;
-        // the default factory creates an empty list of server customizers
-        List<ServerCustomizer<Server>> customizers =
-                queryServer.createServerCustomizers(new Configuration(), avaticaServerConfiguration);
-        Assert.assertEquals(0, customizers.size());
-    }
-
-    @Test
-    public void testUseProvidedCustomizers() {
-        AvaticaServerConfiguration avaticaServerConfiguration = null;
-        final List<ServerCustomizer<Server>> expected =
-            Collections.<ServerCustomizer<Server>> singletonList(new ServerCustomizer<Server>() {
-              @Override
-              public void customize(Server server) {
-                // no-op customizer
-              }
-        });
-        // Register the server customizer list
-        InstanceResolver.getSingleton(ServerCustomizersFactory.class, new ServerCustomizersFactory() {
-            @Override
-            public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
-                                                                          AvaticaServerConfiguration avaticaServerConfiguration) {
-                return expected;
-            }
-        });
-        Configuration conf = new Configuration(false);
-        conf.set(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED, "true");
-        QueryServer queryServer = new QueryServer();
-        List<ServerCustomizer<Server>> actual = queryServer.createServerCustomizers(conf, avaticaServerConfiguration);
-        Assert.assertEquals("Customizers are different", expected, actual);
-    }
-
-    @Test
-    @SuppressWarnings("unchecked")
-    public void testEnableCustomizers() {
-        AvaticaServerConfiguration avaticaServerConfiguration = null;
-        HttpServer.Builder builder = mock(HttpServer.Builder.class);
-        Configuration conf = new Configuration(false);
-        conf.set(QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED, "true");
-        QueryServer queryServer = new QueryServer();
-        queryServer.enableServerCustomizersIfNecessary(builder, conf, avaticaServerConfiguration);
-        verify(builder).withServerCustomizers(anyList(), any(Class.class));
-    }
-}
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 6d663d9..30d8dc7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -27,8 +27,6 @@
     <module>phoenix-flume</module>
     <module>phoenix-kafka</module>
     <module>phoenix-pig</module>
-    <module>phoenix-queryserver-client</module>
-    <module>phoenix-queryserver</module>
     <module>phoenix-pherf</module>
     <module>phoenix-spark</module>
     <module>phoenix-hive</module>