Posted to commits@phoenix.apache.org by td...@apache.org on 2019/03/26 18:25:24 UTC

[phoenix] branch master updated: PHOENIX-5062 Create a new repo for the phoenix connectors (addendum)

This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 679dd55  PHOENIX-5062 Create a new repo for the phoenix connectors (addendum)
679dd55 is described below

commit 679dd55d504123b584eed88baecf98f197438bf9
Author: Thomas D'Silva <td...@apache.org>
AuthorDate: Tue Mar 26 11:24:32 2019 -0700

    PHOENIX-5062 Create a new repo for the phoenix connectors (addendum)
---
 phoenix-flume/pom.xml                              |  203 --
 .../org/apache/phoenix/flume/PhoenixSinkIT.java    |  272 ---
 phoenix-hive/pom.xml                               |  294 ---
 .../hadoop/hive/ql/QTestProcessExecResult.java     |   66 -
 .../java/org/apache/hadoop/hive/ql/QTestUtil.java  | 2489 --------------------
 .../hive/ql/security/DummyAuthenticator.java       |   70 -
 .../phoenix/hive/BaseHivePhoenixStoreIT.java       |  180 --
 .../org/apache/phoenix/hive/HiveMapReduceIT.java   |   62 -
 .../apache/phoenix/hive/HivePhoenixStoreIT.java    |  344 ---
 .../java/org/apache/phoenix/hive/HiveTestUtil.java |   40 -
 .../it/java/org/apache/phoenix/hive/HiveTezIT.java |   32 -
 .../apache/phoenix/hive/PhoenixRecordUpdater.java  |  341 ---
 .../apache/phoenix/hive/PhoenixStorageHandler.java |  276 ---
 .../constants/PhoenixStorageHandlerConstants.java  |  108 -
 .../phoenix/hive/mapreduce/PhoenixInputFormat.java |  271 ---
 .../hive/mapreduce/PhoenixRecordWriter.java        |  360 ---
 .../PhoenixByteObjectInspector.java                |   59 -
 .../PhoenixDoubleObjectInspector.java              |   59 -
 .../hive/ql/index/IndexPredicateAnalyzer.java      |  521 ----
 .../phoenix/hive/query/PhoenixQueryBuilder.java    |  849 -------
 .../phoenix/hive/util/PhoenixConnectionUtil.java   |  119 -
 .../hive/util/PhoenixStorageHandlerUtil.java       |  321 ---
 .../org/apache/phoenix/hive/util/PhoenixUtil.java  |  210 --
 .../hive/query/PhoenixQueryBuilderTest.java        |  173 --
 phoenix-hive/src/test/resources/hive-site.xml      |  123 -
 phoenix-hive/src/test/resources/tez-site.xml       |   69 -
 phoenix-kafka/pom.xml                              |  421 ----
 phoenix-pig/pom.xml                                |  464 ----
 .../org/apache/phoenix/pig/PhoenixHBaseLoader.java |  265 ---
 .../phoenix/pig/util/PhoenixPigSchemaUtil.java     |   90 -
 phoenix-spark/pom.xml                              |  607 -----
 phoenix-spark/src/it/resources/hbase-site.xml      |   40 -
 .../v2/reader/PhoenixDataSourceReader.java         |  201 --
 .../v2/reader/PhoenixInputPartitionReader.java     |  169 --
 34 files changed, 10168 deletions(-)

diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
deleted file mode 100644
index 1d66c90..0000000
--- a/phoenix-flume/pom.xml
+++ /dev/null
@@ -1,203 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>5.1.0-HBase-2.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>phoenix-flume</artifactId>
-  <name>Phoenix - Flume</name>
-
-  <properties>
-    <top.dir>${project.basedir}/..</top.dir>
-  </properties>
-
-  <dependencies>
-   <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-
-    <!-- Test Dependencies -->
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.github.stephenc.high-scale-lib</groupId>
-      <artifactId>high-scale-lib</artifactId>
-      <version>1.1.1</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.yammer.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
-      <version>2.1.2</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-codec</groupId>
-      <artifactId>commons-codec</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-testing-util</artifactId>
-      <scope>test</scope>
-      <optional>true</optional>
-      <exclusions>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-it</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-mapreduce</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-protocol</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop-compat</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop-compat</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop2-compat</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop2-compat</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minicluster</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.tephra</groupId>
-      <artifactId>tephra-core</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-
-    <!-- to work with json data using flume -->
-    <dependency>
-      <groupId>com.tdunning</groupId>
-      <artifactId>json</artifactId>
-      <version>1.8</version>
-    </dependency>
-    <dependency>
-      <groupId>com.jayway.jsonpath</groupId>
-      <artifactId>json-path</artifactId>
-      <version>2.2.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-csv</artifactId>
-      <version>${commons-csv.version}</version>
-    </dependency>
-    <!-- Main dependency on flume. The last to avoid using old commons-io in IT -->
-    <dependency>
-      <groupId>org.apache.flume</groupId>
-      <artifactId>flume-ng-core</artifactId>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <version>${maven-dependency-plugin.version}</version>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java b/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
deleted file mode 100644
index 01e106f..0000000
--- a/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.flume;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.util.Properties;
-
-import org.apache.flume.Channel;
-import org.apache.flume.Context;
-import org.apache.flume.Sink;
-import org.apache.flume.SinkFactory;
-import org.apache.flume.Transaction;
-import org.apache.flume.channel.MemoryChannel;
-import org.apache.flume.conf.Configurables;
-import org.apache.flume.event.EventBuilder;
-import org.apache.flume.lifecycle.LifecycleState;
-import org.apache.flume.sink.DefaultSinkFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
-import org.apache.phoenix.flume.serializer.CustomSerializer;
-import org.apache.phoenix.flume.serializer.EventSerializers;
-import org.apache.phoenix.flume.sink.NullPhoenixSink;
-import org.apache.phoenix.flume.sink.PhoenixSink;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Assert;
-import org.junit.Test;
-
-
-public class PhoenixSinkIT extends BaseHBaseManagedTimeIT {
-
-    private Context sinkContext;
-    private PhoenixSink sink;
-   
-   
-    @Test
-    public void testSinkCreation() {
-        SinkFactory factory = new DefaultSinkFactory ();
-        Sink sink = factory.create("PhoenixSink__", "org.apache.phoenix.flume.sink.PhoenixSink");
-        Assert.assertNotNull(sink);
-        Assert.assertTrue(PhoenixSink.class.isInstance(sink));
-    }
-    @Test
-    public void testConfiguration () {
-        
-        sinkContext = new Context ();
-        sinkContext.put(FlumeConstants.CONFIG_TABLE, "test");
-        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
-
-        sink = new PhoenixSink();
-        Configurables.configure(sink, sinkContext);
-    }
-    
-    
-    
-    @Test(expected= NullPointerException.class)
-    public void testInvalidConfiguration () {
-        
-        sinkContext = new Context ();
-        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
-
-        sink = new PhoenixSink();
-        Configurables.configure(sink, sinkContext);
-    }
-    
-    @Test(expected=RuntimeException.class)
-    public void testInvalidConfigurationOfSerializer () {
-        
-        sinkContext = new Context ();
-        sinkContext.put(FlumeConstants.CONFIG_TABLE, "test");
-        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,"unknown");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
-
-        sink = new PhoenixSink();
-        Configurables.configure(sink, sinkContext);
-    }
-    
-    @Test
-    public void testInvalidTable() {
-        sinkContext = new Context ();
-        sinkContext.put(FlumeConstants.CONFIG_TABLE, "flume_test");
-        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
-
-        sink = new PhoenixSink();
-        Configurables.configure(sink, sinkContext);
-
-        final Channel channel = this.initChannel();
-        sink.setChannel(channel);
-        try {
-            sink.start();
-            fail();
-        }catch(Exception e) {
-            assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1012 (42M03): Table undefined."));
-        }
-    }
-    
-    @Test
-    public void testSinkLifecycle () {
-        String tableName = generateUniqueName();
-
-        String ddl = "CREATE TABLE " + tableName +
-                "  (flume_time timestamp not null, col1 varchar , col2 varchar" +
-                "  CONSTRAINT pk PRIMARY KEY (flume_time))\n";
-        
-        sinkContext = new Context ();
-        sinkContext.put(FlumeConstants.CONFIG_TABLE,  tableName);
-        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
-        sinkContext.put(FlumeConstants.CONFIG_TABLE_DDL, ddl);
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_REGULAR_EXPRESSION,"^([^\t]+)\t([^\t]+)$");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,DefaultKeyGenerator.TIMESTAMP.name());
-
-
-        sink = new PhoenixSink();
-        Configurables.configure(sink, sinkContext);
-        Assert.assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
-
-        final Channel channel = this.initChannel();
-        sink.setChannel(channel);
-
-        sink.start();
-        Assert.assertEquals(LifecycleState.START, sink.getLifecycleState());
-        sink.stop();
-        Assert.assertEquals(LifecycleState.STOP, sink.getLifecycleState());
-    }
-    
-    @Test
-    public void testCreateTable () throws Exception {
-        String tableName = generateUniqueName();
-        String ddl = "CREATE TABLE " + tableName + " " +
-                "  (flume_time timestamp not null, col1 varchar , col2 varchar" +
-                "  CONSTRAINT pk PRIMARY KEY (flume_time))\n";
-
-        final String fullTableName =  tableName;
-        sinkContext = new Context ();
-        sinkContext.put(FlumeConstants.CONFIG_TABLE, fullTableName);
-        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER,EventSerializers.REGEX.name());
-        sinkContext.put(FlumeConstants.CONFIG_TABLE_DDL, ddl);
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_REGULAR_EXPRESSION,"^([^\t]+)\t([^\t]+)$");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,"col1,col2");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());
-
-
-        sink = new PhoenixSink();
-        Configurables.configure(sink, sinkContext);
-        Assert.assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
-
-        final Channel channel = this.initChannel();
-        sink.setChannel(channel);
-        
-        sink.start();
-        Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
-        try {
-            boolean exists = admin.tableExists(TableName.valueOf(fullTableName));
-            Assert.assertTrue(exists);
-        }finally {
-            admin.close();
-        }
-    }
-
-    @Test
-    public void testExtendedSink() throws Exception {
-        // Create a mock NullPhoenixSink which extends PhoenixSink, and verify configure is invoked()
-
-        PhoenixSink sink = mock(NullPhoenixSink.class);
-        sinkContext = new Context();
-        sinkContext.put(FlumeConstants.CONFIG_TABLE, "FLUME_TEST_EXTENDED");
-        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, CustomSerializer.class.getName());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "ID, COUNTS");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());
-
-        Configurables.configure(sink, sinkContext);
-        verify(sink).configure(sinkContext);
-    }
-
-    @Test
-    public void testExtendedSerializer() throws Exception {
-        /*
-        Sadly, we can't mock a serializer, as the PhoenixSink does a Class.forName() to instantiate
-        it. Instead. we'll setup a Flume channel and verify the data our custom serializer wrote.
-        */
-
-        final String fullTableName = "FLUME_TEST_EXTENDED";
-        final String ddl = "CREATE TABLE " + fullTableName + " (ID BIGINT NOT NULL PRIMARY KEY, COUNTS UNSIGNED_LONG)";
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        final Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.createStatement().execute(ddl);
-        conn.commit();
-
-        sinkContext = new Context();
-        sinkContext.put(FlumeConstants.CONFIG_TABLE, "FLUME_TEST_EXTENDED");
-        sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, CustomSerializer.class.getName());
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "ID, COUNTS");
-        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());
-
-        PhoenixSink sink = new PhoenixSink();
-        Configurables.configure(sink, sinkContext);
-
-        // Send a test event through Flume, using our custom serializer
-        final Channel channel = this.initChannel();
-        sink.setChannel(channel);
-        sink.start();
-
-        final Transaction transaction = channel.getTransaction();
-        transaction.begin();
-        channel.put(EventBuilder.withBody(Bytes.toBytes("test event")));
-        transaction.commit();
-        transaction.close();
-
-        sink.process();
-        sink.stop();
-
-        // Verify our serializer wrote out data
-        ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM FLUME_TEST_EXTENDED");
-        assertTrue(rs.next());
-        assertTrue(rs.getLong(1) == 1L);
-    }
-    
-    private Channel initChannel() {
-        //Channel configuration
-        Context channelContext = new Context();
-        channelContext.put("capacity", "10000");
-        channelContext.put("transactionCapacity", "200");
-
-        Channel channel = new MemoryChannel();
-        channel.setName("memorychannel");
-        Configurables.configure(channel, channelContext);
-        return channel;
-    }
-    
-    
-}
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
deleted file mode 100644
index 08ad855..0000000
--- a/phoenix-hive/pom.xml
+++ /dev/null
@@ -1,294 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>5.1.0-HBase-2.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>phoenix-hive</artifactId>
-  <name>Phoenix - Hive</name>
-<properties>
-    <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
-</properties>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-      <exclusions>
-	  <exclusion>
-          <groupId>com.google.guava</groupId>
-          <artifactId>guava</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-cli</artifactId>
-      <version>${hive.version}</version>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-metastore</artifactId>
-      <version>${hive.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
-      <type>test-jar</type>
-      <version>${hive.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-exec</artifactId>
-      <version>${hive.version}</version>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-core</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <!-- Test dependencies -->
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-testing-util</artifactId>
-      <scope>test</scope>
-      <optional>true</optional>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-it</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-all</artifactId>
-      <version>4.1.17.Final</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-auth</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-common</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minicluster</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.tez</groupId>
-      <artifactId>tez-tests</artifactId>
-      <scope>test</scope>
-      <version>0.9.1</version>
-      <type>test-jar</type>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-yarn-api</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.tez</groupId>
-      <artifactId>tez-dag</artifactId>
-      <scope>test</scope>
-      <version>0.9.1</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-yarn-api</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-util</artifactId>
-        <scope>test</scope>
-        <version>9.3.8.v20160314</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-http</artifactId>
-        <scope>test</scope>
-        <version>9.3.8.v20160314</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-server</artifactId>
-        <scope>test</scope>
-        <version>9.3.8.v20160314</version>
-      </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <version>${mockito-all.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <version>19.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.calcite.avatica</groupId>
-      <artifactId>avatica</artifactId>
-      <!-- Overriding the version of Avatica that PQS uses so that Hive will work -->
-      <version>${avatica.version}</version>
-      <scope>test</scope>
-      <!-- And removing a bunch of dependencies that haven't been shaded in this older
-           Avatica version which conflict with HDFS -->
-      <exclusions>
-        <exclusion>
-          <groupId>org.hsqldb</groupId>
-          <artifactId>hsqldb</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-databind</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-annotations</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-core</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-        <executions>
-	    <execution>
-              <id>NeedTheirOwnClusterTests</id>
-              <configuration>
-                 <encoding>UTF-8</encoding>
-                 <forkCount>1</forkCount>
-                 <runOrder>alphabetical</runOrder>
-                 <reuseForks>false</reuseForks>
-                 <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}" -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ -Dorg.apache.hadoop.hbase.shaded.io.netty.packagePrefix=org.apache.hadoop.hbase.shaded.</argLine>
-                 <redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
-                 <testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
-                 <groups>org.apache.phoenix.end2end.NeedsOwnMiniClusterTest</groups>
-                 <shutdown>kill</shutdown>
-              </configuration>
-              <goals>
-                 <goal>integration-test</goal>
-                 <goal>verify</goal>
-              </goals>
-            </execution>
-          </executions>
-      </plugin>
-      <plugin>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <version>${maven-dependency-plugin.version}</version>
-        <executions>
-          <execution>
-            <id>copy-dependencies</id>
-            <phase>package</phase>
-            <goals>
-              <goal>copy-dependencies</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <descriptorRefs>
-            <descriptorRef>jar-with-dependencies</descriptorRef>
-          </descriptorRefs>
-        </configuration>
-        <executions>
-          <execution>
-            <id>make-jar-with-dependencies</id>
-            <phase>package</phase>
-            <goals>
-              <goal>single</goal>
-            </goals>
-            <configuration>
-              <appendAssemblyId>false</appendAssemblyId>
-              <finalName>phoenix-${project.version}-hive</finalName>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
deleted file mode 100644
index f9f7057..0000000
--- a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql;
-
-/**
- * Standard output and return code of a process executed during the qtests.
- */
-public class QTestProcessExecResult {
-
-  private static final String TRUNCATED_OUTPUT = "Output was too long and had to be truncated...";
-  private static final short MAX_OUTPUT_CHAR_LENGTH = 2000;
-
-  private final int returnCode;
-  private final String standardOut;
-
-  QTestProcessExecResult(int code, String output) {
-    this.returnCode = code;
-    this.standardOut = truncatefNeeded(output);
-  }
-
-  /**
-   * @return executed process return code
-   */
-  public int getReturnCode() {
-    return this.returnCode;
-  }
-
-  /**
-   * @return output captured from stdout while process was executing
-   */
-  public String getCapturedOutput() {
-    return this.standardOut;
-  }
-
-  public static QTestProcessExecResult create(int code, String output) {
-    return new QTestProcessExecResult(code, output);
-  }
-
-  public static  QTestProcessExecResult createWithoutOutput(int code) {
-    return new QTestProcessExecResult(code, "");
-  }
-
-  private String truncatefNeeded(String orig) {
-    if (orig.length() > MAX_OUTPUT_CHAR_LENGTH) {
-      return orig.substring(0, MAX_OUTPUT_CHAR_LENGTH) + "\r\n" + TRUNCATED_OUTPUT;
-    } else {
-      return orig;
-    }
-  }
-}
diff --git a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java
deleted file mode 100644
index 9721162..0000000
--- a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ /dev/null
@@ -1,2489 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql;
-
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.PrintStream;
-import java.io.Serializable;
-import java.io.StringWriter;
-import java.net.URL;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Deque;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.stream.Stream;
-
-import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.io.output.ByteArrayOutputStream;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-import org.apache.hadoop.hive.cli.CliDriver;
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.common.io.CachingPrintStream;
-import org.apache.hadoop.hive.common.io.DigestPrintStream;
-import org.apache.hadoop.hive.common.io.SortAndDigestPrintStream;
-import org.apache.hadoop.hive.common.io.SortPrintStream;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.io.api.LlapProxy;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
-import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession;
-import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
-import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
-import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
-import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
-import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.ParseDriver;
-import org.apache.hadoop.hive.ql.parse.ParseException;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.processors.CommandProcessor;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.processors.HiveCommand;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.HadoopShims;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hive.common.util.StreamPrinter;
-import org.apache.logging.log4j.util.Strings;
-import org.apache.tools.ant.BuildException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-
-import junit.framework.TestSuite;
-
-/**
- * QTestUtil. Cloned from Hive 3.0.0 as hive doesn't release hive-it-util artifact
- *
- */
-public class QTestUtil {
-  public static final String UTF_8 = "UTF-8";
-  public static final String HIVE_ROOT = getHiveRoot();
-  // security property names
-  private static final String SECURITY_KEY_PROVIDER_URI_NAME = "dfs.encryption.key.provider.uri";
-  private static final String CRLF = System.getProperty("line.separator");
-
-  public static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
-  private static final Logger LOG = LoggerFactory.getLogger("QTestUtil");
-  private final static String defaultInitScript = "q_test_init.sql";
-  private final static String defaultCleanupScript = "q_test_cleanup.sql";
-  private final String[] testOnlyCommands = new String[]{"crypto"};
-
-  private static final String TEST_TMP_DIR_PROPERTY = "test.tmp.dir"; // typically target/tmp
-  private static final String BUILD_DIR_PROPERTY = "build.dir"; // typically target
-
-  public static final String PATH_HDFS_REGEX = "(hdfs://)([a-zA-Z0-9:/_\\-\\.=])+";
-  public static final String PATH_HDFS_WITH_DATE_USER_GROUP_REGEX = "([a-z]+) ([a-z]+)([ ]+)([0-9]+) ([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}) " + PATH_HDFS_REGEX;
-
-  private String testWarehouse;
-  private final String testFiles;
-  protected final String outDir;
-  protected String overrideResultsDir;
-  protected final String logDir;
-  private final TreeMap<String, String> qMap;
-  private final Set<String> qSkipSet;
-  private final Set<String> qSortSet;
-  private final Set<String> qSortQuerySet;
-  private final Set<String> qHashQuerySet;
-  private final Set<String> qSortNHashQuerySet;
-  private final Set<String> qNoSessionReuseQuerySet;
-  private final Set<String> qJavaVersionSpecificOutput;
-  private static final String SORT_SUFFIX = ".sorted";
-  private final Set<String> srcTables;
-  private final Set<String> srcUDFs;
-  private final MiniClusterType clusterType;
-  private final FsType fsType;
-  private ParseDriver pd;
-  protected Hive db;
-  protected QueryState queryState;
-  protected HiveConf conf;
-  private IDriver drv;
-  private BaseSemanticAnalyzer sem;
-  protected final boolean overWrite;
-  private CliDriver cliDriver;
-  private HadoopShims.MiniMrShim mr = null;
-  private HadoopShims.MiniDFSShim dfs = null;
-  private FileSystem fs;
-  private HadoopShims.HdfsEncryptionShim hes = null;
-  private String hadoopVer = null;
-  private QTestSetup setup = null;
-  private SparkSession sparkSession = null;
-  private boolean isSessionStateStarted = false;
-  private static final String javaVersion = getJavaVersion();
-
-  private final String initScript;
-  private final String cleanupScript;
-
-
-  public interface SuiteAddTestFunctor {
-    public void addTestToSuite(TestSuite suite, Object setup, String tName);
-  }
-
-  public static Set<String> getSrcTables() {
-    HashSet<String> srcTables = new HashSet<String>();
-    // FIXME: moved default value to here...for now
-    // i think this features is never really used from the command line
-    String defaultTestSrcTables = "src,src1,srcbucket,srcbucket2,src_json,src_thrift," +
-        "src_sequencefile,srcpart,alltypesorc,src_hbase,cbo_t1,cbo_t2,cbo_t3,src_cbo,part," +
-        "lineitem,alltypesparquet";
-    for (String srcTable : System.getProperty("test.src.tables", defaultTestSrcTables).trim().split(",")) {
-      srcTable = srcTable.trim();
-      if (!srcTable.isEmpty()) {
-        srcTables.add(srcTable);
-      }
-    }
-    if (srcTables.isEmpty()) {
-      throw new RuntimeException("Source tables cannot be empty");
-    }
-    return srcTables;
-  }
-
-  /**
-   * Returns the default UDF names which should not be removed when resetting the test database
-   * @return The list of the UDF names not to remove
-   */
-  private Set<String> getSrcUDFs() {
-    HashSet<String> srcUDFs = new HashSet<String>();
-    // FIXME: moved default value to here...for now
-    // i think this features is never really used from the command line
-    String defaultTestSrcUDFs = "qtest_get_java_boolean";
-    for (String srcUDF : System.getProperty("test.src.udfs", defaultTestSrcUDFs).trim().split(","))
-    {
-      srcUDF = srcUDF.trim();
-      if (!srcUDF.isEmpty()) {
-        srcUDFs.add(srcUDF);
-      }
-    }
-    if (srcUDFs.isEmpty()) {
-      throw new RuntimeException("Source UDFs cannot be empty");
-    }
-    return srcUDFs;
-  }
-
-
-
-  public HiveConf getConf() {
-    return conf;
-  }
-
-  public boolean deleteDirectory(File path) {
-    if (path.exists()) {
-      File[] files = path.listFiles();
-      for (File file : files) {
-        if (file.isDirectory()) {
-          deleteDirectory(file);
-        } else {
-          file.delete();
-        }
-      }
-    }
-    return (path.delete());
-  }
-
-  public void copyDirectoryToLocal(Path src, Path dest) throws Exception {
-
-    FileSystem srcFs = src.getFileSystem(conf);
-    FileSystem destFs = dest.getFileSystem(conf);
-    if (srcFs.exists(src)) {
-      FileStatus[] files = srcFs.listStatus(src);
-      for (FileStatus file : files) {
-        String name = file.getPath().getName();
-        Path dfs_path = file.getPath();
-        Path local_path = new Path(dest, name);
-
-        // If this is a source table we do not copy it out
-        if (srcTables.contains(name)) {
-          continue;
-        }
-
-        if (file.isDirectory()) {
-          if (!destFs.exists(local_path)) {
-            destFs.mkdirs(local_path);
-          }
-          copyDirectoryToLocal(dfs_path, local_path);
-        } else {
-          srcFs.copyToLocalFile(dfs_path, local_path);
-        }
-      }
-    }
-  }
-
-  static Pattern mapTok = Pattern.compile("(\\.?)(.*)_map_(.*)");
-  static Pattern reduceTok = Pattern.compile("(.*)(reduce_[^\\.]*)((\\..*)?)");
-
-  public void normalizeNames(File path) throws Exception {
-    if (path.isDirectory()) {
-      File[] files = path.listFiles();
-      for (File file : files) {
-        normalizeNames(file);
-      }
-    } else {
-      Matcher m = reduceTok.matcher(path.getName());
-      if (m.matches()) {
-        String name = m.group(1) + "reduce" + m.group(3);
-        path.renameTo(new File(path.getParent(), name));
-      } else {
-        m = mapTok.matcher(path.getName());
-        if (m.matches()) {
-          String name = m.group(1) + "map_" + m.group(3);
-          path.renameTo(new File(path.getParent(), name));
-        }
-      }
-    }
-  }
-
-  public String getOutputDirectory() {
-    return outDir;
-  }
-
-  public String getLogDirectory() {
-    return logDir;
-  }
-
-  private String getHadoopMainVersion(String input) {
-    if (input == null) {
-      return null;
-    }
-    Pattern p = Pattern.compile("^(\\d+\\.\\d+).*");
-    Matcher m = p.matcher(input);
-    if (m.matches()) {
-      return m.group(1);
-    }
-    return null;
-  }
-
-  public void initConf() throws Exception {
-
-    String vectorizationEnabled = System.getProperty("test.vectorization.enabled");
-    if(vectorizationEnabled != null && vectorizationEnabled.equalsIgnoreCase("true")) {
-      conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, true);
-    }
-
-    // Plug verifying metastore in for testing DirectSQL.
-    conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL,
-        "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
-
-    if (mr != null) {
-      mr.setupConfiguration(conf);
-
-      // TODO Ideally this should be done independent of whether mr is setup or not.
-      setFsRelatedProperties(conf, fs.getScheme().equals("file"),fs);
-    }
-    conf.set(ConfVars.HIVE_EXECUTION_ENGINE.varname, clusterType.name());
-  }
-
-  private void setFsRelatedProperties(HiveConf conf, boolean isLocalFs, FileSystem fs) {
-    String fsUriString = fs.getUri().toString();
-
-    // Different paths if running locally vs a remote fileSystem. Ideally this difference should not exist.
-    Path warehousePath;
-    Path jarPath;
-    Path userInstallPath;
-    if (isLocalFs) {
-      String buildDir = System.getProperty(BUILD_DIR_PROPERTY);
-      Preconditions.checkState(Strings.isNotBlank(buildDir));
-      Path path = new Path(fsUriString, buildDir);
-
-      // Create a fake fs root for local fs
-      Path localFsRoot  = new Path(path, "localfs");
-      warehousePath = new Path(localFsRoot, "warehouse");
-      jarPath = new Path(localFsRoot, "jar");
-      userInstallPath = new Path(localFsRoot, "user_install");
-    } else {
-      // TODO Why is this changed from the default in hive-conf?
-      warehousePath = new Path(fsUriString, "/build/ql/test/data/warehouse/");
-      jarPath = new Path(new Path(fsUriString, "/user"), "hive");
-      userInstallPath = new Path(fsUriString, "/user");
-    }
-
-    warehousePath = fs.makeQualified(warehousePath);
-    jarPath = fs.makeQualified(jarPath);
-    userInstallPath = fs.makeQualified(userInstallPath);
-
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUriString);
-
-    // Remote dirs
-    conf.setVar(ConfVars.METASTOREWAREHOUSE, warehousePath.toString());
-    conf.setVar(ConfVars.HIVE_JAR_DIRECTORY, jarPath.toString());
-    conf.setVar(ConfVars.HIVE_USER_INSTALL_DIR, userInstallPath.toString());
-    // ConfVars.SCRATCHDIR - {test.tmp.dir}/scratchdir
-
-    // Local dirs
-    // ConfVars.LOCALSCRATCHDIR - {test.tmp.dir}/localscratchdir
-
-    // TODO Make sure to cleanup created dirs.
-  }
-
-  private void createRemoteDirs() {
-    assert fs != null;
-    Path warehousePath = fs.makeQualified(new Path(conf.getVar(ConfVars.METASTOREWAREHOUSE)));
-    assert warehousePath != null;
-    Path hiveJarPath = fs.makeQualified(new Path(conf.getVar(ConfVars.HIVE_JAR_DIRECTORY)));
-    assert hiveJarPath != null;
-    Path userInstallPath = fs.makeQualified(new Path(conf.getVar(ConfVars.HIVE_USER_INSTALL_DIR)));
-    assert userInstallPath != null;
-    try {
-      fs.mkdirs(warehousePath);
-    } catch (IOException e) {
-      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
-          e.getMessage());
-    }
-    try {
-      fs.mkdirs(hiveJarPath);
-    } catch (IOException e) {
-      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
-          e.getMessage());
-    }
-    try {
-      fs.mkdirs(userInstallPath);
-    } catch (IOException e) {
-      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
-          e.getMessage());
-    }
-  }
-
-  private enum CoreClusterType {
-    MR,
-    TEZ,
-    SPARK,
-    DRUID
-  }
-
-  public enum FsType {
-    local,
-    hdfs,
-    encrypted_hdfs,
-  }
-
-  public enum MiniClusterType {
-
-    mr(CoreClusterType.MR, FsType.hdfs),
-    tez(CoreClusterType.TEZ, FsType.hdfs),
-    tez_local(CoreClusterType.TEZ, FsType.local),
-    spark(CoreClusterType.SPARK, FsType.local),
-    miniSparkOnYarn(CoreClusterType.SPARK, FsType.hdfs),
-    llap(CoreClusterType.TEZ, FsType.hdfs),
-    llap_local(CoreClusterType.TEZ, FsType.local),
-    none(CoreClusterType.MR, FsType.local),
-    druid(CoreClusterType.DRUID, FsType.hdfs);
-
-
-    private final CoreClusterType coreClusterType;
-    private final FsType defaultFsType;
-
-    MiniClusterType(CoreClusterType coreClusterType, FsType defaultFsType) {
-      this.coreClusterType = coreClusterType;
-      this.defaultFsType = defaultFsType;
-    }
-
-    public CoreClusterType getCoreClusterType() {
-      return coreClusterType;
-    }
-
-    public FsType getDefaultFsType() {
-      return defaultFsType;
-    }
-
-    public static MiniClusterType valueForString(String type) {
-      // Replace this with valueOf.
-      if (type.equals("miniMR")) {
-        return mr;
-      } else if (type.equals("tez")) {
-        return tez;
-      } else if (type.equals("tez_local")) {
-        return tez_local;
-      } else if (type.equals("spark")) {
-        return spark;
-      } else if (type.equals("miniSparkOnYarn")) {
-        return miniSparkOnYarn;
-      } else if (type.equals("llap")) {
-        return llap;
-      } else if (type.equals("llap_local")) {
-        return llap_local;
-      } else if (type.equals("druid")) {
-      return druid;
-      } else {
-        return none;
-      }
-    }
-  }
-
-
-  private String getKeyProviderURI() {
-    // Use the target directory if it is not specified
-    String keyDir = HIVE_ROOT + "ql/target/";
-
-    // put the jks file in the current test path only for test purpose
-    return "jceks://file" + new Path(keyDir, "test.jks").toUri();
-  }
-
-  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
-                   String confDir, String hadoopVer, String initScript, String cleanupScript,
-                   boolean withLlapIo) throws Exception {
-    this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript,
-        withLlapIo, null);
-  }
-
-  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
-      String confDir, String hadoopVer, String initScript, String cleanupScript,
-      boolean withLlapIo, FsType fsType)
-    throws Exception {
-    LOG.info("Setting up QTestUtil with outDir="+outDir+", logDir="+logDir+", clusterType="+clusterType+", confDir="+confDir+"," +
-        " hadoopVer="+hadoopVer+", initScript="+initScript+", cleanupScript="+cleanupScript+", withLlapIo="+withLlapIo+"," +
-            " fsType="+fsType+"");
-    Preconditions.checkNotNull(clusterType, "ClusterType cannot be null");
-    if (fsType != null) {
-      this.fsType = fsType;
-    } else {
-      this.fsType = clusterType.getDefaultFsType();
-    }
-    this.outDir = outDir;
-    this.logDir = logDir;
-    this.srcTables=getSrcTables();
-    this.srcUDFs = getSrcUDFs();
-
-    // HIVE-14443 move this fall-back logic to CliConfigs
-    if (confDir != null && !confDir.isEmpty()) {
-      HiveConf.setHiveSiteLocation(new URL("file://"+ new File(confDir).toURI().getPath() + "/hive-site.xml"));
-      MetastoreConf.setHiveSiteLocation(HiveConf.getHiveSiteLocation());
-      System.out.println("Setting hive-site: "+HiveConf.getHiveSiteLocation());
-    }
-
-    queryState = new QueryState.Builder().withHiveConf(new HiveConf(IDriver.class)).build();
-    conf = queryState.getConf();
-    this.hadoopVer = getHadoopMainVersion(hadoopVer);
-    qMap = new TreeMap<String, String>();
-    qSkipSet = new HashSet<String>();
-    qSortSet = new HashSet<String>();
-    qSortQuerySet = new HashSet<String>();
-    qHashQuerySet = new HashSet<String>();
-    qSortNHashQuerySet = new HashSet<String>();
-    qNoSessionReuseQuerySet = new HashSet<String>();
-    qJavaVersionSpecificOutput = new HashSet<String>();
-    this.clusterType = clusterType;
-
-    HadoopShims shims = ShimLoader.getHadoopShims();
-
-    setupFileSystem(shims);
-
-    setup = new QTestSetup();
-    setup.preTest(conf);
-
-    setupMiniCluster(shims, confDir);
-
-    initConf();
-
-    if (withLlapIo && (clusterType == MiniClusterType.none)) {
-      LOG.info("initializing llap IO");
-      LlapProxy.initializeLlapIo(conf);
-    }
-
-
-    // Use the current directory if it is not specified
-    String dataDir = conf.get("test.data.files");
-    if (dataDir == null) {
-      dataDir = new File(".").getAbsolutePath() + "/data/files";
-    }
-    testFiles = dataDir;
-
-    // Use the current directory if it is not specified
-    String scriptsDir = conf.get("test.data.scripts");
-    if (scriptsDir == null) {
-      scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
-    }
-
-    this.initScript = scriptsDir + File.separator + initScript;
-    this.cleanupScript = scriptsDir + File.separator + cleanupScript;
-
-    overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));
-
-    init();
-  }
-
-  private void setupFileSystem(HadoopShims shims) throws IOException {
-
-    if (fsType == FsType.local) {
-      fs = FileSystem.getLocal(conf);
-    } else if (fsType == FsType.hdfs || fsType == FsType.encrypted_hdfs) {
-      int numDataNodes = 4;
-
-      if (fsType == FsType.encrypted_hdfs) {
-        // Set the security key provider so that the MiniDFS cluster is initialized
-        // with encryption
-        conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI());
-        conf.setInt("fs.trash.interval", 50);
-
-        dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
-        fs = dfs.getFileSystem();
-
-        // set up the java key provider for encrypted hdfs cluster
-        hes = shims.createHdfsEncryptionShim(fs, conf);
-
-        LOG.info("key provider is initialized");
-      } else {
-        dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
-        fs = dfs.getFileSystem();
-      }
-    } else {
-      throw new IllegalArgumentException("Unknown or unhandled fsType [" + fsType + "]");
-    }
-  }
-
-  private void setupMiniCluster(HadoopShims shims, String confDir) throws
-      IOException {
-
-    String uriString = fs.getUri().toString();
-
-    if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
-      if (confDir != null && !confDir.isEmpty()) {
-        conf.addResource(new URL("file://" + new File(confDir).toURI().getPath()
-            + "/tez-site.xml"));
-      }
-      int numTrackers = 2;
-      if (EnumSet.of(MiniClusterType.llap_local, MiniClusterType.tez_local).contains(clusterType)) {
-        mr = shims.getLocalMiniTezCluster(conf, clusterType == MiniClusterType.llap_local);
-      } else {
-        mr = shims.getMiniTezCluster(conf, numTrackers, uriString,
-            EnumSet.of(MiniClusterType.llap, MiniClusterType.llap_local).contains(clusterType));
-      }
-    } else if (clusterType == MiniClusterType.miniSparkOnYarn) {
-      mr = shims.getMiniSparkCluster(conf, 2, uriString, 1);
-    } else if (clusterType == MiniClusterType.mr) {
-      mr = shims.getMiniMrCluster(conf, 2, uriString, 1);
-    }
-  }
-
-
-  public void shutdown() throws Exception {
-    if (System.getenv(QTEST_LEAVE_FILES) == null) {
-      cleanUp();
-    }
-
-    if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
-      SessionState.get().getTezSession().destroy();
-    }
-    
-    setup.tearDown();
-    if (sparkSession != null) {
-      try {
-        SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
-      } catch (Exception ex) {
-        LOG.error("Error closing spark session.", ex);
-      } finally {
-        sparkSession = null;
-      }
-    }
-    if (mr != null) {
-      mr.shutdown();
-      mr = null;
-    }
-    FileSystem.closeAll();
-    if (dfs != null) {
-      dfs.shutdown();
-      dfs = null;
-    }
-    Hive.closeCurrent();
-  }
-
-  public String readEntireFileIntoString(File queryFile) throws IOException {
-    InputStreamReader isr = new InputStreamReader(
-        new BufferedInputStream(new FileInputStream(queryFile)), QTestUtil.UTF_8);
-    StringWriter sw = new StringWriter();
-    try {
-      IOUtils.copy(isr, sw);
-    } finally {
-      if (isr != null) {
-        isr.close();
-      }
-    }
-    return sw.toString();
-  }
-
-  public void addFile(String queryFile) throws IOException {
-    addFile(queryFile, false);
-  }
-
-  public void addFile(String queryFile, boolean partial) throws IOException {
-    addFile(new File(queryFile));
-  }
-
-  public void addFile(File qf) throws IOException {
-    addFile(qf, false);
-  }
-
-  public void addFile(File qf, boolean partial) throws IOException  {
-    String query = readEntireFileIntoString(qf);
-    qMap.put(qf.getName(), query);
-    if (partial) {
-      return;
-    }
-
-    if(checkHadoopVersionExclude(qf.getName(), query)) {
-      qSkipSet.add(qf.getName());
-    }
-
-    if (checkNeedJavaSpecificOutput(qf.getName(), query)) {
-      qJavaVersionSpecificOutput.add(qf.getName());
-    }
-
-    if (matches(SORT_BEFORE_DIFF, query)) {
-      qSortSet.add(qf.getName());
-    } else if (matches(SORT_QUERY_RESULTS, query)) {
-      qSortQuerySet.add(qf.getName());
-    } else if (matches(HASH_QUERY_RESULTS, query)) {
-      qHashQuerySet.add(qf.getName());
-    } else if (matches(SORT_AND_HASH_QUERY_RESULTS, query)) {
-      qSortNHashQuerySet.add(qf.getName());
-    }
-    if (matches(NO_SESSION_REUSE, query)) {
-      qNoSessionReuseQuerySet.add(qf.getName());
-    }
-  }
-
-  private static final Pattern SORT_BEFORE_DIFF = Pattern.compile("-- SORT_BEFORE_DIFF");
-  private static final Pattern SORT_QUERY_RESULTS = Pattern.compile("-- SORT_QUERY_RESULTS");
-  private static final Pattern HASH_QUERY_RESULTS = Pattern.compile("-- HASH_QUERY_RESULTS");
-  private static final Pattern SORT_AND_HASH_QUERY_RESULTS = Pattern.compile("-- SORT_AND_HASH_QUERY_RESULTS");
-  private static final Pattern NO_SESSION_REUSE = Pattern.compile("-- NO_SESSION_REUSE");
-
-  private boolean matches(Pattern pattern, String query) {
-    Matcher matcher = pattern.matcher(query);
-    if (matcher.find()) {
-      return true;
-    }
-    return false;
-  }
-
-  private boolean checkHadoopVersionExclude(String fileName, String query){
-
-    // Look for a hint to not run a test on some Hadoop versions
-    Pattern pattern = Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");
-
-    boolean excludeQuery = false;
-    boolean includeQuery = false;
-    Set<String> versionSet = new HashSet<String>();
-    String hadoopVer = ShimLoader.getMajorVersion();
-
-    Matcher matcher = pattern.matcher(query);
-
-    // Each qfile may include at most one INCLUDE or EXCLUDE directive.
-    //
-    // If a qfile contains an INCLUDE directive, and hadoopVer does
-    // not appear in the list of versions to include, then the qfile
-    // is skipped.
-    //
-    // If a qfile contains an EXCLUDE directive, and hadoopVer is
-    // listed in the list of versions to EXCLUDE, then the qfile is
-    // skipped.
-    //
-    // Otherwise, the qfile is included.
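-    //
-    // Illustrative examples (syntax follows the regex above, version values are made up):
-    //   -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.23)  -> run only on Hadoop 0.20 or 0.23
-    //   -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.23)        -> skip on Hadoop 0.23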
-
-    if (matcher.find()) {
-
-      String prefix = matcher.group(1);
-      if ("EX".equals(prefix)) {
-        excludeQuery = true;
-      } else {
-        includeQuery = true;
-      }
-
-      String versions = matcher.group(2);
-      for (String s : versions.split("\\,")) {
-        s = s.trim();
-        versionSet.add(s);
-      }
-    }
-
-    if (matcher.find()) {
-      // a 2nd match is not supposed to be there
-      String message = "QTestUtil: qfile " + fileName
-        + " contains more than one reference to (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS";
-      throw new UnsupportedOperationException(message);
-    }
-
-    if (excludeQuery && versionSet.contains(hadoopVer)) {
-      System.out.println("QTestUtil: " + fileName
-        + " EXCLUDE list contains Hadoop Version " + hadoopVer + ". Skipping...");
-      return true;
-    } else if (includeQuery && !versionSet.contains(hadoopVer)) {
-      System.out.println("QTestUtil: " + fileName
-        + " INCLUDE list does not contain Hadoop Version " + hadoopVer + ". Skipping...");
-      return true;
-    }
-    return false;
-  }
-
-  private boolean checkNeedJavaSpecificOutput(String fileName, String query) {
-    Pattern pattern = Pattern.compile("-- JAVA_VERSION_SPECIFIC_OUTPUT");
-    Matcher matcher = pattern.matcher(query);
-    if (matcher.find()) {
-      System.out.println("Test is flagged to generate Java version specific " +
-          "output. Since we are using Java version " + javaVersion +
-          ", we will generate a Java " + javaVersion + " specific " +
-          "output file for query file " + fileName);
-      return true;
-    }
-
-    return false;
-  }
-
-  /**
-   * Get formatted Java version to include minor version, but
-   * exclude patch level.
-   *
-   * @return Java version formatted as major_version.minor_version
-   */
-  private static String getJavaVersion() {
-    String version = System.getProperty("java.version");
-    if (version == null) {
-      throw new NullPointerException("No java version could be determined " +
-          "from system properties");
-    }
-
-    // The "java.version" system property is formatted as
-    // major_version.minor_version.patch_level.
-    // Find second dot, instead of last dot, to be safe
-    int pos = version.indexOf('.');
-    pos = version.indexOf('.', pos + 1);
-    return version.substring(0, pos);
-  }
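-  // For example, a "java.version" value of "1.8.0_151" yields "1.8" (everything up to the second dot).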
-
-  /**
-   * Clear out any side effects of running tests
-   */
-  public void clearPostTestEffects() throws Exception {
-    setup.postTest(conf);
-  }
-
-  public void clearKeysCreatedInTests() {
-    if (hes == null) {
-      return;
-    }
-    try {
-      for (String keyAlias : hes.getKeys()) {
-        hes.deleteKey(keyAlias);
-      }
-    } catch (IOException e) {
-      LOG.error("Fail to clean the keys created in test due to the error", e);
-    }
-  }
-
-  public void clearUDFsCreatedDuringTests() throws Exception {
-    if (System.getenv(QTEST_LEAVE_FILES) != null) {
-      return;
-    }
-    // Delete functions created by the tests
-    // It is enough to remove functions from the default database, other databases are dropped
-    for (String udfName : db.getFunctions(DEFAULT_DATABASE_NAME, ".*")) {
-      if (!srcUDFs.contains(udfName)) {
-        db.dropFunction(DEFAULT_DATABASE_NAME, udfName);
-      }
-    }
-  }
-
-  /**
-   * Clear out any side effects of running tests
-   */
-  public void clearTablesCreatedDuringTests() throws Exception {
-    if (System.getenv(QTEST_LEAVE_FILES) != null) {
-      return;
-    }
-
-    conf.set("hive.metastore.filter.hook",
-        "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl");
-    db = Hive.get(conf);
-
-    // First delete any MVs to avoid race conditions
-    for (String dbName : db.getAllDatabases()) {
-      SessionState.get().setCurrentDatabase(dbName);
-      for (String tblName : db.getAllTables()) {
-        Table tblObj = null;
-        try {
-          tblObj = db.getTable(tblName);
-        } catch (InvalidTableException e) {
-          LOG.warn("Trying to drop table " + e.getTableName() + ". But it does not exist.");
-          continue;
-        }
-        // only remove MVs first
-        if (!tblObj.isMaterializedView()) {
-          continue;
-        }
-        db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs);
-      }
-    }
-
-    // Delete any tables other than the source tables
-    // and any databases other than the default database.
-    for (String dbName : db.getAllDatabases()) {
-      SessionState.get().setCurrentDatabase(dbName);
-      for (String tblName : db.getAllTables()) {
-        if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) {
-          Table tblObj = null;
-          try {
-            tblObj = db.getTable(tblName);
-          } catch (InvalidTableException e) {
-            LOG.warn("Trying to drop table " + e.getTableName() + ". But it does not exist.");
-            continue;
-          }
-          // only remove MVs first
-          if (!tblObj.isMaterializedView()) {
-            continue;
-          }
-          db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs);
-        }
-      }
-      if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
-        // Drop cascade, functions dropped by cascade
-        db.dropDatabase(dbName, true, true, true);
-      }
-    }
-
-    // delete remaining directories for external tables (can affect stats for following tests)
-    try {
-      Path p = new Path(testWarehouse);
-      FileSystem fileSystem = p.getFileSystem(conf);
-      if (fileSystem.exists(p)) {
-        for (FileStatus status : fileSystem.listStatus(p)) {
-          if (status.isDirectory() && !srcTables.contains(status.getPath().getName())) {
-            fileSystem.delete(status.getPath(), true);
-          }
-        }
-      }
-    } catch (IllegalArgumentException e) {
-      // ignore; tests sometimes intentionally provide an invalid URL
-    }
-    SessionState.get().setCurrentDatabase(DEFAULT_DATABASE_NAME);
-
-    List<String> roleNames = db.getAllRoleNames();
-    for (String roleName : roleNames) {
-      if (!"PUBLIC".equalsIgnoreCase(roleName) && !"ADMIN".equalsIgnoreCase(roleName)) {
-        db.dropRole(roleName);
-      }
-    }
-  }
-
-  /**
-   * Clear out any side effects of running tests
-   */
-  public void clearTestSideEffects() throws Exception {
-    if (System.getenv(QTEST_LEAVE_FILES) != null) {
-      return;
-    }
-
-    // Remove any cached results from the previous test.
-    QueryResultsCache.cleanupInstance();
-
-    // allocate and initialize a new conf since a test can
-    // modify conf by using 'set' commands
-    conf = new HiveConf(IDriver.class);
-    initConf();
-    initConfFromSetup();
-
-    // renew the metastore since the cluster type is unencrypted
-    db = Hive.get(conf);  // propagate new conf to meta store
-
-    clearTablesCreatedDuringTests();
-    clearUDFsCreatedDuringTests();
-    clearKeysCreatedInTests();
-  }
-
-  protected void initConfFromSetup() throws Exception {
-    setup.preTest(conf);
-  }
-
-  public void cleanUp() throws Exception {
-    cleanUp(null);
-  }
-
-  public void cleanUp(String tname) throws Exception {
-    boolean canReuseSession = (tname == null) || !qNoSessionReuseQuerySet.contains(tname);
-    if(!isSessionStateStarted) {
-      startSessionState(canReuseSession);
-    }
-    if (System.getenv(QTEST_LEAVE_FILES) != null) {
-      return;
-    }
-
-    clearTablesCreatedDuringTests();
-    clearUDFsCreatedDuringTests();
-    clearKeysCreatedInTests();
-
-    File cleanupFile = new File(cleanupScript);
-    if (cleanupFile.isFile()) {
-      String cleanupCommands = readEntireFileIntoString(cleanupFile);
-      LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
-      if(cliDriver == null) {
-        cliDriver = new CliDriver();
-      }
-      SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
-      int result = cliDriver.processLine(cleanupCommands);
-      if (result != 0) {
-        LOG.error("Failed during cleanup processLine with code={}. Ignoring", result);
-        // TODO Convert this to an Assert.fail once HIVE-14682 is fixed
-      }
-      SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", false);
-    } else {
-      LOG.info("No cleanup script detected. Skipping.");
-    }
-
-    // delete any contents in the warehouse dir
-    Path p = new Path(testWarehouse);
-    FileSystem fs = p.getFileSystem(conf);
-
-    try {
-      FileStatus [] ls = fs.listStatus(p);
-      for (int i=0; (ls != null) && (i<ls.length); i++) {
-        fs.delete(ls[i].getPath(), true);
-      }
-    } catch (FileNotFoundException e) {
-      // Best effort
-    }
-
-    // TODO: Clean up all the other paths that are created.
-
-    FunctionRegistry.unregisterTemporaryUDF("test_udaf");
-    FunctionRegistry.unregisterTemporaryUDF("test_error");
-  }
-
-  protected void runCreateTableCmd(String createTableCmd) throws Exception {
-    int ecode = 0;
-    ecode = drv.run(createTableCmd).getResponseCode();
-    if (ecode != 0) {
-      throw new Exception("create table command: " + createTableCmd
-          + " failed with exit code= " + ecode);
-    }
-
-    return;
-  }
-
-  protected void runCmd(String cmd) throws Exception {
-    int ecode = 0;
-    ecode = drv.run(cmd).getResponseCode();
-    drv.close();
-    if (ecode != 0) {
-      throw new Exception("command: " + cmd
-          + " failed with exit code= " + ecode);
-    }
-    return;
-  }
-
-  public void createSources() throws Exception {
-    createSources(null);
-  }
-
-  public void createSources(String tname) throws Exception {
-    boolean canReuseSession = (tname == null) || !qNoSessionReuseQuerySet.contains(tname);
-    if(!isSessionStateStarted) {
-      startSessionState(canReuseSession);
-    }
-
-    if(cliDriver == null) {
-      cliDriver = new CliDriver();
-    }
-    cliDriver.processLine("set test.data.dir=" + testFiles + ";");
-    File scriptFile = new File(this.initScript);
-    if (!scriptFile.isFile()) {
-      LOG.info("No init script detected. Skipping");
-      return;
-    }
-    conf.setBoolean("hive.test.init.phase", true);
-
-    String initCommands = readEntireFileIntoString(scriptFile);
-    LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
-
-    int result = cliDriver.processLine(initCommands);
-    LOG.info("Result from cliDriver.processLine in createSources=" + result);
-    if (result != 0) {
-      Assert.fail("Failed during createSources processLine with code=" + result);
-    }
-
-    conf.setBoolean("hive.test.init.phase", false);
-  }
-
-  public void init() throws Exception {
-
-    // Create remote dirs once.
-    if (mr != null) {
-      createRemoteDirs();
-    }
-
-    // Create views registry
-    HiveMaterializedViewsRegistry.get().init();
-
-    testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
-    String execEngine = conf.get("hive.execution.engine");
-    conf.set("hive.execution.engine", "mr");
-    SessionState.start(conf);
-    conf.set("hive.execution.engine", execEngine);
-    db = Hive.get(conf);
-    drv = DriverFactory.newDriver(conf);
-    pd = new ParseDriver();
-    sem = new SemanticAnalyzer(queryState);
-  }
-
-  public void init(String tname) throws Exception {
-    cleanUp(tname);
-    createSources(tname);
-    cliDriver.processCmd("set hive.cli.print.header=true;");
-  }
-
-  public void cliInit(String tname) throws Exception {
-    cliInit(tname, true);
-  }
-
-  public String cliInit(String tname, boolean recreate) throws Exception {
-    if (recreate) {
-      cleanUp(tname);
-      createSources(tname);
-    }
-
-    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
-    "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
-    Utilities.clearWorkMap(conf);
-    CliSessionState ss = new CliSessionState(conf);
-    assert ss != null;
-    ss.in = System.in;
-
-    String outFileExtension = getOutFileExtension(tname);
-    String stdoutName = null;
-    if (outDir != null) {
-      // TODO: why is this needed?
-      File qf = new File(outDir, tname);
-      stdoutName = qf.getName().concat(outFileExtension);
-    } else {
-      stdoutName = tname + outFileExtension;
-    }
-
-    File outf = new File(logDir, stdoutName);
-    OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
-    if (qSortQuerySet.contains(tname)) {
-      ss.out = new SortPrintStream(fo, "UTF-8");
-    } else if (qHashQuerySet.contains(tname)) {
-      ss.out = new DigestPrintStream(fo, "UTF-8");
-    } else if (qSortNHashQuerySet.contains(tname)) {
-      ss.out = new SortAndDigestPrintStream(fo, "UTF-8");
-    } else {
-      ss.out = new PrintStream(fo, true, "UTF-8");
-    }
-    ss.err = new CachingPrintStream(fo, true, "UTF-8");
-    ss.setIsSilent(true);
-    SessionState oldSs = SessionState.get();
-
-    boolean canReuseSession = !qNoSessionReuseQuerySet.contains(tname);
-    if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
-      // Copy the tezSessionState from the old CliSessionState.
-      TezSessionState tezSessionState = oldSs.getTezSession();
-      oldSs.setTezSession(null);
-      ss.setTezSession(tezSessionState);
-      oldSs.close();
-    }
-
-    if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.SPARK) {
-      sparkSession = oldSs.getSparkSession();
-      ss.setSparkSession(sparkSession);
-      oldSs.setSparkSession(null);
-      oldSs.close();
-    }
-
-    if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
-      oldSs.out.close();
-    }
-    if (oldSs != null) {
-      oldSs.close();
-    }
-    SessionState.start(ss);
-
-    cliDriver = new CliDriver();
-
-    if (tname.equals("init_file.q")) {
-      ss.initFiles.add(HIVE_ROOT + "/data/scripts/test_init_file.sql");
-    }
-    cliDriver.processInitFiles(ss);
-
-    return outf.getAbsolutePath();
-  }
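-  // Typical flow (illustrative, mirroring QTRunner and the queryListRunner* helpers below;
-  // the query file name is hypothetical):
-  //   qt.cliInit("foo.q", true);
-  //   qt.executeClient("foo.q");
-  //   qt.checkCliDriverResults("foo.q");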
-
-  private CliSessionState startSessionState(boolean canReuseSession)
-      throws IOException {
-
-    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
-        "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
-
-    String execEngine = conf.get("hive.execution.engine");
-    conf.set("hive.execution.engine", "mr");
-    CliSessionState ss = new CliSessionState(conf);
-    assert ss != null;
-    ss.in = System.in;
-    ss.out = System.out;
-    ss.err = System.out;
-
-    SessionState oldSs = SessionState.get();
-    if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
-      // Copy the tezSessionState from the old CliSessionState.
-      TezSessionState tezSessionState = oldSs.getTezSession();
-      ss.setTezSession(tezSessionState);
-      oldSs.setTezSession(null);
-      oldSs.close();
-    }
-
-    if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.SPARK) {
-      sparkSession = oldSs.getSparkSession();
-      ss.setSparkSession(sparkSession);
-      oldSs.setSparkSession(null);
-      oldSs.close();
-    }
-    if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
-      oldSs.out.close();
-    }
-    if (oldSs != null) {
-      oldSs.close();
-    }
-    SessionState.start(ss);
-
-    isSessionStateStarted = true;
-
-    conf.set("hive.execution.engine", execEngine);
-    return ss;
-  }
-
-  public int executeAdhocCommand(String q) {
-    if (!q.contains(";")) {
-      return -1;
-    }
-
-    String q1 = q.split(";")[0] + ";";
-
-    LOG.debug("Executing " + q1);
-    return cliDriver.processLine(q1);
-  }
-
-  public int executeOne(String tname) {
-    String q = qMap.get(tname);
-
-    if (q.indexOf(";") == -1) {
-      return -1;
-    }
-
-    String q1 = q.substring(0, q.indexOf(";") + 1);
-    String qrest = q.substring(q.indexOf(";") + 1);
-    qMap.put(tname, qrest);
-
-    System.out.println("Executing " + q1);
-    return cliDriver.processLine(q1);
-  }
-
-  public int execute(String tname) {
-    return drv.run(qMap.get(tname)).getResponseCode();
-  }
-
-  public int executeClient(String tname1, String tname2) {
-    String commands = getCommand(tname1) + CRLF + getCommand(tname2);
-    return executeClientInternal(commands);
-  }
-
-  public int executeClient(String tname) {
-    return executeClientInternal(getCommand(tname));
-  }
-
-  private int executeClientInternal(String commands) {
-    List<String> cmds = CliDriver.splitSemiColon(commands);
-    int rc = 0;
-
-    String command = "";
-    for (String oneCmd : cmds) {
-      if (StringUtils.endsWith(oneCmd, "\\")) {
-        command += StringUtils.chop(oneCmd) + "\\;";
-        continue;
-      } else {
-        if (isHiveCommand(oneCmd)) {
-          command = oneCmd;
-        } else {
-          command += oneCmd;
-        }
-      }
-      if (StringUtils.isBlank(command)) {
-        continue;
-      }
-
-      if (isCommandUsedForTesting(command)) {
-        rc = executeTestCommand(command);
-      } else {
-        rc = cliDriver.processLine(command);
-      }
-
-      if (rc != 0 && !ignoreErrors()) {
-        break;
-      }
-      command = "";
-    }
-    if (rc == 0 && SessionState.get() != null) {
-      SessionState.get().setLastCommand(null);  // reset
-    }
-    return rc;
-  }
-
-  /**
-   * This allows a .q file to continue executing after a statement runs into an error, which is convenient
-   * if you want to use another Hive command after the failure to sanity-check the state of the system.
-   */
-  private boolean ignoreErrors() {
-    return conf.getBoolVar(HiveConf.ConfVars.CLIIGNOREERRORS);
-  }
-
-  private boolean isHiveCommand(String command) {
-    String[] cmd = command.trim().split("\\s+");
-    if (HiveCommand.find(cmd) != null) {
-      return true;
-    } else if (HiveCommand.find(cmd, HiveCommand.ONLY_FOR_TESTING) != null) {
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  private int executeTestCommand(final String command) {
-    String commandName = command.trim().split("\\s+")[0];
-    String commandArgs = command.trim().substring(commandName.length());
-
-    if (commandArgs.endsWith(";")) {
-      commandArgs = StringUtils.chop(commandArgs);
-    }
-
-    // replace ${hiveconf:hive.metastore.warehouse.dir} with the actual warehouse dir if it is set.
-    // we only want the absolute path, so strip the scheme/authority prefix, e.g. hdfs://localhost:57145
-    String wareHouseDir = SessionState.get().getConf().getVar(ConfVars.METASTOREWAREHOUSE)
-        .replaceAll("^[a-zA-Z]+://.*?:\\d+", "");
-    commandArgs = commandArgs.replaceAll("\\$\\{hiveconf:hive\\.metastore\\.warehouse\\.dir\\}",
-      wareHouseDir);
-
-    if (SessionState.get() != null) {
-      SessionState.get().setLastCommand(commandName + " " + commandArgs.trim());
-    }
-
-    enableTestOnlyCmd(SessionState.get().getConf());
-
-    try {
-      CommandProcessor proc = getTestCommand(commandName);
-      if (proc != null) {
-        CommandProcessorResponse response = proc.run(commandArgs.trim());
-
-        int rc = response.getResponseCode();
-        if (rc != 0) {
-          SessionState.getConsole().printError(response.toString(), response.getException() != null ?
-                  Throwables.getStackTraceAsString(response.getException()) : "");
-        }
-
-        return rc;
-      } else {
-        throw new RuntimeException("Could not get CommandProcessor for command: " + commandName);
-      }
-    } catch (Exception e) {
-      throw new RuntimeException("Could not execute test command", e);
-    }
-  }
-
-  private CommandProcessor getTestCommand(final String commandName) throws SQLException {
-    HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING);
-
-    if (testCommand == null) {
-      return null;
-    }
-
-    return CommandProcessorFactory
-      .getForHiveCommandInternal(new String[]{commandName}, SessionState.get().getConf(),
-        testCommand.isOnlyForTesting());
-  }
-
-  private void enableTestOnlyCmd(HiveConf conf){
-    StringBuilder securityCMDs = new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST));
-    for(String c : testOnlyCommands){
-      securityCMDs.append(",");
-      securityCMDs.append(c);
-    }
-    conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString());
-  }
-
-  private boolean isCommandUsedForTesting(final String command) {
-    String commandName = command.trim().split("\\s+")[0];
-    HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING);
-    return testCommand != null;
-  }
-
-  private String getCommand(String tname) {
-    String commands = qMap.get(tname);
-    StringBuilder newCommands = new StringBuilder(commands.length());
-    int lastMatchEnd = 0;
-    Matcher commentMatcher = Pattern.compile("^--.*$", Pattern.MULTILINE).matcher(commands);
-    // remove the comments
-    while (commentMatcher.find()) {
-      newCommands.append(commands.substring(lastMatchEnd, commentMatcher.start()));
-      lastMatchEnd = commentMatcher.end();
-    }
-    newCommands.append(commands.substring(lastMatchEnd, commands.length()));
-    commands = newCommands.toString();
-    return commands;
-  }
-
-  public boolean shouldBeSkipped(String tname) {
-    return qSkipSet.contains(tname);
-  }
-
-  private String getOutFileExtension(String fname) {
-    String outFileExtension = ".out";
-    if (qJavaVersionSpecificOutput.contains(fname)) {
-      outFileExtension = ".java" + javaVersion + ".out";
-    }
-
-    return outFileExtension;
-  }
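-  // e.g. plain tests compare against "foo.q.out", while tests flagged with
-  // -- JAVA_VERSION_SPECIFIC_OUTPUT compare against "foo.q.java1.8.out" (file and version values illustrative).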
-
-  public void convertSequenceFileToTextFile() throws Exception {
-    // Create an instance of hive in order to create the tables
-    testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
-    db = Hive.get(conf);
-
-    // Move all data from dest4_sequencefile to dest4
-    drv
-      .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
-
-    // Drop dest4_sequencefile
-    db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, "dest4_sequencefile",
-        true, true);
-  }
-
-  public QTestProcessExecResult checkNegativeResults(String tname, Exception e) throws Exception {
-
-    String outFileExtension = getOutFileExtension(tname);
-
-    File qf = new File(outDir, tname);
-    String expf = outPath(outDir.toString(), tname.concat(outFileExtension));
-
-    File outf = null;
-    outf = new File(logDir);
-    outf = new File(outf, qf.getName().concat(outFileExtension));
-
-    FileWriter outfd = new FileWriter(outf);
-    if (e instanceof ParseException) {
-      outfd.write("Parse Error: ");
-    } else if (e instanceof SemanticException) {
-      outfd.write("Semantic Exception: \n");
-    } else {
-      throw e;
-    }
-
-    outfd.write(e.getMessage());
-    outfd.close();
-
-    QTestProcessExecResult result = executeDiffCommand(outf.getPath(), expf, false,
-                                     qSortSet.contains(qf.getName()));
-    if (overWrite) {
-      overwriteResults(outf.getPath(), expf);
-      return QTestProcessExecResult.createWithoutOutput(0);
-    }
-
-    return result;
-  }
-
-  public QTestProcessExecResult checkParseResults(String tname, ASTNode tree) throws Exception {
-
-    if (tree != null) {
-      String outFileExtension = getOutFileExtension(tname);
-
-      File parseDir = new File(outDir, "parse");
-      String expf = outPath(parseDir.toString(), tname.concat(outFileExtension));
-
-      File outf = null;
-      outf = new File(logDir);
-      outf = new File(outf, tname.concat(outFileExtension));
-
-      FileWriter outfd = new FileWriter(outf);
-      outfd.write(tree.toStringTree());
-      outfd.close();
-
-      QTestProcessExecResult exitVal = executeDiffCommand(outf.getPath(), expf, false, false);
-
-      if (overWrite) {
-        overwriteResults(outf.getPath(), expf);
-        return QTestProcessExecResult.createWithoutOutput(0);
-      }
-
-      return exitVal;
-    } else {
-      throw new Exception("Parse tree is null");
-    }
-  }
-
-  /**
-   * Given the current configurations (e.g., hadoop version and execution mode), return
-   * the correct file name to compare with the current test run output.
-   * @param outDir The directory where the reference log files are stored.
-   * @param testName The test file name (terminated by ".out").
-   * @return The file name appended with the configuration values if it exists.
-   */
-  public String outPath(String outDir, String testName) {
-    String ret = (new File(outDir, testName)).getPath();
-    // List of configurations. Currently the list consists of hadoop version and execution mode only
-    List<String> configs = new ArrayList<String>();
-    configs.add(this.clusterType.toString());
-    configs.add(this.hadoopVer);
-
-    Deque<String> stack = new LinkedList<String>();
-    StringBuilder sb = new StringBuilder();
-    sb.append(testName);
-    stack.push(sb.toString());
-
-    // example file names are input1.q.out_mr_0.17 or input2.q.out_0.17
-    for (String s: configs) {
-      sb.append('_');
-      sb.append(s);
-      stack.push(sb.toString());
-    }
-    while (stack.size() > 0) {
-      String fileName = stack.pop();
-      File f = new File(outDir, fileName);
-      if (f.exists()) {
-        ret = f.getPath();
-        break;
-      }
-    }
-    return ret;
-  }
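-  // Lookup order for the example above: with clusterType "mr" and hadoopVer "0.17", the candidates
-  // tried for "input1.q.out" are input1.q.out_mr_0.17, then input1.q.out_mr, then input1.q.out.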
-
-  private Pattern[] toPattern(String[] patternStrs) {
-    Pattern[] patterns = new Pattern[patternStrs.length];
-    for (int i = 0; i < patternStrs.length; i++) {
-      patterns[i] = Pattern.compile(patternStrs[i]);
-    }
-    return patterns;
-  }
-
-  private void maskPatterns(Pattern[] patterns, String fname) throws Exception {
-    String maskPattern = "#### A masked pattern was here ####";
-    String partialMaskPattern = "#### A PARTIAL masked pattern was here ####";
-
-    String line;
-    BufferedReader in;
-    BufferedWriter out;
-
-    File file = new File(fname);
-    File fileOrig = new File(fname + ".orig");
-    FileUtils.copyFile(file, fileOrig);
-
-    in = new BufferedReader(new InputStreamReader(new FileInputStream(fileOrig), "UTF-8"));
-    out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
-
-    boolean lastWasMasked = false;
-    boolean partialMaskWasMatched = false;
-    Matcher matcher;
-    while (null != (line = in.readLine())) {
-      if (fsType == FsType.encrypted_hdfs) {
-        for (Pattern pattern : partialReservedPlanMask) {
-          matcher = pattern.matcher(line);
-          if (matcher.find()) {
-            line = partialMaskPattern + " " + matcher.group(0);
-            partialMaskWasMatched = true;
-            break;
-          }
-        }
-      }
-      else {
-        for (PatternReplacementPair prp : partialPlanMask) {
-          matcher = prp.pattern.matcher(line);
-          if (matcher.find()) {
-            line = line.replaceAll(prp.pattern.pattern(), prp.replacement);
-            partialMaskWasMatched = true;
-          }
-        }
-      }
-
-      if (!partialMaskWasMatched) {
-        for (Pair<Pattern, String> pair : patternsWithMaskComments) {
-          Pattern pattern = pair.getLeft();
-          String maskComment = pair.getRight();
-
-          matcher = pattern.matcher(line);
-          if (matcher.find()) {
-            line = matcher.replaceAll(maskComment);
-            partialMaskWasMatched = true;
-            break;
-          }
-        }
-
-        for (Pattern pattern : patterns) {
-          line = pattern.matcher(line).replaceAll(maskPattern);
-        }
-      }
-
-      if (line.equals(maskPattern)) {
-        // We're folding multiple masked lines into one.
-        if (!lastWasMasked) {
-          out.write(line);
-          out.write("\n");
-          lastWasMasked = true;
-          partialMaskWasMatched = false;
-        }
-      } else {
-        out.write(line);
-        out.write("\n");
-        lastWasMasked = false;
-        partialMaskWasMatched = false;
-      }
-    }
-
-    in.close();
-    out.close();
-  }
-
-  private final Pattern[] planMask = toPattern(new String[] {
-      ".*file:.*",
-      ".*pfile:.*",
-      ".*/tmp/.*",
-      ".*invalidscheme:.*",
-      ".*lastUpdateTime.*",
-      ".*lastAccessTime.*",
-      ".*lastModifiedTime.*",
-      ".*[Oo]wner.*",
-      ".*CreateTime.*",
-      ".*LastAccessTime.*",
-      ".*Location.*",
-      ".*LOCATION '.*",
-      ".*transient_lastDdlTime.*",
-      ".*last_modified_.*",
-      ".*at org.*",
-      ".*at sun.*",
-      ".*at java.*",
-      ".*at junit.*",
-      ".*Caused by:.*",
-      ".*LOCK_QUERYID:.*",
-      ".*LOCK_TIME:.*",
-      ".*grantTime.*",
-      ".*[.][.][.] [0-9]* more.*",
-      ".*job_[0-9_]*.*",
-      ".*job_local[0-9_]*.*",
-      ".*USING 'java -cp.*",
-      "^Deleted.*",
-      ".*DagName:.*",
-      ".*DagId:.*",
-      ".*Input:.*/data/files/.*",
-      ".*Output:.*/data/files/.*",
-      ".*total number of created files now is.*",
-      ".*.hive-staging.*",
-      ".*Warning.*",
-      "pk_-?[0-9]*_[0-9]*_[0-9]*",
-      "fk_-?[0-9]*_[0-9]*_[0-9]*",
-      "uk_-?[0-9]*_[0-9]*_[0-9]*",
-      "nn_-?[0-9]*_[0-9]*_[0-9]*",
-      ".*at com\\.sun\\.proxy.*",
-      ".*at com\\.jolbox.*",
-      ".*at com\\.zaxxer.*",
-      "org\\.apache\\.hadoop\\.hive\\.metastore\\.model\\.MConstraint@([0-9]|[a-z])*",
-      "^Repair: Added partition to metastore.*"
-  });
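-  // The planMask patterns above blank out nondeterministic output (absolute paths, timestamps,
-  // owners, job/DAG ids, stack trace lines, staging dirs) before diffing against the golden files.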
-
-  private final Pattern[] partialReservedPlanMask = toPattern(new String[] {
-      "data/warehouse/(.*?/)+\\.hive-staging"  // the directory might be db/table/partition
-      //TODO: add more expected test result here
-  });
-  /**
-   * Pattern to match and (partial) replacement text.
-   * For example, {"transactionid":76,"bucketid":8249877}.  We just want to mask 76 but a regex that
-   * matches just 76 will match a lot of other things.
-   */
-  private final static class PatternReplacementPair {
-    private final Pattern pattern;
-    private final String replacement;
-    PatternReplacementPair(Pattern p, String r) {
-      pattern = p;
-      replacement = r;
-    }
-  }
-  private final PatternReplacementPair[] partialPlanMask;
-  {
-    ArrayList<PatternReplacementPair> ppm = new ArrayList<>();
-    ppm.add(new PatternReplacementPair(Pattern.compile("\\{\"transactionid\":[1-9][0-9]*,\"bucketid\":"),
-      "{\"transactionid\":### Masked txnid ###,\"bucketid\":"));
-
-    ppm.add(new PatternReplacementPair(Pattern.compile("attempt_[0-9]+"), "attempt_#ID#"));
-    ppm.add(new PatternReplacementPair(Pattern.compile("vertex_[0-9_]+"), "vertex_#ID#"));
-    ppm.add(new PatternReplacementPair(Pattern.compile("task_[0-9_]+"), "task_#ID#"));
-    partialPlanMask = ppm.toArray(new PatternReplacementPair[ppm.size()]);
-  }
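-  // For example, a plan line containing "vertex_1553622272924_0001_1_00" (id made up) would be
-  // rewritten to "vertex_#ID#" by the partial masks above.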
-  /* This list may be modified by specific cli drivers to mask strings that change on every test */
-  private final List<Pair<Pattern, String>> patternsWithMaskComments =
-      new ArrayList<Pair<Pattern, String>>() {
-        {
-          add(toPatternPair("(pblob|s3.?|swift|wasb.?).*hive-staging.*",
-              "### BLOBSTORE_STAGING_PATH ###"));
-          add(toPatternPair(PATH_HDFS_WITH_DATE_USER_GROUP_REGEX,
-              "### USER ### ### GROUP ###$3$4 ### HDFS DATE ### $6### HDFS PATH ###"));
-          add(toPatternPair(PATH_HDFS_REGEX, "$1### HDFS PATH ###"));
-        }
-      };
-
-  private Pair<Pattern, String> toPatternPair(String patternStr, String maskComment) {
-    return ImmutablePair.of(Pattern.compile(patternStr), maskComment);
-  }
-
-  public void addPatternWithMaskComment(String patternStr, String maskComment) {
-    patternsWithMaskComments.add(toPatternPair(patternStr, maskComment));
-  }
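-  // Illustrative use from a specific cli driver (pattern and mask comment are hypothetical):
-  //   qt.addPatternWithMaskComment("krb5.*\\.keytab", "### KEYTAB PATH ###");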
-
-  public QTestProcessExecResult checkCliDriverResults(String tname) throws Exception {
-    assert(qMap.containsKey(tname));
-
-    String outFileExtension = getOutFileExtension(tname);
-    String outFileName = outPath(outDir, tname + outFileExtension);
-
-    File f = new File(logDir, tname + outFileExtension);
-
-    maskPatterns(planMask, f.getPath());
-    QTestProcessExecResult exitVal = executeDiffCommand(f.getPath(),
-                                     outFileName, false,
-                                     qSortSet.contains(tname));
-
-    if (overWrite) {
-      overwriteResults(f.getPath(), outFileName);
-      return QTestProcessExecResult.createWithoutOutput(0);
-    }
-
-    return exitVal;
-  }
-
-
-  public QTestProcessExecResult checkCompareCliDriverResults(String tname, List<String> outputs)
-      throws Exception {
-    assert outputs.size() > 1;
-    maskPatterns(planMask, outputs.get(0));
-    for (int i = 1; i < outputs.size(); ++i) {
-      maskPatterns(planMask, outputs.get(i));
-      QTestProcessExecResult result = executeDiffCommand(
-          outputs.get(i - 1), outputs.get(i), false, qSortSet.contains(tname));
-      if (result.getReturnCode() != 0) {
-        System.out.println("Files don't match: " + outputs.get(i - 1) + " and " + outputs.get(i));
-        return result;
-      }
-    }
-    return QTestProcessExecResult.createWithoutOutput(0);
-  }
-
-  private static void overwriteResults(String inFileName, String outFileName) throws Exception {
-    // This method can be replaced with Files.copy(source, target, REPLACE_EXISTING)
-    // once Hive uses Java 7.
-    System.out.println("Overwriting results " + inFileName + " to " + outFileName);
-    int result = executeCmd(new String[]{
-        "cp",
-        getQuotedString(inFileName),
-        getQuotedString(outFileName)
-    }).getReturnCode();
-    if (result != 0) {
-      throw new IllegalStateException("Unexpected error while overwriting " +
-          inFileName + " with " + outFileName);
-    }
-  }
-
-  private static QTestProcessExecResult executeDiffCommand(String inFileName,
-      String outFileName,
-      boolean ignoreWhiteSpace,
-      boolean sortResults
-      ) throws Exception {
-
-    QTestProcessExecResult result;
-
-    if (sortResults) {
-      // sort will try to open the output file in write mode on windows. We need to
-      // close it first.
-      SessionState ss = SessionState.get();
-      if (ss != null && ss.out != null && ss.out != System.out) {
-        ss.out.close();
-      }
-
-      String inSorted = inFileName + SORT_SUFFIX;
-      String outSorted = outFileName + SORT_SUFFIX;
-
-      sortFiles(inFileName, inSorted);
-      sortFiles(outFileName, outSorted);
-
-      inFileName = inSorted;
-      outFileName = outSorted;
-    }
-
-    ArrayList<String> diffCommandArgs = new ArrayList<String>();
-    diffCommandArgs.add("diff");
-
-    // Text file comparison
-    diffCommandArgs.add("-a");
-
-    // Ignore changes in the amount of white space
-    if (ignoreWhiteSpace) {
-      diffCommandArgs.add("-b");
-    }
-
-    // Add files to compare to the arguments list
-    diffCommandArgs.add(getQuotedString(inFileName));
-    diffCommandArgs.add(getQuotedString(outFileName));
-
-    result = executeCmd(diffCommandArgs);
-
-    if (sortResults) {
-      new File(inFileName).delete();
-      new File(outFileName).delete();
-    }
-
-    return result;
-  }
-
-  private static void sortFiles(String in, String out) throws Exception {
-    int result = executeCmd(new String[]{
-        "sort",
-        getQuotedString(in),
-    }, out, null).getReturnCode();
-    if (result != 0) {
-      throw new IllegalStateException("Unexpected error while sorting " + in);
-    }
-  }
-
-  private static QTestProcessExecResult executeCmd(Collection<String> args) throws Exception {
-    return executeCmd(args, null, null);
-  }
-
-  private static QTestProcessExecResult executeCmd(String[] args) throws Exception {
-    return executeCmd(args, null, null);
-  }
-
-  private static QTestProcessExecResult executeCmd(Collection<String> args, String outFile,
-                                            String errFile) throws Exception {
-    String[] cmdArray = args.toArray(new String[args.size()]);
-    return executeCmd(cmdArray, outFile, errFile);
-  }
-
-  private static QTestProcessExecResult executeCmd(String[] args, String outFile,
-                                            String errFile) throws Exception {
-    System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(args, ' '));
-
-    PrintStream out = outFile == null ?
-      SessionState.getConsole().getChildOutStream() :
-      new PrintStream(new FileOutputStream(outFile), true, "UTF-8");
-    PrintStream err = errFile == null ?
-      SessionState.getConsole().getChildErrStream() :
-      new PrintStream(new FileOutputStream(errFile), true, "UTF-8");
-
-    Process executor = Runtime.getRuntime().exec(args);
-
-    ByteArrayOutputStream bos = new ByteArrayOutputStream();
-    PrintStream str = new PrintStream(bos, true, "UTF-8");
-
-    StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, err);
-    StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, out, str);
-
-    outPrinter.start();
-    errPrinter.start();
-
-    int result = executor.waitFor();
-
-    outPrinter.join();
-    errPrinter.join();
-
-    if (outFile != null) {
-      out.close();
-    }
-
-    if (errFile != null) {
-      err.close();
-    }
-
-    return QTestProcessExecResult.
-        create(result, new String(bos.toByteArray(), StandardCharsets.UTF_8));
-  }
-
-  private static String getQuotedString(String str){
-    return str;
-  }
-
-  public ASTNode parseQuery(String tname) throws Exception {
-    return pd.parse(qMap.get(tname));
-  }
-
-  public void resetParser() throws SemanticException {
-    pd = new ParseDriver();
-    queryState = new QueryState.Builder().withHiveConf(conf).build();
-    sem = new SemanticAnalyzer(queryState);
-  }
-
-
-  public List<Task<? extends Serializable>> analyzeAST(ASTNode ast) throws Exception {
-
-    // Do semantic analysis and plan generation
-    Context ctx = new Context(conf);
-    while ((ast.getToken() == null) && (ast.getChildCount() > 0)) {
-      ast = (ASTNode) ast.getChild(0);
-    }
-    sem.getOutputs().clear();
-    sem.getInputs().clear();
-    sem.analyze(ast, ctx);
-    ctx.clear();
-    return sem.getRootTasks();
-  }
-
-  public TreeMap<String, String> getQMap() {
-    return qMap;
-  }
-
-  /**
-   * QTestSetup defines test fixtures which are reused across testcases,
-   * and are needed before any test can be run
-   */
-  public static class QTestSetup
-  {
-    private MiniZooKeeperCluster zooKeeperCluster = null;
-    private int zkPort;
-    private ZooKeeper zooKeeper;
-
-    public QTestSetup() {
-    }
-
-    public void preTest(HiveConf conf) throws Exception {
-
-      if (zooKeeperCluster == null) {
-        //create temp dir
-        String tmpBaseDir =  System.getProperty(TEST_TMP_DIR_PROPERTY);
-        File tmpDir = Utilities.createTempDir(tmpBaseDir);
-
-        zooKeeperCluster = new MiniZooKeeperCluster();
-        zkPort = zooKeeperCluster.startup(tmpDir);
-      }
-
-      if (zooKeeper != null) {
-        zooKeeper.close();
-      }
-
-      int sessionTimeout =  (int) conf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
-      zooKeeper = new ZooKeeper("localhost:" + zkPort, sessionTimeout, new Watcher() {
-        @Override
-        public void process(WatchedEvent arg0) {
-        }
-      });
-
-      String zkServer = "localhost";
-      conf.set("hive.zookeeper.quorum", zkServer);
-      conf.set("hive.zookeeper.client.port", "" + zkPort);
-    }
-
-    public void postTest(HiveConf conf) throws Exception {
-      if (zooKeeperCluster == null) {
-        return;
-      }
-
-      if (zooKeeper != null) {
-        zooKeeper.close();
-      }
-
-      ZooKeeperHiveLockManager.releaseAllLocks(conf);
-    }
-
-    public void tearDown() throws Exception {
-      CuratorFrameworkSingleton.closeAndReleaseInstance();
-
-      if (zooKeeperCluster != null) {
-        zooKeeperCluster.shutdown();
-        zooKeeperCluster = null;
-      }
-    }
-  }
-
-  /**
-   * QTRunner: Runnable class for running a single query file.
-   *
-   **/
-  public static class QTRunner implements Runnable {
-    private final QTestUtil qt;
-    private final String fname;
-
-    public QTRunner(QTestUtil qt, String fname) {
-      this.qt = qt;
-      this.fname = fname;
-    }
-
-    @Override
-    public void run() {
-      try {
-        // assumption is that environment has already been cleaned once globally
-        // hence each thread does not call cleanUp() and createSources() again
-        qt.cliInit(fname, false);
-        qt.executeClient(fname);
-      } catch (Throwable e) {
-        System.err.println("Query file " + fname + " failed with exception "
-            + e.getMessage());
-        e.printStackTrace();
-        outputTestFailureHelpMessage();
-      }
-    }
-  }
-
-  /**
-   * Setup to execute a set of query files. Uses QTestUtil to do so.
-   *
-   * @param qfiles
-   *          array of input query files containing arbitrary number of hive
-   *          queries
-   * @param resDir
-   *          output directory
-   * @param logDir
-   *          log directory
-   * @return one QTestUtil for each query file
-   */
-  public static QTestUtil[] queryListRunnerSetup(File[] qfiles, String resDir,
-      String logDir, String initScript, String cleanupScript) throws Exception
-  {
-    QTestUtil[] qt = new QTestUtil[qfiles.length];
-    for (int i = 0; i < qfiles.length; i++) {
-      qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20",
-        initScript == null ? defaultInitScript : initScript,
-        cleanupScript == null ? defaultCleanupScript : cleanupScript, false);
-      qt[i].addFile(qfiles[i]);
-      qt[i].clearTestSideEffects();
-    }
-
-    return qt;
-  }
-
-  /**
-   * Executes a set of query files in sequence.
-   *
-   * @param qfiles
-   *          array of input query files containing arbitrary number of hive
-   *          queries
-   * @param qt
-   *          array of QTestUtils, one per qfile
-   * @return true if all queries passed, false otherwise
-   */
-  public static boolean queryListRunnerSingleThreaded(File[] qfiles, QTestUtil[] qt)
-    throws Exception
-  {
-    boolean failed = false;
-    qt[0].cleanUp();
-    qt[0].createSources();
-    for (int i = 0; i < qfiles.length && !failed; i++) {
-      qt[i].clearTestSideEffects();
-      qt[i].cliInit(qfiles[i].getName(), false);
-      qt[i].executeClient(qfiles[i].getName());
-      QTestProcessExecResult result = qt[i].checkCliDriverResults(qfiles[i].getName());
-      if (result.getReturnCode() != 0) {
-        failed = true;
-        StringBuilder builder = new StringBuilder();
-        builder.append("Test ")
-            .append(qfiles[i].getName())
-            .append(" results check failed with error code ")
-            .append(result.getReturnCode());
-        if (Strings.isNotEmpty(result.getCapturedOutput())) {
-          builder.append(" and diff value ").append(result.getCapturedOutput());
-        }
-        System.err.println(builder.toString());
-        outputTestFailureHelpMessage();
-      }
-      qt[i].clearPostTestEffects();
-    }
-    return (!failed);
-  }
-
-  /**
-   * Executes a set of query files parallel.
-   *
-   * Each query file is run in a separate thread. The caller has to arrange
-   * that different query files do not collide (in terms of destination tables)
-   *
-   * @param qfiles
-   *          array of input query files containing arbitrary number of hive
-   *          queries
-   * @param qt
-   *          array of QTestUtils, one per qfile
-   * @return true if all queries passed, false otherwise
-   *
-   */
-  public static boolean queryListRunnerMultiThreaded(File[] qfiles, QTestUtil[] qt)
-    throws Exception
-  {
-    boolean failed = false;
-
-    // in multithreaded mode - do cleanup/initialization just once
-
-    qt[0].cleanUp();
-    qt[0].createSources();
-    qt[0].clearTestSideEffects();
-
-    QTRunner[] qtRunners = new QTRunner[qfiles.length];
-    Thread[] qtThread = new Thread[qfiles.length];
-
-    for (int i = 0; i < qfiles.length; i++) {
-      qtRunners[i] = new QTRunner(qt[i], qfiles[i].getName());
-      qtThread[i] = new Thread(qtRunners[i]);
-    }
-
-    for (int i = 0; i < qfiles.length; i++) {
-      qtThread[i].start();
-    }
-
-    for (int i = 0; i < qfiles.length; i++) {
-      qtThread[i].join();
-      QTestProcessExecResult result = qt[i].checkCliDriverResults(qfiles[i].getName());
-      if (result.getReturnCode() != 0) {
-        failed = true;
-        StringBuilder builder = new StringBuilder();
-        builder.append("Test ")
-            .append(qfiles[i].getName())
-            .append(" results check failed with error code ")
-            .append(result.getReturnCode());
-        if (Strings.isNotEmpty(result.getCapturedOutput())) {
-          builder.append(" and diff value ").append(result.getCapturedOutput());
-        }
-        System.err.println(builder.toString());
-        outputTestFailureHelpMessage();
-      }
-    }
-    return (!failed);
-  }
-
-  public static void outputTestFailureHelpMessage() {
-    System.err.println(
-      "See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, or check " +
-        "./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific " +
-        "test cases logs.");
-    System.err.flush();
-  }
-
-  private static String[] cachedQvFileList = null;
-  private static ImmutableList<String> cachedDefaultQvFileList = null;
-  private static Pattern qvSuffix = Pattern.compile("_[0-9]+.qv$", Pattern.CASE_INSENSITIVE);
-
-  public static List<String> getVersionFiles(String queryDir, String tname) {
-    ensureQvFileList(queryDir);
-    List<String> result = getVersionFilesInternal(tname);
-    if (result == null) {
-      result = cachedDefaultQvFileList;
-    }
-    return result;
-  }
-
-  private static void ensureQvFileList(String queryDir) {
-    if (cachedQvFileList != null) {
-      return;
-    }
-    // Not thread-safe.
-    System.out.println("Getting versions from " + queryDir);
-    cachedQvFileList = (new File(queryDir)).list(new FilenameFilter() {
-      @Override
-      public boolean accept(File dir, String name) {
-        return name.toLowerCase().endsWith(".qv");
-      }
-    });
-    if (cachedQvFileList == null) {
-      return; // no files at all
-    }
-    Arrays.sort(cachedQvFileList, String.CASE_INSENSITIVE_ORDER);
-    List<String> defaults = getVersionFilesInternal("default");
-    cachedDefaultQvFileList = (defaults != null)
-        ? ImmutableList.copyOf(defaults) : ImmutableList.<String>of();
-  }
-
-  private static List<String> getVersionFilesInternal(String tname) {
-    if (cachedQvFileList == null) {
-      return new ArrayList<String>();
-    }
-    int pos = Arrays.binarySearch(cachedQvFileList, tname, String.CASE_INSENSITIVE_ORDER);
-    if (pos >= 0) {
-      throw new BuildException("Unexpected file list element: " + cachedQvFileList[pos]);
-    }
-    List<String> result = null;
-    for (pos = (-pos - 1); pos < cachedQvFileList.length; ++pos) {
-      String candidate = cachedQvFileList[pos];
-      if (candidate.length() <= tname.length()
-          || !tname.equalsIgnoreCase(candidate.substring(0, tname.length()))
-          || !qvSuffix.matcher(candidate.substring(tname.length())).matches()) {
-        break;
-      }
-      if (result == null) {
-        result = new ArrayList<String>();
-      }
-      result.add(candidate);
-    }
-    return result;
-  }
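-  // For example, for a test name "tname" the files "tname_1.qv", "tname_2.qv", ... (if present in
-  // the query directory) match the qvSuffix pattern above and are returned in sorted order.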
-
-  public void failed(int ecode, String fname, String debugHint) {
-    String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
-    String message = "Client execution failed with error code = " + ecode +
-        (command != null ? " running \"" + command + "\"" : "") + " fname=" + fname + " " +
-        (debugHint != null ? debugHint : "");
-    LOG.error(message);
-    Assert.fail(message);
-  }
-
-  // for negative tests that unexpectedly succeeded; no need to print the query string
-  public void failed(String fname, String debugHint) {
-    Assert.fail(
-        "Client Execution was expected to fail, but succeeded with error code 0 for fname=" +
-            fname + (debugHint != null ? (" " + debugHint) : ""));
-  }
-
-  public void failedDiff(int ecode, String fname, String debugHint) {
-    String message =
-        "Client Execution succeeded but contained differences " +
-            "(error code = " + ecode + ") after executing " +
-            fname + (debugHint != null ? (" " + debugHint) : "");
-    LOG.error(message);
-    Assert.fail(message);
-  }
-
-  public void failed(Exception e, String fname, String debugHint) {
-    String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
-    System.err.println("Failed query: " + fname);
-    System.err.flush();
-    Assert.fail("Unexpected exception " +
-        org.apache.hadoop.util.StringUtils.stringifyException(e) + "\n" +
-        (command != null ? " running " + command : "") +
-        (debugHint != null ? debugHint : ""));
-  }
-
-  public static void addTestsToSuiteFromQfileNames(
-    String qFileNamesFile,
-    Set<String> qFilesToExecute,
-    TestSuite suite,
-    Object setup,
-    SuiteAddTestFunctor suiteAddTestCallback) {
-    try {
-      File qFileNames = new File(qFileNamesFile);
-      FileReader fr = new FileReader(qFileNames.getCanonicalFile());
-      BufferedReader br = new BufferedReader(fr);
-      String fName = null;
-
-      while ((fName = br.readLine()) != null) {
-        if (fName.isEmpty() || fName.trim().equals("")) {
-          continue;
-        }
-
-        int eIdx = fName.indexOf('.');
-
-        if (eIdx == -1) {
-          continue;
-        }
-
-        String tName = fName.substring(0, eIdx);
-
-        if (qFilesToExecute.isEmpty() || qFilesToExecute.contains(fName)) {
-          suiteAddTestCallback.addTestToSuite(suite, setup, tName);
-        }
-      }
-      br.close();
-    } catch (Exception e) {
-      Assert.fail("Unexpected exception " + org.apache.hadoop.util.StringUtils.stringifyException(e));
-    }
-  }
-
-  public static void setupMetaStoreTableColumnStatsFor30TBTPCDSWorkload(HiveConf conf) {
-    Connection conn = null;
-    ArrayList<Statement> statements = new ArrayList<Statement>(); // list of Statements, PreparedStatements
-
-    try {
-      Properties props = new Properties(); // connection properties
-      props.put("user", conf.get("javax.jdo.option.ConnectionUserName"));
-      props.put("password", conf.get("javax.jdo.option.ConnectionPassword"));
-      conn = DriverManager.getConnection(conf.get("javax.jdo.option.ConnectionURL"), props);
-      ResultSet rs = null;
-      Statement s = conn.createStatement();
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Connected to metastore database ");
-      }
-
-      String mdbPath = HIVE_ROOT + "/data/files/tpcds-perf/metastore_export/";
-
-      // Setup the table column stats
-      BufferedReader br = new BufferedReader(
-          new FileReader(
-              new File(HIVE_ROOT + "/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql")));
-      String command;
-
-      s.execute("DROP TABLE APP.TABLE_PARAMS");
-      s.execute("DROP TABLE APP.TAB_COL_STATS");
-      // Create the column stats table
-      while ((command = br.readLine()) != null) {
-        if (!command.endsWith(";")) {
-          continue;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Going to run command : " + command);
-        }
-        try {
-          PreparedStatement psCommand = conn.prepareStatement(command.substring(0, command.length()-1));
-          statements.add(psCommand);
-          psCommand.execute();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("successfully completed " + command);
-          }
-        } catch (SQLException e) {
-          LOG.info("Got SQL Exception " + e.getMessage());
-        }
-      }
-      br.close();
-
-      java.nio.file.Path tabColStatsCsv = FileSystems.getDefault().getPath(mdbPath, "csv" ,"TAB_COL_STATS.txt.bz2");
-      java.nio.file.Path tabParamsCsv = FileSystems.getDefault().getPath(mdbPath, "csv", "TABLE_PARAMS.txt.bz2");
-
-      // Set up the foreign key constraints properly in the TAB_COL_STATS data
-      String tmpBaseDir =  System.getProperty(TEST_TMP_DIR_PROPERTY);
-      java.nio.file.Path tmpFileLoc1 = FileSystems.getDefault().getPath(tmpBaseDir, "TAB_COL_STATS.txt");
-      java.nio.file.Path tmpFileLoc2 = FileSystems.getDefault().getPath(tmpBaseDir, "TABLE_PARAMS.txt");
-
-      class MyComp implements Comparator<String> {
-        @Override
-        public int compare(String str1, String str2) {
-          if (str2.length() != str1.length()) {
-            return str2.length() - str1.length();
-          }
-          return str1.compareTo(str2);
-        }
-      }
-
-      final SortedMap<String, Integer> tableNameToID = new TreeMap<String, Integer>(new MyComp());
-
-      rs = s.executeQuery("SELECT * FROM APP.TBLS");
-      while(rs.next()) {
-        String tblName = rs.getString("TBL_NAME");
-        Integer tblId = rs.getInt("TBL_ID");
-        tableNameToID.put(tblName, tblId);
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Resultset : " +  tblName + " | " + tblId);
-        }
-      }
-
-      final Map<String, Map<String, String>> data = new HashMap<>();
-      rs = s.executeQuery("select TBLS.TBL_NAME, a.COLUMN_NAME, a.TYPE_NAME from  "
-          + "(select COLUMN_NAME, TYPE_NAME, SDS.SD_ID from APP.COLUMNS_V2 join APP.SDS on SDS.CD_ID = COLUMNS_V2.CD_ID) a"
-          + " join APP.TBLS on  TBLS.SD_ID = a.SD_ID");
-      while (rs.next()) {
-        String tblName = rs.getString(1);
-        String colName = rs.getString(2);
-        String typeName = rs.getString(3);
-        Map<String, String> cols = data.get(tblName);
-        if (null == cols) {
-          cols = new HashMap<>();
-        }
-        cols.put(colName, typeName);
-        data.put(tblName, cols);
-      }
-
-      BufferedReader reader = new BufferedReader(new InputStreamReader(
-        new BZip2CompressorInputStream(Files.newInputStream(tabColStatsCsv, StandardOpenOption.READ))));
-
-      Stream<String> replaced = reader.lines().parallel().map(str-> {
-        String[] splits = str.split(",");
-        String tblName = splits[0];
-        String colName = splits[1];
-        Integer tblID = tableNameToID.get(tblName);
-        StringBuilder sb = new StringBuilder("default@"+tblName + "@" + colName + "@" + data.get(tblName).get(colName)+"@");
-        for (int i = 2; i < splits.length; i++) {
-          sb.append(splits[i]+"@");
-        }
-        // Add tbl_id and empty bitvector
-        return sb.append(tblID).append("@").toString();
-        });
-
-      Files.write(tmpFileLoc1, (Iterable<String>)replaced::iterator);
-      replaced.close();
-      reader.close();
-
-      BufferedReader reader2 = new BufferedReader(new InputStreamReader(
-          new BZip2CompressorInputStream(Files.newInputStream(tabParamsCsv, StandardOpenOption.READ))));
-      final Map<String,String> colStats = new ConcurrentHashMap<>();
-      Stream<String> replacedStream = reader2.lines().parallel().map(str-> {
-        String[] splits = str.split("_@");
-        String tblName = splits[0];
-        Integer tblId = tableNameToID.get(tblName);
-        Map<String,String> cols = data.get(tblName);
-        StringBuilder sb = new StringBuilder();
-        sb.append("{\"COLUMN_STATS\":{");
-        for (String colName : cols.keySet()) {
-          sb.append("\""+colName+"\":\"true\",");
-        }
-        sb.append("},\"BASIC_STATS\":\"true\"}");
-        colStats.put(tblId.toString(), sb.toString());
-
-        return  tblId.toString() + "@" + splits[1];
-      });
-
-      Files.write(tmpFileLoc2, (Iterable<String>)replacedStream::iterator);
-      Files.write(tmpFileLoc2, (Iterable<String>)colStats.entrySet().stream()
-        .map(map->map.getKey()+"@COLUMN_STATS_ACCURATE@"+map.getValue())::iterator, StandardOpenOption.APPEND);
-
-      replacedStream.close();
-      reader2.close();
-      // Load the column stats and table params with 30 TB scale
-      String importStatement1 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, '" + "TAB_COL_STATS" +
-        "', '" + tmpFileLoc1.toAbsolutePath().toString() +
-        "', '@', null, 'UTF-8', 1)";
-      String importStatement2 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, '" + "TABLE_PARAMS" +
-        "', '" + tmpFileLoc2.toAbsolutePath().toString() +
-        "', '@', null, 'UTF-8', 1)";
-      try {
-        PreparedStatement psImport1 = conn.prepareStatement(importStatement1);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Going to execute : " + importStatement1);
-        }
-        statements.add(psImport1);
-        psImport1.execute();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("successfully completed " + importStatement1);
-        }
-        PreparedStatement psImport2 = conn.prepareStatement(importStatement2);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Going to execute : " + importStatement2);
-        }
-        statements.add(psImport2);
-        psImport2.execute();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("successfully completed " + importStatement2);
-        }
-      } catch (SQLException e) {
-        LOG.info("Got SQL Exception  " +  e.getMessage());
-      }
-    } catch (FileNotFoundException e1) {
-      LOG.info("Got File not found Exception " + e1.getMessage());
-    } catch (IOException e1) {
-      LOG.info("Got IOException " + e1.getMessage());
-    } catch (SQLException e1) {
-      LOG.info("Got SQLException " + e1.getMessage());
-    } finally {
-      // Statements and PreparedStatements
-      int i = 0;
-      while (!statements.isEmpty()) {
-        // PreparedStatement extend Statement
-        Statement st = statements.remove(i);
-        try {
-          if (st != null) {
-            st.close();
-            st = null;
-          }
-        } catch (SQLException sqle) {
-        }
-      }
-
-      //Connection
-      try {
-        if (conn != null) {
-          conn.close();
-          conn = null;
-        }
-      } catch (SQLException sqle) {
-      }
-    }
-  }
-  
-  private static String getHiveRoot() {
-    String path;
-    if (System.getProperty("hive.root") != null) {
-      try {
-        path = new File(System.getProperty("hive.root")).getCanonicalPath();
-      } catch (IOException e) {
-        throw new RuntimeException("error getting hive.root", e);
-      }
-    } else {
-      path = new File("target").getAbsolutePath();
-    }
-    return ensurePathEndsInSlash(new File(path).getAbsolutePath());
-  }
-  
-  public static String ensurePathEndsInSlash(String path) {
-    if (path == null) {
-      throw new NullPointerException("Path cannot be null");
-    }
-    if (path.endsWith(File.separator)) {
-      return path;
-    } else {
-      return path + File.separator;
-    }
-  }
-
-}
diff --git a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java
deleted file mode 100644
index 45fabf5..0000000
--- a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.security;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-public class DummyAuthenticator implements HiveAuthenticationProvider {
-
-  private final List<String> groupNames;
-  private final String userName;
-  private Configuration conf;
-
-  public DummyAuthenticator() {
-    this.groupNames = new ArrayList<String>();
-    groupNames.add("hive_test_group1");
-    groupNames.add("hive_test_group2");
-    userName = "hive_test_user";
-  }
-
-  @Override
-  public void destroy() throws HiveException{
-    return;
-  }
-
-  @Override
-  public List<String> getGroupNames() {
-    return groupNames;
-  }
-
-  @Override
-  public String getUserName() {
-    return userName;
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return this.conf;
-  }
-
-  @Override
-  public void setSessionState(SessionState ss) {
-    //no op
-  }
-
-}
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
deleted file mode 100644
index aede9ac..0000000
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive;
-
-import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.Statement;
-import java.util.Properties;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.QTestProcessExecResult;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.jdbc.PhoenixDriver;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.AfterClass;
-import org.junit.Ignore;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.base.Throwables;
-
-/**
- * Base class for all Hive Phoenix integration tests that may be run with a Tez or MR mini cluster
- */
-@Category(NeedsOwnMiniClusterTest.class)
-@Ignore
-public class BaseHivePhoenixStoreIT {
-
-    private static final Log LOG = LogFactory.getLog(BaseHivePhoenixStoreIT.class);
-    protected static HBaseTestingUtility hbaseTestUtil;
-    protected static MiniHBaseCluster hbaseCluster;
-    private static String zkQuorum;
-    protected static Connection conn;
-    private static Configuration conf;
-    protected static HiveTestUtil qt;
-    protected static String hiveOutputDir;
-    protected static String hiveLogDir;
-
-
-    public static void setup(HiveTestUtil.MiniClusterType clusterType) throws Exception {
-        String hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
-        if (null != hadoopConfDir && !hadoopConfDir.isEmpty()) {
-          LOG.warn("WARNING: HADOOP_CONF_DIR is set in the environment which may cause "
-              + "issues with test execution via MiniDFSCluster");
-        }
-        hbaseTestUtil = new HBaseTestingUtility();
-        conf = hbaseTestUtil.getConfiguration();
-        setUpConfigForMiniCluster(conf);
-        conf.set(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-        conf.set("hive.metastore.schema.verification","false");
-        hiveOutputDir = new Path(hbaseTestUtil.getDataTestDir(), "hive_output").toString();
-        File outputDir = new File(hiveOutputDir);
-        outputDir.mkdirs();
-        hiveLogDir = new Path(hbaseTestUtil.getDataTestDir(), "hive_log").toString();
-        File logDir = new File(hiveLogDir);
-        logDir.mkdirs();
-        // Setup Hive mini Server
-        Path testRoot = hbaseTestUtil.getDataTestDir();
-        System.setProperty("test.tmp.dir", testRoot.toString());
-        System.setProperty("test.warehouse.dir", (new Path(testRoot, "warehouse")).toString());
-        System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "false");
-        //System.setProperty(HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL.toString(),"true");
-        try {
-            qt = new HiveTestUtil(hiveOutputDir, hiveLogDir, clusterType, "", "0.20", null, null, false);
-            // do a one time initialization
-            qt.createSources();
-        } catch (Exception e) {
-            LOG.error("Unexpected exception in setup: " + e.getMessage(), e);
-            fail("Unexpected exception in setup"+Throwables.getStackTraceAsString(e));
-        }
-
-        //Start HBase cluster
-        hbaseCluster = hbaseTestUtil.startMiniCluster(1);
-        //MiniDFSCluster x = hbaseTestUtil.getDFSCluster();
-        Class.forName(PhoenixDriver.class.getName());
-        zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
-        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
-        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-        conn = DriverManager.getConnection(PhoenixRuntime.JDBC_PROTOCOL +
-                PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum, props);
-        // Setup Hive Output Folder
-
-        Statement stmt = conn.createStatement();
-        stmt.execute("create table t(a integer primary key,b varchar)");
-    }
-
-    protected void runTest(String fname, String fpath) throws Exception {
-        long startTime = System.currentTimeMillis();
-        try {
-            LOG.info("Begin query: " + fname);
-            qt.addFile(fpath);
-
-            if (qt.shouldBeSkipped(fname)) {
-                LOG.info("Test " + fname + " skipped");
-                return;
-            }
-
-            qt.cliInit(fname);
-            qt.clearTestSideEffects();
-            int ecode = qt.executeClient(fname);
-            if (ecode != 0) {
-                qt.failed(ecode, fname, null);
-                return;
-            }
-
-            QTestProcessExecResult result = qt.checkCliDriverResults(fname);
-            if (result.getReturnCode() != 0) {
-              qt.failedDiff(result.getReturnCode(), fname, result.getCapturedOutput());
-            }
-            qt.clearPostTestEffects();
-
-        } catch (Throwable e) {
-            qt.failed(new Exception(e), fname, null);
-        }
-
-        long elapsedTime = System.currentTimeMillis() - startTime;
-        LOG.info("Done query: " + fname + " elapsedTime=" + elapsedTime / 1000 + "s");
-        assertTrue("Test passed", true);
-    }
-
-    protected void createFile(String content, String fullName) throws IOException {
-        FileUtils.write(new File(fullName), content);
-    }
-
-    @AfterClass
-    public static void tearDownAfterClass() throws Exception {
-        try {
-            conn.close();
-        } finally {
-            try {
-                PhoenixDriver.INSTANCE.close();
-            } finally {
-                try {
-                    DriverManager.deregisterDriver(PhoenixDriver.INSTANCE);
-                } finally {
-                    hbaseTestUtil.shutdownMiniCluster();
-                }
-            }
-        }
-        // Shuts down the filesystem -- do this after stopping HBase.
-        if (qt != null) {
-          try {
-              qt.shutdown();
-          } catch (Exception e) {
-              LOG.error("Unexpected exception in setup", e);
-              //fail("Unexpected exception in tearDown");
-          }
-      }
-    }
-}
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
deleted file mode 100644
index c866921..0000000
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.hive;
-
-import static org.junit.Assert.fail;
-
-import java.util.Map;
-
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class HiveMapReduceIT extends HivePhoenixStoreIT {
-
-    @BeforeClass
-    public static void setUpBeforeClass() throws Exception {
-        final String hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
-        if (hadoopConfDir != null && hadoopConfDir.length() != 0) {
-            fail("HADOOP_CONF_DIR is non-empty in the current shell environment which will very likely cause this test to fail.");
-        }
-        setup(HiveTestUtil.MiniClusterType.mr);
-    }
-    
-    @Override
-    @Test
-    @Ignore 
-    /**
-     * Ignoring because predicate pushdown is skipped for MR (ref: HIVE-18873) when there are multiple aliases
-     */
-    public void testJoinNoColumnMaps() throws Exception {
-        
-    }
-    
-    @Override
-    @Test
-    @Ignore 
-    /**
-     * Ignoring because projection pushdown is incorrect for MR when there are multiple aliases (ref: HIVE-18872)
-     */
-    public void testJoinColumnMaps() throws Exception {
-        
-    }
-}
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java
deleted file mode 100644
index ecb2003..0000000
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive;
-
-import static org.junit.Assert.assertTrue;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.util.StringUtil;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test methods only. All supporting methods should be placed in BaseHivePhoenixStoreIT
- */
-
-@Category(NeedsOwnMiniClusterTest.class)
-@Ignore("This class contains only test methods and should not be executed directly")
-public class HivePhoenixStoreIT  extends BaseHivePhoenixStoreIT {
-
-    /**
-     * Create a table with two columns, insert one row, and check that the Phoenix table is
-     * created and the row is present
-     *
-     * @throws Exception
-     */
-    @Test
-    public void simpleTest() throws Exception {
-        String testName = "simpleTest";
-        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveOutputDir, testName + ".out").toString());
-        StringBuilder sb = new StringBuilder();
-        sb.append("CREATE TABLE phoenix_table(ID STRING, SALARY STRING)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF + " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.table.name'='phoenix_table'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id');");
-        sb.append("INSERT INTO TABLE phoenix_table" + HiveTestUtil.CRLF +
-                "VALUES ('10', '1000');" + HiveTestUtil.CRLF);
-        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
-        createFile(sb.toString(), fullPath);
-        runTest(testName, fullPath);
-
-        String phoenixQuery = "SELECT * FROM phoenix_table";
-        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
-        ResultSet rs = statement.executeQuery();
-        assert (rs.getMetaData().getColumnCount() == 2);
-        assertTrue(rs.next());
-        assert (rs.getString(1).equals("10"));
-        assert (rs.getString(2).equals("1000"));
-    }
-
-    /**
-     * Create a Hive table with a custom column mapping
-     * @throws Exception
-     */
-
-    @Test
-    public void simpleColumnMapTest() throws Exception {
-        String testName = "cmTest";
-        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveOutputDir, testName + ".out").toString());
-        StringBuilder sb = new StringBuilder();
-        sb.append("CREATE TABLE column_table(ID STRING, P1 STRING, p2 STRING)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF + " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.table.name'='column_table'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.column.mapping' = 'id:C1, p1:c2, p2:C3'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id');");
-        sb.append("INSERT INTO TABLE column_table" + HiveTestUtil.CRLF +
-                "VALUES ('1', '2', '3');" + HiveTestUtil.CRLF);
-        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
-        createFile(sb.toString(), fullPath);
-        runTest(testName, fullPath);
-
-        String phoenixQuery = "SELECT C1, \"c2\", C3 FROM column_table";
-        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
-        ResultSet rs = statement.executeQuery();
-        assert (rs.getMetaData().getColumnCount() == 3);
-        assertTrue(rs.next());
-        assert (rs.getString(1).equals("1"));
-        assert (rs.getString(2).equals("2"));
-        assert (rs.getString(3).equals("3"));
-
-    }
-
-
-    /**
-     * Datatype Test
-     *
-     * @throws Exception
-     */
-    @Test
-    public void dataTypeTest() throws Exception {
-        String testName = "dataTypeTest";
-        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveOutputDir, testName + ".out").toString());
-        StringBuilder sb = new StringBuilder();
-        sb.append("CREATE TABLE phoenix_datatype(ID int, description STRING, ts TIMESTAMP,  db " +
-                "DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF + " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.hbase.table.name'='phoenix_datatype'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id');");
-        sb.append("INSERT INTO TABLE phoenix_datatype" + HiveTestUtil.CRLF +
-                "VALUES (10, \"foodesc\", \"2013-01-05 01:01:01\", 200,2.0,-1);" + HiveTestUtil.CRLF);
-        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
-        createFile(sb.toString(), fullPath);
-        runTest(testName, fullPath);
-
-        String phoenixQuery = "SELECT * FROM phoenix_datatype";
-        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
-        ResultSet rs = statement.executeQuery();
-        assert (rs.getMetaData().getColumnCount() == 6);
-        while (rs.next()) {
-            assert (rs.getInt(1) == 10);
-            assert (rs.getString(2).equalsIgnoreCase("foodesc"));
-            assert (rs.getDouble(4) == 200);
-            assert (rs.getFloat(5) == 2.0);
-            assert (rs.getInt(6) == -1);
-        }
-    }
-
-    /**
-     * Composite row key test (multi-column primary key)
-     *
-     * @throws Exception
-     */
-    @Test
-    public void MultiKey() throws Exception {
-        String testName = "MultiKey";
-        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveOutputDir, testName + ".out").toString());
-        StringBuilder sb = new StringBuilder();
-        sb.append("CREATE TABLE phoenix_MultiKey(ID int, ID2 String,description STRING," +
-                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF +
-                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.hbase.table.name'='phoenix_MultiKey'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE phoenix_MultiKey" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" +
-                HiveTestUtil.CRLF);
-        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
-        createFile(sb.toString(), fullPath);
-        runTest(testName, fullPath);
-
-        String phoenixQuery = "SELECT * FROM phoenix_MultiKey";
-        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
-        ResultSet rs = statement.executeQuery();
-        assert (rs.getMetaData().getColumnCount() == 6);
-        while (rs.next()) {
-            assert (rs.getInt(1) == 10);
-            assert (rs.getString(2).equalsIgnoreCase("part2"));
-            assert (rs.getString(3).equalsIgnoreCase("foodesc"));
-            assert (rs.getDouble(4) == 200);
-            assert (rs.getFloat(5) == 2.0);
-            assert (rs.getInt(6) == -1);
-        }
-    }
-
-    /**
-     * Test that Hive is able to access Phoenix data during an MR job (create two tables and perform a join on them)
-     *
-     * @throws Exception
-     */
-    @Test
-    public void testJoinNoColumnMaps() throws Exception {
-        String testName = "testJoin";
-        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
-        createFile("#### A masked pattern was here ####\n10\tpart2\tfoodesc\t200.0\t2.0\t-1\t10\tpart2\tfoodesc\t200.0\t2.0\t-1\n",
-                new Path(hiveOutputDir, testName + ".out").toString());
-        StringBuilder sb = new StringBuilder();
-        sb.append("CREATE TABLE joinTable1(ID int, ID2 String,description STRING," +
-                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF +
-                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.hbase.table.name'='joinTable1'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
-        sb.append("CREATE TABLE joinTable2(ID int, ID2 String,description STRING," +
-                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF +
-                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.hbase.table.name'='joinTable2'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
-
-        sb.append("INSERT INTO TABLE joinTable1" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE joinTable1" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
-
-        sb.append("INSERT INTO TABLE joinTable2" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE joinTable2" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
-        
-        sb.append("SELECT  * from joinTable1 A join joinTable2 B on A.id = B.id WHERE A.ID=10;" +
-                HiveTestUtil.CRLF);
-
-        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
-        createFile(sb.toString(), fullPath);
-        runTest(testName, fullPath);
-    }
-
-    /**
-     * Test that Hive is able to access Phoenix data during an MR job (create two tables with column mappings and perform a join on them)
-     *
-     * @throws Exception
-     */
-    @Test
-    public void testJoinColumnMaps() throws Exception {
-        String testName = "testJoin";
-        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
-        createFile("#### A masked pattern was here ####\n10\t200.0\tpart2\n", new Path(hiveOutputDir, testName + ".out").toString());
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
-
-        StringBuilder sb = new StringBuilder();
-        sb.append("CREATE TABLE joinTable3(ID int, ID2 String,description STRING," +
-                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF +
-                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.hbase.table.name'='joinTable3'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.column.mapping' = 'id:i1, id2:I2, db:db'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
-        sb.append("CREATE TABLE joinTable4(ID int, ID2 String,description STRING," +
-                "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF +
-                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.hbase.table.name'='joinTable4'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.column.mapping' = 'id:i1, id2:I2, db:db'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
-
-        sb.append("INSERT INTO TABLE joinTable3" + HiveTestUtil.CRLF +"VALUES (5, \'part1\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE joinTable3" + HiveTestUtil.CRLF +"VALUES (10, \'part1\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
-
-        sb.append("INSERT INTO TABLE joinTable4" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE joinTable4" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
-        
-        sb.append("SELECT A.ID, a.db, B.ID2 from joinTable3 A join joinTable4 B on A.ID = B.ID WHERE A.ID=10;" +
-                HiveTestUtil.CRLF);
-
-        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
-        createFile(sb.toString(), fullPath);
-        runTest(testName, fullPath);
-        //Test that Phoenix has correctly mapped columns. We are checking both, primary key and
-        // regular columns mapped and not mapped
-        String phoenixQuery = "SELECT \"i1\", \"I2\", \"db\" FROM joinTable3 where \"i1\" = 10 AND \"I2\" = 'part1' AND \"db\" = 200";
-        PreparedStatement statement = conn.prepareStatement(phoenixQuery);
-        ResultSet rs = statement.executeQuery();
-        assert (rs.getMetaData().getColumnCount() == 3);
-        while (rs.next()) {
-            assert (rs.getInt(1) == 10);
-            assert (rs.getString(2).equalsIgnoreCase("part1"));
-            assert (rs.getDouble(3) == 200);
-        }
-    }
-
-    @Test
-    public void testTimestampPredicate() throws Exception {
-        String testName = "testTimeStampPredicate";
-        hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
-        createFile("10\t2013-01-02 01:01:01.123\n", new Path(hiveOutputDir, testName + ".out").toString());
-        createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
-
-        StringBuilder sb = new StringBuilder();
-        sb.append("CREATE TABLE timeStampTable(ID int,ts TIMESTAMP)" + HiveTestUtil.CRLF +
-                " STORED BY  \"org.apache.phoenix.hive.PhoenixStorageHandler\"" + HiveTestUtil
-                .CRLF +
-                " TBLPROPERTIES(" + HiveTestUtil.CRLF +
-                "   'phoenix.hbase.table.name'='TIMESTAMPTABLE'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.znode.parent'='/hbase'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
-                "   'phoenix.zookeeper.client.port'='" +
-                hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.column.mapping' = 'id:ID, ts:TS'," + HiveTestUtil.CRLF +
-                "   'phoenix.rowkeys'='id');" + HiveTestUtil.CRLF);
-        /*
-        The following query only checks that nanoseconds with more than 3 digits are parsed correctly.
-         */
-        sb.append("INSERT INTO TABLE timeStampTable VALUES (10, \"2013-01-02 01:01:01.123456\");" + HiveTestUtil.CRLF);
-        sb.append("SELECT * from timeStampTable WHERE ts between '2012-01-02 01:01:01.123455' and " +
-                " '2015-01-02 12:01:02.123457789' AND id = 10;" + HiveTestUtil.CRLF);
-
-        String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
-        createFile(sb.toString(), fullPath);
-        runTest(testName, fullPath);
-    }
-}
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
deleted file mode 100644
index 3d2657b..0000000
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive;
-
-import org.apache.hadoop.hive.ql.QTestUtil;
-
-/**
- * HiveTestUtil is cloned from Hive's QTestUtil. It can become outdated and may require an
- * update if a problem is found.
- */
-public class HiveTestUtil extends QTestUtil{
-    public static final String CRLF = System.getProperty("line.separator");
-
-    public HiveTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer,
-            String initScript, String cleanupScript, boolean withLlapIo) throws Exception {
-        super(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, withLlapIo);
-    }
-
-    @Override
-    public int executeClient(String tname) {
-        conf.set("mapreduce.job.name", "test");
-        return super.executeClient(tname);
-    }
-    
-}
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTezIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTezIT.java
deleted file mode 100644
index a675a0e..0000000
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTezIT.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.hive;
-
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class HiveTezIT extends HivePhoenixStoreIT {
-
-    @BeforeClass
-    public static void setUpBeforeClass() throws Exception {
-        setup(HiveTestUtil.MiniClusterType.tez);
-    }
-}
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixRecordUpdater.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixRecordUpdater.java
deleted file mode 100644
index 1f26df1..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixRecordUpdater.java
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
-import org.apache.hadoop.hive.ql.io.RecordUpdater;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.SerDeStats;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.phoenix.hive.PhoenixSerializer.DmlType;
-import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
-import org.apache.phoenix.hive.mapreduce.PhoenixResultWritable;
-import org.apache.phoenix.hive.util.PhoenixConnectionUtil;
-import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
-import org.apache.phoenix.hive.util.PhoenixUtil;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.schema.ConcurrentTableMutationException;
-import org.apache.phoenix.schema.MetaDataClient;
-import org.apache.phoenix.util.QueryUtil;
-
-public class PhoenixRecordUpdater implements RecordUpdater {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixRecordUpdater.class);
-
-    private final Connection conn;
-    private final PreparedStatement pstmt;
-    private final long batchSize;
-    private long numRecords = 0;
-
-    private Configuration config;
-    private String tableName;
-    private MetaDataClient metaDataClient;
-    private boolean restoreWalMode;
-
-    private long rowCountDelta = 0;
-
-    private PhoenixSerializer phoenixSerializer;
-    private ObjectInspector objInspector;
-    private PreparedStatement pstmtForDelete;
-
-    public PhoenixRecordUpdater(Path path, AcidOutputFormat.Options options) throws IOException {
-        this.config = options.getConfiguration();
-        tableName = config.get(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
-
-        Properties props = new Properties();
-
-        try {
-            // Disable WAL
-            String walConfigName = tableName.toLowerCase() + PhoenixStorageHandlerConstants
-                    .DISABLE_WAL;
-            boolean disableWal = config.getBoolean(walConfigName, false);
-            if (disableWal) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug(walConfigName + " is true. batch.mode will be set true.");
-                }
-
-                props.setProperty(PhoenixStorageHandlerConstants.BATCH_MODE, "true");
-            }
-
-            this.conn = PhoenixConnectionUtil.getInputConnection(config, props);
-
-            if (disableWal) {
-                metaDataClient = new MetaDataClient((PhoenixConnection) conn);
-
-                if (!PhoenixUtil.isDisabledWal(metaDataClient, tableName)) {
-                    // execute ALTER TABLE statement if disable_wal is not already true.
-                    try {
-                        PhoenixUtil.alterTableForWalDisable(conn, tableName, true);
-                    } catch (ConcurrentTableMutationException e) {
-                        if (LOG.isWarnEnabled()) {
-                            LOG.warn("Concurrent modification of disableWAL");
-                        }
-                    }
-
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug(tableName + "s wal disabled.");
-                    }
-
-                    // restore original value of disable_wal at the end.
-                    restoreWalMode = true;
-                }
-            }
-
-            this.batchSize = PhoenixConfigurationUtil.getBatchSize(config);
-
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Batch-size : " + batchSize);
-            }
-
-            String upsertQuery = QueryUtil.constructUpsertStatement(tableName, PhoenixUtil
-                    .getColumnInfoList(conn, tableName));
-
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Upsert-query : " + upsertQuery);
-            }
-            this.pstmt = this.conn.prepareStatement(upsertQuery);
-        } catch (SQLException e) {
-            throw new IOException(e);
-        }
-
-        this.objInspector = options.getInspector();
-        try {
-            phoenixSerializer = new PhoenixSerializer(config, options.getTableProperties());
-        } catch (SerDeException e) {
-            throw new IOException(e);
-        }
-    }
-
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#insert(long, java.lang.Object)
-     */
-    @Override
-    public void insert(long currentTransaction, Object row) throws IOException {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Insert - currentTranscation : " + currentTransaction + ", row : " +
-                    PhoenixStorageHandlerUtil.toString(row));
-        }
-
-        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
-                .serialize(row, objInspector, DmlType.INSERT);
-
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Data : " + pResultWritable.getValueList());
-        }
-
-        write(pResultWritable);
-
-        rowCountDelta++;
-    }
-
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#update(long, java.lang.Object)
-     */
-    @Override
-    public void update(long currentTransaction, Object row) throws IOException {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Update - currentTranscation : " + currentTransaction + ", row : " +
-                    PhoenixStorageHandlerUtil.toString(row));
-        }
-
-        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
-                .serialize(row, objInspector, DmlType.UPDATE);
-
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Data : " + pResultWritable.getValueList());
-        }
-
-        write(pResultWritable);
-    }
-
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#delete(long, java.lang.Object)
-     */
-    @Override
-    public void delete(long currentTransaction, Object row) throws IOException {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Delete - currentTranscation : " + currentTransaction + ", row : " +
-                    PhoenixStorageHandlerUtil.toString(row));
-        }
-
-        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
-                .serialize(row, objInspector, DmlType.DELETE);
-
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Data : " + pResultWritable.getValueList());
-        }
-
-        if (pstmtForDelete == null) {
-            try {
-                String deleteQuery = PhoenixUtil.constructDeleteStatement(conn, tableName);
-
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Delete query : " + deleteQuery);
-                }
-
-                pstmtForDelete = conn.prepareStatement(deleteQuery);
-            } catch (SQLException e) {
-                throw new IOException(e);
-            }
-        }
-
-        delete(pResultWritable);
-
-        rowCountDelta--;
-    }
-
-    private void delete(PhoenixResultWritable pResultWritable) throws IOException {
-        try {
-            pResultWritable.delete(pstmtForDelete);
-            numRecords++;
-            pstmtForDelete.executeUpdate();
-
-            if (numRecords % batchSize == 0) {
-                LOG.debug("Commit called on a batch of size : " + batchSize);
-                conn.commit();
-            }
-        } catch (SQLException e) {
-            throw new IOException("Exception while deleting to table.", e);
-        }
-    }
-
-    private void write(PhoenixResultWritable pResultWritable) throws IOException {
-        try {
-            pResultWritable.write(pstmt);
-            numRecords++;
-            pstmt.executeUpdate();
-
-            if (numRecords % batchSize == 0) {
-                LOG.debug("Commit called on a batch of size : " + batchSize);
-                conn.commit();
-            }
-        } catch (SQLException e) {
-            throw new IOException("Exception while writing to table.", e);
-        }
-    }
-
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#flush()
-     */
-    @Override
-    public void flush() throws IOException {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Flush called");
-        }
-
-        try {
-            conn.commit();
-
-            if (LOG.isInfoEnabled()) {
-                LOG.info("Written row : " + numRecords);
-            }
-        } catch (SQLException e) {
-            LOG.error("SQLException while performing the commit for the task.");
-            throw new IOException(e);
-        }
-    }
-
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#close(boolean)
-     */
-    @Override
-    public void close(boolean abort) throws IOException {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("abort : " + abort);
-        }
-
-        try {
-            conn.commit();
-
-            if (LOG.isInfoEnabled()) {
-                LOG.info("Written row : " + numRecords);
-            }
-        } catch (SQLException e) {
-            LOG.error("SQLException while performing the commit for the task.");
-            throw new IOException(e);
-        } finally {
-            try {
-                if (restoreWalMode && PhoenixUtil.isDisabledWal(metaDataClient, tableName)) {
-                    try {
-                        PhoenixUtil.alterTableForWalDisable(conn, tableName, false);
-                    } catch (ConcurrentTableMutationException e) {
-                        if (LOG.isWarnEnabled()) {
-                            LOG.warn("Concurrent modification of disableWAL");
-                        }
-                    }
-
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug(tableName + "s wal enabled.");
-                    }
-                }
-
-                // flush when [table-name].auto.flush is true.
-                String autoFlushConfigName = tableName.toLowerCase() +
-                        PhoenixStorageHandlerConstants.AUTO_FLUSH;
-                boolean autoFlush = config.getBoolean(autoFlushConfigName, false);
-                if (autoFlush) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("autoFlush is " + autoFlush);
-                    }
-
-                    PhoenixUtil.flush(conn, tableName);
-                }
-
-                PhoenixUtil.closeResource(pstmt);
-                PhoenixUtil.closeResource(pstmtForDelete);
-                PhoenixUtil.closeResource(conn);
-            } catch (SQLException ex) {
-                LOG.error("SQLException while closing the connection for the task.");
-                throw new IOException(ex);
-            }
-        }
-    }
-
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.hive.ql.io.RecordUpdater#getStats()
-     */
-    @Override
-    public SerDeStats getStats() {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("getStats called");
-        }
-
-        SerDeStats stats = new SerDeStats();
-        stats.setRowCount(rowCountDelta);
-        // Don't worry about setting raw data size diff.  There is no reasonable way to calculate
-        // that without finding the row we are updating or deleting, which would be a mess.
-        return stats;
-    }
-
-    @Override
-    public long getBufferedRowCount() {
-        return numRecords;
-    }
-
-}
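
The RecordUpdater removed above boils down to a plain JDBC upsert loop that commits once per
batch and once more on flush/close. Below is a minimal, self-contained sketch of that batching
pattern (not part of the patch; the JDBC URL, table name, and columns are made up for
illustration):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class BatchedUpsertSketch {
        public static void main(String[] args) throws Exception {
            long batchSize = 1000;
            long numRecords = 0;
            // Hypothetical Phoenix JDBC URL and table; substitute your own.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
                 PreparedStatement pstmt = conn.prepareStatement(
                         "UPSERT INTO T (ID, V) VALUES (?, ?)")) {
                conn.setAutoCommit(false);
                for (int i = 0; i < 10_000; i++) {
                    pstmt.setInt(1, i);
                    pstmt.setString(2, "v" + i);
                    pstmt.executeUpdate();
                    if (++numRecords % batchSize == 0) {
                        conn.commit();   // batched commit, as in write()/delete() above
                    }
                }
                conn.commit();           // final commit, as in flush()/close() above
            }
        }
    }
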
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
deleted file mode 100644
index 0f8ee93..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler;
-import org.apache.hadoop.hive.ql.metadata.InputEstimator;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde2.AbstractSerDe;
-import org.apache.hadoop.hive.serde2.Deserializer;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.OutputFormat;
-import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
-import org.apache.phoenix.hive.mapreduce.PhoenixInputFormat;
-import org.apache.phoenix.hive.mapreduce.PhoenixOutputFormat;
-import org.apache.phoenix.hive.ppd.PhoenixPredicateDecomposer;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.UUID;
-
-/**
- * This class manages the initial configuration of Phoenix/Hive tables and the SerDe selection.
- */
-@SuppressWarnings("deprecation")
-public class PhoenixStorageHandler extends DefaultStorageHandler implements
-        HiveStoragePredicateHandler, InputEstimator {
-
-
-    private Configuration jobConf;
-    private Configuration hbaseConf;
-
-
-    @Override
-    public void setConf(Configuration conf) {
-        jobConf = conf;
-        hbaseConf = HBaseConfiguration.create(conf);
-    }
-
-    @Override
-    public Configuration getConf() {
-        return hbaseConf;
-    }
-
-    private static final Log LOG = LogFactory.getLog(PhoenixStorageHandler.class);
-
-    public PhoenixStorageHandler() {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("PhoenixStorageHandler created");
-        }
-    }
-
-    @Override
-    public HiveMetaHook getMetaHook() {
-        return new PhoenixMetaHook();
-    }
-
-    @Override
-    public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
-        try {
-            TableMapReduceUtil.addDependencyJars(jobConf);
-            org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
-                    PhoenixStorageHandler.class);
-            JobConf hbaseJobConf = new JobConf(getConf());
-            org.apache.hadoop.hbase.mapred.TableMapReduceUtil.initCredentials(hbaseJobConf);
-            ShimLoader.getHadoopShims().mergeCredentials(jobConf, hbaseJobConf);
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-
-
-    }
-
-    @SuppressWarnings("rawtypes")
-    @Override
-    public Class<? extends OutputFormat> getOutputFormatClass() {
-        return PhoenixOutputFormat.class;
-    }
-
-    @SuppressWarnings("rawtypes")
-    @Override
-    public Class<? extends InputFormat> getInputFormatClass() {
-        return PhoenixInputFormat.class;
-    }
-
-    @Override
-    public void configureInputJobProperties(TableDesc tableDesc, Map<String, String>
-            jobProperties) {
-        configureJobProperties(tableDesc, jobProperties);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Configuring input job for table : " + tableDesc.getTableName());
-        }
-
-        // Initialization efficiency: inform the SerDe whether this is input or output work.
-        tableDesc.getProperties().setProperty(PhoenixStorageHandlerConstants.IN_OUT_WORK,
-                PhoenixStorageHandlerConstants.IN_WORK);
-    }
-
-    @Override
-    public void configureOutputJobProperties(TableDesc tableDesc, Map<String, String>
-            jobProperties) {
-        configureJobProperties(tableDesc, jobProperties);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Configuring output job for  table : " + tableDesc.getTableName());
-        }
-
-        // Initialization efficiency: inform the SerDe whether this is input or output work.
-        tableDesc.getProperties().setProperty(PhoenixStorageHandlerConstants.IN_OUT_WORK,
-                PhoenixStorageHandlerConstants.OUT_WORK);
-    }
-
-    @Override
-    public void configureTableJobProperties(TableDesc tableDesc, Map<String, String>
-            jobProperties) {
-        configureJobProperties(tableDesc, jobProperties);
-    }
-
-    @SuppressWarnings({"unchecked", "rawtypes"})
-    protected void configureJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
-        Properties tableProperties = tableDesc.getProperties();
-
-        String inputFormatClassName =
-                tableProperties.getProperty(PhoenixStorageHandlerConstants
-                        .HBASE_INPUT_FORMAT_CLASS);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug(PhoenixStorageHandlerConstants.HBASE_INPUT_FORMAT_CLASS + " is " +
-                    inputFormatClassName);
-        }
-
-        Class<?> inputFormatClass;
-        try {
-            if (inputFormatClassName != null) {
-                inputFormatClass = JavaUtils.loadClass(inputFormatClassName);
-            } else {
-                inputFormatClass = PhoenixInputFormat.class;
-            }
-        } catch (Exception e) {
-            LOG.error(e.getMessage(), e);
-            throw new RuntimeException(e);
-        }
-
-        if (inputFormatClass != null) {
-            tableDesc.setInputFileFormatClass((Class<? extends InputFormat>) inputFormatClass);
-        }
-
-        String tableName = tableProperties.getProperty(PhoenixStorageHandlerConstants
-                .PHOENIX_TABLE_NAME);
-        if (tableName == null) {
-            tableName = tableDesc.getTableName();
-            tableProperties.setProperty(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME,
-                    tableName);
-        }
-        SessionState sessionState = SessionState.get();
-
-        String sessionId;
-        if (sessionState != null) {
-            sessionId = sessionState.getSessionId();
-        } else {
-            sessionId = UUID.randomUUID().toString();
-        }
-        jobProperties.put(PhoenixConfigurationUtil.SESSION_ID, sessionId);
-        jobProperties.put(PhoenixConfigurationUtil.INPUT_TABLE_NAME, tableName);
-        jobProperties.put(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM, tableProperties
-                .getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM,
-                        PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_QUORUM));
-        jobProperties.put(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, tableProperties
-                .getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, String.valueOf
-                        (PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PORT)));
-        jobProperties.put(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT, tableProperties
-                .getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT,
-                        PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PARENT));
-        String columnMapping = tableProperties
-                .getProperty(PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING);
-        if (columnMapping != null) {
-            jobProperties.put(PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING, columnMapping);
-        }
-
-        jobProperties.put(hive_metastoreConstants.META_TABLE_STORAGE, this.getClass().getName());
-
-        // Set configuration for working directly with HBase.
-        jobProperties.put(HConstants.ZOOKEEPER_QUORUM, jobProperties.get
-                (PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM));
-        jobProperties.put(HConstants.ZOOKEEPER_CLIENT_PORT, jobProperties.get
-                (PhoenixStorageHandlerConstants.ZOOKEEPER_PORT));
-        jobProperties.put(HConstants.ZOOKEEPER_ZNODE_PARENT, jobProperties.get
-                (PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT));
-        addHBaseResources(jobConf, jobProperties);
-    }
-
-    /**
-     * Utility method to add hbase-default.xml and hbase-site.xml properties to a new map
-     * if they are not already present in the jobConf.
-     * @param jobConf Job configuration
-     * @param newJobProperties  Map to which new properties should be added
-     */
-    private void addHBaseResources(Configuration jobConf,
-                                   Map<String, String> newJobProperties) {
-        Configuration conf = new Configuration(false);
-        HBaseConfiguration.addHbaseResources(conf);
-        for (Map.Entry<String, String> entry : conf) {
-            if (jobConf.get(entry.getKey()) == null) {
-                newJobProperties.put(entry.getKey(), entry.getValue());
-            }
-        }
-    }
-
-    @Override
-    public Class<? extends AbstractSerDe> getSerDeClass() {
-        return PhoenixSerDe.class;
-    }
-
-    @Override
-    public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer,
-                                                  ExprNodeDesc predicate) {
-        PhoenixSerDe phoenixSerDe = (PhoenixSerDe) deserializer;
-        List<String> columnNameList = phoenixSerDe.getSerdeParams().getColumnNames();
-
-        return PhoenixPredicateDecomposer.create(columnNameList).decomposePredicate(predicate);
-    }
-
-    @Override
-    public Estimation estimate(JobConf job, TableScanOperator ts, long remaining) throws
-            HiveException {
-        String hiveTableName = ts.getConf().getTableMetadata().getTableName();
-        int reducerCount = job.getInt(hiveTableName + PhoenixStorageHandlerConstants
-                .PHOENIX_REDUCER_NUMBER, 1);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Estimating input size for table: " + hiveTableName + " with reducer count " +
-                    reducerCount + ". Remaining : " + remaining);
-        }
-
-        long bytesPerReducer = job.getLong(HiveConf.ConfVars.BYTESPERREDUCER.varname,
-                Long.parseLong(HiveConf.ConfVars.BYTESPERREDUCER.getDefaultValue()));
-        long totalLength = reducerCount * bytesPerReducer;
-
-        return new Estimation(0, totalLength);
-    }
-}
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java
deleted file mode 100644
index 1e36413..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/constants/PhoenixStorageHandlerConstants.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.constants;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.io.IntWritable;
-
-import java.util.List;
-
-/**
- * Constants used by the Hive Storage Handler implementation
- */
-public class PhoenixStorageHandlerConstants {
-
-    public static final String HBASE_INPUT_FORMAT_CLASS = "phoenix.input.format.class";
-
-    public static final String PHOENIX_TABLE_NAME = "phoenix.table.name";
-
-    public static final String DEFAULT_PHOENIX_INPUT_CLASS = "org.apache.phoenix.hive.mapreduce" +
-            ".PhoenixResultWritable";
-
-    public static final String ZOOKEEPER_QUORUM = "phoenix.zookeeper.quorum";
-    public static final String ZOOKEEPER_PORT = "phoenix.zookeeper.client.port";
-    public static final String ZOOKEEPER_PARENT = "phoenix.zookeeper.znode.parent";
-    public static final String DEFAULT_ZOOKEEPER_QUORUM = "localhost";
-    public static final int DEFAULT_ZOOKEEPER_PORT = 2181;
-    public static final String DEFAULT_ZOOKEEPER_PARENT = "/hbase";
-
-    public static final String PHOENIX_ROWKEYS = "phoenix.rowkeys";
-    public static final String PHOENIX_COLUMN_MAPPING = "phoenix.column.mapping";
-    public static final String PHOENIX_TABLE_OPTIONS = "phoenix.table.options";
-
-    public static final String PHOENIX_TABLE_QUERY_HINT = ".query.hint";
-    public static final String PHOENIX_REDUCER_NUMBER = ".reducer.count";
-    public static final String DISABLE_WAL = ".disable.wal";
-    public static final String BATCH_MODE = "batch.mode";
-    public static final String AUTO_FLUSH = ".auto.flush";
-
-    public static final String COLON = ":";
-    public static final String COMMA = ",";
-    public static final String EMPTY_STRING = "";
-    public static final String SPACE = " ";
-    public static final String LEFT_ROUND_BRACKET = "(";
-    public static final String RIGHT_ROUND_BRACKET = ")";
-    public static final String QUOTATION_MARK = "'";
-    public static final String EQUAL = "=";
-    public static final String IS = "is";
-    public static final String QUESTION = "?";
-
-    public static final String SPLIT_BY_STATS = "split.by.stats";
-    public static final String HBASE_SCAN_CACHE = "hbase.scan.cache";
-    public static final String HBASE_SCAN_CACHEBLOCKS = "hbase.scan.cacheblock";
-    public static final String HBASE_DATE_FORMAT = "hbase.date.format";
-    public static final String HBASE_TIMESTAMP_FORMAT = "hbase.timestamp.format";
-    public static final String DEFAULT_DATE_FORMAT = "yyyy-MM-dd";
-    public static final String DEFAULT_TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss.SSS";
-
-    public static final String IN_OUT_WORK = "in.out.work";
-    public static final String IN_WORK = "input";
-    public static final String OUT_WORK = "output";
-
-    public static final String MR = "mr";
-    public static final String TEZ = "tez";
-    public static final String SPARK = "spark";
-
-    public static final String DATE_TYPE = "date";
-    public static final String TIMESTAMP_TYPE = "timestamp";
-    public static final String BETWEEN_COMPARATOR = "between";
-    public static final String IN_COMPARATOR = "in";
-    public static final List<String> COMMON_COMPARATOR = Lists.newArrayList("=", "<", ">", "<=",
-            ">=");
-
-    // date/timestamp
-    public static final String COLUMNE_MARKER = "$columnName$";
-    public static final String PATERN_MARKER = "$targetPattern$";
-    public static final String DATE_PATTERN = "'?\\d{4}-\\d{2}-\\d{2}'?";
-    public static final String TIMESTAMP_PATTERN = "'?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\\" +
-            ".?\\d{0,9}'?";
-    public static final String COMMON_OPERATOR_PATTERN = "(\\(?\"?" + COLUMNE_MARKER + "\"?\\)?\\s*" +
-            "(=|>|<|<=|>=)\\s*(" + PATERN_MARKER + "))";
-    public static final String BETWEEN_OPERATOR_PATTERN = "(\\(?\"?" + COLUMNE_MARKER + "\"?\\)?\\s*(" +
-            "(?i)not)?\\s*(?i)between\\s*(" + PATERN_MARKER + ")\\s*(?i)and\\s*(" + PATERN_MARKER
-            + "))";
-    public static final String IN_OPERATOR_PATTERN = "(\\(?\"?" + COLUMNE_MARKER + "\"?\\)?\\s*((?i)" +
-            "not)?\\s*(?i)in\\s*\\((" + PATERN_MARKER + ",?\\s*)+\\))";
-
-    public static final String FUNCTION_VALUE_MARKER = "$value$";
-    public static final String DATE_FUNCTION_TEMPLETE = "to_date(" + FUNCTION_VALUE_MARKER + ")";
-    public static final String TIMESTAMP_FUNCTION_TEMPLATE = "TIMESTAMP" +
-            FUNCTION_VALUE_MARKER;
-
-    public static final IntWritable INT_ZERO = new IntWritable(0);
-}
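
The marker and *_PATTERN constants at the end of the file removed above are regex templates;
the query builder presumably substitutes a concrete column name and value pattern before
compiling them. A small stand-alone sketch of that substitution, with the template strings
copied inline and a hypothetical column name:

    import java.util.regex.Pattern;

    public class PredicatePatternSketch {
        public static void main(String[] args) {
            // Copied from PhoenixStorageHandlerConstants above.
            String columnMarker = "$columnName$";
            String patternMarker = "$targetPattern$";
            String datePattern = "'?\\d{4}-\\d{2}-\\d{2}'?";
            String commonOperatorTemplate = "(\\(?\"?" + columnMarker
                    + "\"?\\)?\\s*(=|>|<|<=|>=)\\s*(" + patternMarker + "))";

            // Substitute a concrete (hypothetical) column name and the date value pattern.
            String regex = commonOperatorTemplate
                    .replace(columnMarker, "created_date")
                    .replace(patternMarker, datePattern);

            // The expanded regex now matches a simple date comparison in a WHERE clause.
            System.out.println(Pattern.compile(regex)
                    .matcher("created_date >= '2019-03-26'").find());   // prints true
        }
    }
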
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
deleted file mode 100644
index 02c62d1..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.mapreduce;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
-import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.TableScanDesc;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.lib.db.DBWritable;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.phoenix.compile.QueryPlan;
-import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
-import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
-import org.apache.phoenix.hive.ppd.PhoenixPredicateDecomposer;
-import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
-import org.apache.phoenix.hive.query.PhoenixQueryBuilder;
-import org.apache.phoenix.hive.util.PhoenixConnectionUtil;
-import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
-import org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
-import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.util.PhoenixRuntime;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Custom InputFormat to feed into Hive
- */
-@SuppressWarnings({"deprecation", "rawtypes"})
-public class PhoenixInputFormat<T extends DBWritable> implements InputFormat<WritableComparable,
-        T> {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixInputFormat.class);
-
-    public PhoenixInputFormat() {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("PhoenixInputFormat created");
-        }
-    }
-
-    @Override
-    public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
-        String tableName = jobConf.get(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
-
-        String query;
-        String executionEngine = jobConf.get(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname,
-                HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.getDefaultValue());
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Target table name at split phase : " + tableName + "with whereCondition :" +
-                    jobConf.get(TableScanDesc.FILTER_TEXT_CONF_STR) +
-                    " and " + HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname + " : " +
-                    executionEngine);
-        }
-
-        if (PhoenixStorageHandlerConstants.MR.equals(executionEngine)) {
-            List<IndexSearchCondition> conditionList = null;
-            String filterExprSerialized = jobConf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
-            if (filterExprSerialized != null) {
-                ExprNodeGenericFuncDesc filterExpr =
-                        SerializationUtilities.deserializeExpression(filterExprSerialized);
-                PhoenixPredicateDecomposer predicateDecomposer =
-                        PhoenixPredicateDecomposer.create(Arrays.asList(jobConf.get(serdeConstants.LIST_COLUMNS).split(",")));
-                predicateDecomposer.decomposePredicate(filterExpr);
-                if (predicateDecomposer.isCalledPPD()) {
-                    conditionList = predicateDecomposer.getSearchConditionList();
-                }
-            }
-
-            query = PhoenixQueryBuilder.getInstance().buildQuery(jobConf, tableName,
-                    PhoenixStorageHandlerUtil.getReadColumnNames(jobConf), conditionList);
-        } else if (PhoenixStorageHandlerConstants.TEZ.equals(executionEngine)) {
-            Map<String, TypeInfo> columnTypeMap =
-                    PhoenixStorageHandlerUtil.createColumnTypeMap(jobConf);
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Column type map for TEZ : " + columnTypeMap);
-            }
-
-            String whereClause = jobConf.get(TableScanDesc.FILTER_TEXT_CONF_STR);
-            query = PhoenixQueryBuilder.getInstance().buildQuery(jobConf, tableName,
-                    PhoenixStorageHandlerUtil.getReadColumnNames(jobConf), whereClause, columnTypeMap);
-        } else {
-            throw new IOException(executionEngine + " execution engine is not supported yet.");
-        }
-
-        final QueryPlan queryPlan = getQueryPlan(jobConf, query);
-        final List<KeyRange> allSplits = queryPlan.getSplits();
-        final List<InputSplit> splits = generateSplits(jobConf, queryPlan, allSplits, query);
-
-        return splits.toArray(new InputSplit[splits.size()]);
-    }
-
-    private List<InputSplit> generateSplits(final JobConf jobConf, final QueryPlan qplan,
-                                            final List<KeyRange> splits, String query) throws
-            IOException {
-        Preconditions.checkNotNull(qplan);
-        Preconditions.checkNotNull(splits);
-        final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
-
-        Path[] tablePaths = FileInputFormat.getInputPaths(ShimLoader.getHadoopShims()
-                .newJobContext(new Job(jobConf)));
-        boolean splitByStats = jobConf.getBoolean(PhoenixStorageHandlerConstants.SPLIT_BY_STATS,
-                false);
-
-        setScanCacheSize(jobConf);
-
-        // Adding Localization
-        try (org.apache.hadoop.hbase.client.Connection connection = ConnectionFactory.createConnection(PhoenixConnectionUtil.getConfiguration(jobConf))) {
-        RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan
-                .getTableRef().getTable().getPhysicalName().toString()));
-        RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection
-                .getAdmin());
-
-        for (List<Scan> scans : qplan.getScans()) {
-            PhoenixInputSplit inputSplit;
-
-            HRegionLocation location = regionLocator.getRegionLocation(scans.get(0).getStartRow()
-                    , false);
-            long regionSize = sizeCalculator.getRegionSize(location.getRegionInfo().getRegionName
-                    ());
-            String regionLocation = PhoenixStorageHandlerUtil.getRegionLocation(location, LOG);
-
-            if (splitByStats) {
-                for (Scan aScan : scans) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Split for  scan : " + aScan + "with scanAttribute : " + aScan
-                                .getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : [" +
-                                aScan.getCaching() + ", " + aScan.getCacheBlocks() + ", " + aScan
-                                .getBatch() + "] and  regionLocation : " + regionLocation);
-                    }
-
-                    inputSplit = new PhoenixInputSplit(Lists.newArrayList(aScan), tablePaths[0],
-                            regionLocation, regionSize);
-                    inputSplit.setQuery(query);
-                    psplits.add(inputSplit);
-                }
-            } else {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans
-                            .get(0).getStartRow()) + " ~ " + Bytes.toStringBinary(scans.get(scans
-                            .size() - 1).getStopRow()));
-                    LOG.debug("First scan : " + scans.get(0) + "with scanAttribute : " + scans
-                            .get(0).getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : " +
-                            "[" + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks()
-                            + ", " + scans.get(0).getBatch() + "] and  regionLocation : " +
-                            regionLocation);
-
-                    for (int i = 0, limit = scans.size(); i < limit; i++) {
-                        LOG.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes
-                                .toStringBinary(scans.get(i).getAttribute
-                                        (BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY)));
-                    }
-                }
-
-                inputSplit = new PhoenixInputSplit(scans, tablePaths[0], regionLocation,
-                        regionSize);
-                inputSplit.setQuery(query);
-                psplits.add(inputSplit);
-            }
-        }
-        }
-
-        return psplits;
-    }
-
-    private void setScanCacheSize(JobConf jobConf) {
-        int scanCacheSize = jobConf.getInt(PhoenixStorageHandlerConstants.HBASE_SCAN_CACHE, -1);
-        if (scanCacheSize > 0) {
-            jobConf.setInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, scanCacheSize);
-        }
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Generating splits with scanCacheSize : " + scanCacheSize);
-        }
-    }
-
-    @Override
-    public RecordReader<WritableComparable, T> getRecordReader(InputSplit split, JobConf job,
-                                                               Reporter reporter) throws
-            IOException {
-        final QueryPlan queryPlan = getQueryPlan(job, ((PhoenixInputSplit) split).getQuery());
-        @SuppressWarnings("unchecked")
-        final Class<T> inputClass = (Class<T>) job.getClass(PhoenixConfigurationUtil.INPUT_CLASS,
-                PhoenixResultWritable.class);
-
-        PhoenixRecordReader<T> recordReader = new PhoenixRecordReader<T>(inputClass, job,
-                queryPlan);
-        recordReader.initialize(split);
-
-        return recordReader;
-    }
-
-    /**
-     * Returns the query plan associated with the select query.
-     */
-    private QueryPlan getQueryPlan(final Configuration configuration, String selectStatement)
-            throws IOException {
-        try {
-            final String currentScnValue = configuration.get(PhoenixConfigurationUtil
-                    .CURRENT_SCN_VALUE);
-            final Properties overridingProps = new Properties();
-            if (currentScnValue != null) {
-                overridingProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, currentScnValue);
-            }
-            final Connection connection = PhoenixConnectionUtil.getInputConnection(configuration,
-                    overridingProps);
-            Preconditions.checkNotNull(selectStatement);
-            final Statement statement = connection.createStatement();
-            final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
-
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Compiled query : " + selectStatement);
-            }
-
-            // Optimize the query plan so that we potentially use secondary indexes
-            final QueryPlan queryPlan = pstmt.optimizeQuery(selectStatement);
-            // Initialize the query plan so it sets up the parallel scans
-            queryPlan.iterator(MapReduceParallelScanGrouper.getInstance());
-            return queryPlan;
-        } catch (Exception exception) {
-            LOG.error(String.format("Failed to get the query plan with error [%s]", exception.getMessage()));
-            throw new RuntimeException(exception);
-        }
-    }
-}
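
For reference, the split logic removed above is driven by a handful of job-level settings. The
sketch below (not part of the patch; the table names are hypothetical) only shows how those keys
would be set on a JobConf:

    import org.apache.hadoop.mapred.JobConf;

    public class InputFormatTuningSketch {
        public static void main(String[] args) {
            JobConf jobConf = new JobConf(false);
            jobConf.set("phoenix.table.name", "MY_TABLE");     // target Phoenix table (hypothetical)
            jobConf.setBoolean("split.by.stats", true);        // one InputSplit per Scan instead of per region
            jobConf.setInt("hbase.scan.cache", 1000);          // copied into hbase.client.scanner.caching
            jobConf.setInt("my_hive_table.reducer.count", 4);  // read by PhoenixStorageHandler.estimate()
        }
    }
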
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordWriter.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordWriter.java
deleted file mode 100644
index fcced90..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixRecordWriter.java
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.mapreduce;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
-import org.apache.hadoop.hive.ql.io.RecordUpdater;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.SerDeStats;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.RecordWriter;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapreduce.lib.db.DBWritable;
-import org.apache.phoenix.hive.PhoenixSerializer;
-import org.apache.phoenix.hive.PhoenixSerializer.DmlType;
-import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
-import org.apache.phoenix.hive.util.PhoenixConnectionUtil;
-import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
-import org.apache.phoenix.hive.util.PhoenixUtil;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.schema.ConcurrentTableMutationException;
-import org.apache.phoenix.schema.MetaDataClient;
-import org.apache.phoenix.util.QueryUtil;
-
-/**
- *
- * RecordWriter implementation. Writes records to the output table.
- * WARNING: The WAL-disable setting may not work properly because of concurrent
- * enabling/disabling of the WAL.
- *
- */
-public class PhoenixRecordWriter<T extends DBWritable> implements RecordWriter<NullWritable, T>,
-        org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter, RecordUpdater {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixRecordWriter.class);
-
-    private Connection conn;
-    private PreparedStatement pstmt;
-    private long batchSize;
-    private long numRecords = 0;
-
-    private Configuration config;
-    private String tableName;
-    private MetaDataClient metaDataClient;
-    private boolean restoreWalMode;
-
-    // For RecordUpdater
-    private long rowCountDelta = 0;
-    private PhoenixSerializer phoenixSerializer;
-    private ObjectInspector objInspector;
-    private PreparedStatement pstmtForDelete;
-
-    // For RecordUpdater
-    public PhoenixRecordWriter(Path path, AcidOutputFormat.Options options) throws IOException {
-        Configuration config = options.getConfiguration();
-        Properties props = new Properties();
-
-        try {
-            initialize(config, props);
-        } catch (SQLException e) {
-            throw new IOException(e);
-        }
-
-        this.objInspector = options.getInspector();
-        try {
-            phoenixSerializer = new PhoenixSerializer(config, options.getTableProperties());
-        } catch (SerDeException e) {
-            throw new IOException(e);
-        }
-    }
-
-    public PhoenixRecordWriter(final Configuration configuration, final Properties props) throws
-            SQLException {
-        initialize(configuration, props);
-    }
-
-    private void initialize(Configuration config, Properties properties) throws SQLException {
-        this.config = config;
-        tableName = config.get(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
-
-        // Disable WAL
-        String walConfigName = tableName.toLowerCase() + PhoenixStorageHandlerConstants.DISABLE_WAL;
-        boolean disableWal = config.getBoolean(walConfigName, false);
-        if (disableWal) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Property " + walConfigName + " is true. batch.mode will be set true. ");
-            }
-
-            properties.setProperty(PhoenixStorageHandlerConstants.BATCH_MODE, "true");
-        }
-
-        this.conn = PhoenixConnectionUtil.getInputConnection(config, properties);
-
-        if (disableWal) {
-            metaDataClient = new MetaDataClient((PhoenixConnection) conn);
-
-            if (!PhoenixUtil.isDisabledWal(metaDataClient, tableName)) {
-                // Execute an ALTER TABLE statement if disable_wal is not already true.
-                try {
-                    PhoenixUtil.alterTableForWalDisable(conn, tableName, true);
-                } catch (ConcurrentTableMutationException e) {
-                    if (LOG.isWarnEnabled()) {
-                        LOG.warn("Another mapper or task processing wal disable");
-                    }
-                }
-
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug(tableName + "'s WAL disabled.");
-                }
-
-                // restore original value of disable_wal at the end.
-                restoreWalMode = true;
-            }
-        }
-
-        this.batchSize = PhoenixConfigurationUtil.getBatchSize(config);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Batch-size : " + batchSize);
-        }
-
-        String upsertQuery = QueryUtil.constructUpsertStatement(tableName, PhoenixUtil
-                .getColumnInfoList(conn, tableName));
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Upsert-query : " + upsertQuery);
-        }
-        this.pstmt = this.conn.prepareStatement(upsertQuery);
-    }
-
-    @Override
-    public void write(NullWritable key, T record) throws IOException {
-        try {
-            record.write(pstmt);
-            numRecords++;
-            pstmt.executeUpdate();
-
-            if (numRecords % batchSize == 0) {
-                LOG.debug("Commit called on a batch of size : " + batchSize);
-                conn.commit();
-            }
-        } catch (SQLException e) {
-            throw new IOException("Exception while writing to table.", e);
-        }
-    }
-
-    @Override
-    public void close(Reporter reporter) throws IOException {
-        try {
-            conn.commit();
-
-            if (LOG.isInfoEnabled()) {
-                LOG.info("Wrote row : " + numRecords);
-            }
-        } catch (SQLException e) {
-            LOG.error("SQLException while performing the commit for the task.");
-            throw new IOException(e);
-        } finally {
-            try {
-                if (restoreWalMode && PhoenixUtil.isDisabledWal(metaDataClient, tableName)) {
-                    try {
-                        PhoenixUtil.alterTableForWalDisable(conn, tableName, false);
-                    } catch (ConcurrentTableMutationException e) {
-                        if (LOG.isWarnEnabled()) {
-                            LOG.warn("Another mapper or task processing wal enable");
-                        }
-                    }
-
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug(tableName + "'s WAL enabled.");
-                    }
-                }
-
-                // flush if [table-name].auto.flush is true.
-                String autoFlushConfigName = tableName.toLowerCase() +
-                        PhoenixStorageHandlerConstants.AUTO_FLUSH;
-                boolean autoFlush = config.getBoolean(autoFlushConfigName, false);
-                if (autoFlush) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("autoFlush is true.");
-                    }
-
-                    PhoenixUtil.flush(conn, tableName);
-                }
-
-                PhoenixUtil.closeResource(pstmt);
-                PhoenixUtil.closeResource(pstmtForDelete);
-                PhoenixUtil.closeResource(conn);
-            } catch (SQLException ex) {
-                LOG.error("SQLException while closing the connection for the task.");
-                throw new IOException(ex);
-            }
-        }
-    }
-
-    // For Testing
-    public boolean isRestoreWalMode() {
-        return restoreWalMode;
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void write(Writable w) throws IOException {
-        PhoenixResultWritable row = (PhoenixResultWritable) w;
-
-        write(NullWritable.get(), (T) row);
-    }
-
-    @Override
-    public void close(boolean abort) throws IOException {
-        close(Reporter.NULL);
-    }
-
-    @Override
-    public void insert(long currentTransaction, Object row) throws IOException {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("insert transaction : " + currentTransaction + ", row : " +
-                    PhoenixStorageHandlerUtil.toString(row));
-        }
-
-        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
-                .serialize(row, objInspector, DmlType.INSERT);
-
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Data : " + pResultWritable.getValueList());
-        }
-
-        write(pResultWritable);
-        rowCountDelta++;
-    }
-
-    @Override
-    public void update(long currentTransaction, Object row) throws IOException {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("update transaction : " + currentTransaction + ", row : " +
-                    PhoenixStorageHandlerUtil
-                            .toString(row));
-        }
-
-        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
-                .serialize(row, objInspector, DmlType.UPDATE);
-
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Data : " + pResultWritable.getValueList());
-        }
-
-        write(pResultWritable);
-    }
-
-    @Override
-    public void delete(long currentTransaction, Object row) throws IOException {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("delete transaction : " + currentTransaction + ", row : " +
-                    PhoenixStorageHandlerUtil.toString(row));
-        }
-
-        PhoenixResultWritable pResultWritable = (PhoenixResultWritable) phoenixSerializer
-                .serialize(row, objInspector, DmlType.DELETE);
-
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Data : " + pResultWritable.getValueList());
-        }
-
-        if (pstmtForDelete == null) {
-            try {
-                String deleteQuery = PhoenixUtil.constructDeleteStatement(conn, tableName);
-
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Delete query : " + deleteQuery);
-                }
-
-                pstmtForDelete = conn.prepareStatement(deleteQuery);
-            } catch (SQLException e) {
-                throw new IOException(e);
-            }
-        }
-
-        delete(pResultWritable);
-
-        rowCountDelta--;
-    }
-
-    private void delete(PhoenixResultWritable pResultWritable) throws IOException {
-        try {
-            pResultWritable.delete(pstmtForDelete);
-            numRecords++;
-            pstmtForDelete.executeUpdate();
-
-            if (numRecords % batchSize == 0) {
-                LOG.debug("Commit called on a batch of size : " + batchSize);
-                conn.commit();
-            }
-        } catch (SQLException e) {
-            throw new IOException("Exception while deleting to table.", e);
-        }
-    }
-
-    @Override
-    public void flush() throws IOException {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Flush called");
-        }
-
-        try {
-            conn.commit();
-
-            if (LOG.isInfoEnabled()) {
-                LOG.info("Written row : " + numRecords);
-            }
-        } catch (SQLException e) {
-            LOG.error("SQLException while performing the commit for the task.");
-            throw new IOException(e);
-        }
-    }
-
-    @Override
-    public SerDeStats getStats() {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("getStats called");
-        }
-
-        SerDeStats stats = new SerDeStats();
-        stats.setRowCount(rowCountDelta);
-        // Don't worry about setting raw data size diff.  There is no reasonable way to calculate
-        // that without finding the row we are updating or deleting, which would be a mess.
-        return stats;
-    }
-
-    @Override
-    public long getBufferedRowCount() {
-        return numRecords;
-    }
-}
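
The writer removed above derives its per-table switches by appending the constant suffixes
(".disable.wal", ".auto.flush") to the lower-cased table name. A tiny sketch of how those keys
are built and read back, using a hypothetical table name:

    import org.apache.hadoop.conf.Configuration;

    public class WriterSwitchesSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            String tableName = "MY_TABLE";                    // hypothetical Phoenix table

            // How a user would turn the switches on...
            conf.setBoolean(tableName.toLowerCase() + ".disable.wal", true);
            conf.setBoolean(tableName.toLowerCase() + ".auto.flush", true);

            // ...and how initialize()/close() above read them back.
            boolean disableWal = conf.getBoolean(tableName.toLowerCase() + ".disable.wal", false);
            boolean autoFlush = conf.getBoolean(tableName.toLowerCase() + ".auto.flush", false);
            System.out.println("disableWal=" + disableWal + ", autoFlush=" + autoFlush);
        }
    }
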
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixByteObjectInspector.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixByteObjectInspector.java
deleted file mode 100644
index 6972238..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixByteObjectInspector.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.objectinspector;
-
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.hadoop.hive.serde2.io.ByteWritable;
-
-/**
- * ObjectInspector for byte type
- */
-public class PhoenixByteObjectInspector extends AbstractPhoenixObjectInspector<ByteWritable>
-        implements ByteObjectInspector {
-
-    public PhoenixByteObjectInspector() {
-        super(TypeInfoFactory.byteTypeInfo);
-    }
-
-    @Override
-    public Object copyObject(Object o) {
-        return o == null ? null : new Byte((Byte) o);
-    }
-
-    @Override
-    public ByteWritable getPrimitiveWritableObject(Object o) {
-        return new ByteWritable(get(o));
-    }
-
-    @Override
-    public byte get(Object o) {
-        Byte value = null;
-
-        if (o != null) {
-            try {
-                value = (Byte) o;
-            } catch (Exception e) {
-                logExceptionMessage(o, "BYTE");
-            }
-        }
-
-        return value;
-    }
-
-}
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDoubleObjectInspector.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDoubleObjectInspector.java
deleted file mode 100644
index bd1c2e2..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/objectinspector/PhoenixDoubleObjectInspector.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.objectinspector;
-
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
-
-/**
- * ObjectInspector for double type
- */
-public class PhoenixDoubleObjectInspector extends AbstractPhoenixObjectInspector<DoubleWritable>
-        implements DoubleObjectInspector {
-
-    public PhoenixDoubleObjectInspector() {
-        super(TypeInfoFactory.doubleTypeInfo);
-    }
-
-    @Override
-    public Object copyObject(Object o) {
-        return o == null ? null : new Double((Double) o);
-    }
-
-    @Override
-    public DoubleWritable getPrimitiveWritableObject(Object o) {
-        return new DoubleWritable(get(o));
-    }
-
-    @Override
-    public double get(Object o) {
-        Double value = null;
-
-        if (o != null) {
-            try {
-                value = ((Double) o).doubleValue();
-            } catch (Exception e) {
-                logExceptionMessage(o, "LONG");
-            }
-        }
-
-        return value;
-    }
-
-}
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
deleted file mode 100644
index 207b46a..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.ql.index;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
-import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
-import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
-import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
-import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNot;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDecimal;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUnixTimeStamp;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUtcTimestamp;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-
-import com.google.common.collect.Lists;
-
-/**
- * Clone of org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer with a modified
- * analyzePredicate method.
- *
- *
- */
-public class IndexPredicateAnalyzer {
-
-    private static final Log LOG = LogFactory.getLog(IndexPredicateAnalyzer.class);
-
-    private final Set<String> udfNames;
-    private final Map<String, Set<String>> columnToUDFs;
-    private FieldValidator fieldValidator;
-
-    private boolean acceptsFields;
-
-    public IndexPredicateAnalyzer() {
-        udfNames = new HashSet<String>();
-        columnToUDFs = new HashMap<String, Set<String>>();
-    }
-
-    public void setFieldValidator(FieldValidator fieldValidator) {
-        this.fieldValidator = fieldValidator;
-    }
-
-    /**
-     * Registers a comparison operator as one which can be satisfied by an index
-     * search. Unless this is called, analyzePredicate will never find any
-     * indexable conditions.
-     *
-     * @param udfName name of comparison operator as returned by either
-     *                {@link GenericUDFBridge#getUdfName} (for simple UDF's) or
-     *                udf.getClass().getName() (for generic UDF's).
-     */
-    public void addComparisonOp(String udfName) {
-        udfNames.add(udfName);
-    }
-
-    /**
-     * Clears the set of column names allowed in comparisons. (Initially, all
-     * column names are allowed.)
-     */
-    public void clearAllowedColumnNames() {
-        columnToUDFs.clear();
-    }
-
-    /**
-     * Adds a column name to the set of column names allowed.
-     *
-     * @param columnName name of column to be allowed
-     */
-    public void allowColumnName(String columnName) {
-        columnToUDFs.put(columnName, udfNames);
-    }
-
-    /**
-     * Adds allowed comparison functions for the given column.
-     *
-     * @param columnName column to which the UDFs apply
-     * @param udfs       names of the allowed comparison UDFs
-     */
-    public void addComparisonOp(String columnName, String... udfs) {
-        Set<String> allowed = columnToUDFs.get(columnName);
-        if (allowed == null || allowed == udfNames) {
-            // override
-            columnToUDFs.put(columnName, new HashSet<String>(Arrays.asList(udfs)));
-        } else {
-            allowed.addAll(Arrays.asList(udfs));
-        }
-    }
-
-    /**
-     * Analyzes a predicate.
-     *
-     * @param predicate        predicate to be analyzed
-     * @param searchConditions receives conditions produced by analysis
-     * @return residual predicate which could not be translated to
-     * searchConditions
-     */
-    public ExprNodeDesc analyzePredicate(ExprNodeDesc predicate, final List<IndexSearchCondition>
-            searchConditions) {
-
-        Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
-        NodeProcessor nodeProcessor = new NodeProcessor() {
-            @Override
-            public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object...
-                    nodeOutputs) throws SemanticException {
-
-                // We can only push down stuff which appears as part of
-                // a pure conjunction: reject OR, CASE, etc.
-                for (Node ancestor : stack) {
-                    if (nd == ancestor) {
-                        break;
-                    }
-                    if (!FunctionRegistry.isOpAnd((ExprNodeDesc) ancestor)) {
-                        return nd;
-                    }
-                }
-
-                return analyzeExpr((ExprNodeGenericFuncDesc) nd, searchConditions, nodeOutputs);
-            }
-        };
-
-        Dispatcher disp = new DefaultRuleDispatcher(nodeProcessor, opRules, null);
-        GraphWalker ogw = new DefaultGraphWalker(disp);
-        ArrayList<Node> topNodes = new ArrayList<Node>();
-        topNodes.add(predicate);
-        HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
-
-        try {
-            ogw.startWalking(topNodes, nodeOutput);
-        } catch (SemanticException ex) {
-            throw new RuntimeException(ex);
-        }
-
-        ExprNodeDesc residualPredicate = (ExprNodeDesc) nodeOutput.get(predicate);
-        return residualPredicate;
-    }
-
-    // Check if an ExprNodeColumnDesc is wrapped in expr.
-    // If so, peel it off; otherwise return expr itself.
-    private ExprNodeDesc getColumnExpr(ExprNodeDesc expr) {
-        if (expr instanceof ExprNodeColumnDesc) {
-            return expr;
-        }
-        ExprNodeGenericFuncDesc funcDesc = null;
-        if (expr instanceof ExprNodeGenericFuncDesc) {
-            funcDesc = (ExprNodeGenericFuncDesc) expr;
-        }
-        if (null == funcDesc) {
-            return expr;
-        }
-        GenericUDF udf = funcDesc.getGenericUDF();
-        // Check if it's a simple cast expression.
-        if ((udf instanceof GenericUDFBridge || udf instanceof GenericUDFToBinary || udf
-                instanceof GenericUDFToChar
-                || udf instanceof GenericUDFToVarchar || udf instanceof GenericUDFToDecimal
-                || udf instanceof GenericUDFToDate || udf instanceof GenericUDFToUnixTimeStamp
-                || udf instanceof GenericUDFToUtcTimestamp) && funcDesc.getChildren().size() == 1
-                && funcDesc.getChildren().get(0) instanceof ExprNodeColumnDesc) {
-            return expr.getChildren().get(0);
-        }
-        return expr;
-    }
-
-    private void processingBetweenOperator(ExprNodeGenericFuncDesc expr,
-                                           List<IndexSearchCondition> searchConditions, Object...
-                                                   nodeOutputs) {
-        ExprNodeColumnDesc columnDesc = null;
-        String[] fields = null;
-
-        if (nodeOutputs[1] instanceof ExprNodeFieldDesc) {
-            // rowKey field
-            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[1];
-            fields = ExprNodeDescUtils.extractFields(fieldDesc);
-
-            ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair((ExprNodeDesc)
-                    nodeOutputs[1], (ExprNodeDesc) nodeOutputs[2]);
-            columnDesc = (ExprNodeColumnDesc) extracted[0];
-        } else if (nodeOutputs[0] instanceof ExprNodeGenericFuncDesc) {
-            columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[1])
-                    .getChildren().get(0);
-        } else {
-            columnDesc = (ExprNodeColumnDesc) nodeOutputs[1];
-        }
-
-        String udfName = expr.getGenericUDF().getUdfName();
-        ExprNodeConstantDesc[] betweenConstants = new ExprNodeConstantDesc[]{
-                (ExprNodeConstantDesc) nodeOutputs[2], (ExprNodeConstantDesc) nodeOutputs[3]};
-        boolean isNot = (Boolean) ((ExprNodeConstantDesc) nodeOutputs[0]).getValue();
-
-        searchConditions.add(new IndexSearchCondition(columnDesc, udfName, betweenConstants,
-                expr, fields, isNot));
-    }
-
-    private void processingInOperator(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition>
-            searchConditions, boolean isNot, Object... nodeOutputs) {
-        ExprNodeColumnDesc columnDesc = null;
-        String[] fields = null;
-
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Processing In Operator. nodeOutputs : " + Lists.newArrayList(nodeOutputs));
-        }
-
-        if (nodeOutputs[0] instanceof ExprNodeFieldDesc) {
-            // rowKey field
-            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[0];
-            fields = ExprNodeDescUtils.extractFields(fieldDesc);
-
-            ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair((ExprNodeDesc)
-                    nodeOutputs[0], (ExprNodeDesc) nodeOutputs[1]);
-
-            if (extracted == null) {    // adding for tez
-                return;
-            }
-
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("nodeOutputs[0] : " + nodeOutputs[0] + ", nodeOutputs[1] : " +
-                        nodeOutputs[1] + " => " + Lists.newArrayList(extracted));
-            }
-
-            columnDesc = (ExprNodeColumnDesc) extracted[0];
-        } else if (nodeOutputs[0] instanceof ExprNodeGenericFuncDesc) {
-            columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[0])
-                    .getChildren().get(0);
-        } else {
-            columnDesc = (ExprNodeColumnDesc) nodeOutputs[0];
-        }
-
-        String udfName = expr.getGenericUDF().getUdfName();
-        ExprNodeConstantDesc[] inConstantDescs = new ExprNodeConstantDesc[nodeOutputs.length - 1];
-
-        for (int i = 0, limit = inConstantDescs.length; i < limit; i++) {
-            if (!(nodeOutputs[i + 1] instanceof ExprNodeConstantDesc)) {    // adding for tez
-                return;
-            }
-
-            inConstantDescs[i] = (ExprNodeConstantDesc) nodeOutputs[i + 1];
-        }
-
-        searchConditions.add(new IndexSearchCondition(columnDesc, udfName, inConstantDescs, expr,
-                fields, isNot));
-    }
-
-    private void processingNullOperator(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition>
-            searchConditions, Object... nodeOutputs) {
-        ExprNodeColumnDesc columnDesc = null;
-        String[] fields = null;
-
-        if (nodeOutputs[0] instanceof ExprNodeFieldDesc) {
-            // rowKey field
-            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[0];
-            fields = ExprNodeDescUtils.extractFields(fieldDesc);
-
-            ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair((ExprNodeDesc)
-                    nodeOutputs[0], new ExprNodeConstantDesc());
-            columnDesc = (ExprNodeColumnDesc) extracted[0];
-        } else if (nodeOutputs[0] instanceof ExprNodeGenericFuncDesc) {
-            columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[0])
-                    .getChildren().get(0);
-        } else {
-            columnDesc = (ExprNodeColumnDesc) nodeOutputs[0];
-        }
-
-        String udfName = expr.getGenericUDF().getUdfName();
-
-        searchConditions.add(new IndexSearchCondition(columnDesc, udfName, null, expr, fields,
-                false));
-    }
-
-    private void processingNotNullOperator(ExprNodeGenericFuncDesc expr,
-                                           List<IndexSearchCondition> searchConditions, Object...
-                                                   nodeOutputs) {
-        ExprNodeColumnDesc columnDesc = null;
-        String[] fields = null;
-
-        if (nodeOutputs[0] instanceof ExprNodeFieldDesc) {
-            // rowKey field
-            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[0];
-            fields = ExprNodeDescUtils.extractFields(fieldDesc);
-
-            ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair((ExprNodeDesc)
-                    nodeOutputs[0], new ExprNodeConstantDesc());
-            columnDesc = (ExprNodeColumnDesc) extracted[0];
-        } else if (nodeOutputs[0] instanceof ExprNodeGenericFuncDesc) {
-            columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[0])
-                    .getChildren().get(0);
-        } else {
-            columnDesc = (ExprNodeColumnDesc) nodeOutputs[0];
-        }
-
-        String udfName = expr.getGenericUDF().getUdfName();
-
-        searchConditions.add(new IndexSearchCondition(columnDesc, udfName, null, expr, fields,
-                true));
-    }
-
-    private ExprNodeDesc analyzeExpr(ExprNodeGenericFuncDesc expr, List<IndexSearchCondition>
-            searchConditions, Object... nodeOutputs) throws SemanticException {
-
-        if (FunctionRegistry.isOpAnd(expr)) {
-            assert (nodeOutputs.length == 2);
-            ExprNodeDesc residual1 = (ExprNodeDesc)nodeOutputs[0];
-            ExprNodeDesc residual2 = (ExprNodeDesc)nodeOutputs[1];
-            if (residual1 == null) { return residual2; }
-            if (residual2 == null) { return residual1; }
-            List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
-            residuals.add(residual1);
-            residuals.add(residual2);
-
-            return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, FunctionRegistry
-                    .getGenericUDFForAnd(), residuals);
-        }
-
-        GenericUDF genericUDF = expr.getGenericUDF();
-        if (!(genericUDF instanceof GenericUDFBaseCompare)) {
-            // 2015-10-22 Added by JeongMin Ju : Processing Between/In Operator
-            if (genericUDF instanceof GenericUDFBetween) {
-                // For NOT BETWEEN, the first element of nodeOutputs is true;
-                // otherwise it is false.
-                processingBetweenOperator(expr, searchConditions, nodeOutputs);
-                return expr;
-            } else if (genericUDF instanceof GenericUDFIn) {
-                // For NOT IN, the IN operator exists as a child of the NOT operator.
-                processingInOperator(expr, searchConditions, false, nodeOutputs);
-                return expr;
-            } else if (genericUDF instanceof GenericUDFOPNot &&
-                    ((ExprNodeGenericFuncDesc) expr.getChildren().get(0)).getGenericUDF()
-                            instanceof GenericUDFIn) {
-                // For NOT IN, the IN operator exists as a child of the NOT operator.
-                processingInOperator((ExprNodeGenericFuncDesc) expr.getChildren().get(0),
-                        searchConditions, true, ((ExprNodeGenericFuncDesc) nodeOutputs[0])
-                                .getChildren().toArray());
-                return expr;
-            } else if (genericUDF instanceof GenericUDFOPNull) {
-                processingNullOperator(expr, searchConditions, nodeOutputs);
-                return expr;
-            } else if (genericUDF instanceof GenericUDFOPNotNull) {
-                processingNotNullOperator(expr, searchConditions, nodeOutputs);
-                return expr;
-            } else {
-                return expr;
-            }
-        }
-        ExprNodeDesc expr1 = (ExprNodeDesc) nodeOutputs[0];
-        ExprNodeDesc expr2 = (ExprNodeDesc) nodeOutputs[1];
-        // We may need to peel off the GenericUDFBridge that is added by CBO or
-        // the user.
-        if (expr1.getTypeInfo().equals(expr2.getTypeInfo())) {
-            expr1 = getColumnExpr(expr1);
-            expr2 = getColumnExpr(expr2);
-        }
-
-        ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(expr1, expr2);
-        if (extracted == null || (extracted.length > 2 && !acceptsFields)) {
-            return expr;
-        }
-
-        ExprNodeColumnDesc columnDesc;
-        ExprNodeConstantDesc constantDesc;
-        if (extracted[0] instanceof ExprNodeConstantDesc) {
-            genericUDF = genericUDF.flip();
-            columnDesc = (ExprNodeColumnDesc) extracted[1];
-            constantDesc = (ExprNodeConstantDesc) extracted[0];
-        } else {
-            columnDesc = (ExprNodeColumnDesc) extracted[0];
-            constantDesc = (ExprNodeConstantDesc) extracted[1];
-        }
-
-        Set<String> allowed = columnToUDFs.get(columnDesc.getColumn());
-        if (allowed == null) {
-            return expr;
-        }
-
-        String udfName = genericUDF.getUdfName();
-        if (!allowed.contains(udfName)) {
-            return expr;
-        }
-
-        String[] fields = null;
-        if (extracted.length > 2) {
-            ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) extracted[2];
-            if (!isValidField(fieldDesc)) {
-                return expr;
-            }
-            fields = ExprNodeDescUtils.extractFields(fieldDesc);
-        }
-
-        // We also need to update the expr so that the index query can be
-        // generated.
-        // Note that Hive does not support UDFToDouble etc. in the query text.
-        List<ExprNodeDesc> list = new ArrayList<ExprNodeDesc>();
-        list.add(expr1);
-        list.add(expr2);
-        expr = new ExprNodeGenericFuncDesc(expr.getTypeInfo(), expr.getGenericUDF(), list);
-
-        searchConditions.add(new IndexSearchCondition(columnDesc, udfName, constantDesc, expr,
-                fields));
-
-        // we converted the expression to a search condition, so
-        // remove it from the residual predicate
-        return fields == null ? null : expr;
-    }
-
-    private boolean isValidField(ExprNodeFieldDesc field) {
-        return fieldValidator == null || fieldValidator.validate(field);
-    }
-
-    /**
-     * Translates search conditions back to ExprNodeDesc form (as a left-deep
-     * conjunction).
-     *
-     * @param searchConditions (typically produced by analyzePredicate)
-     * @return ExprNodeGenericFuncDesc form of search conditions
-     */
-    public ExprNodeGenericFuncDesc translateSearchConditions(List<IndexSearchCondition>
-                                                                     searchConditions) {
-
-        ExprNodeGenericFuncDesc expr = null;
-
-        for (IndexSearchCondition searchCondition : searchConditions) {
-            if (expr == null) {
-                expr = searchCondition.getComparisonExpr();
-                continue;
-            }
-
-            List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>();
-            children.add(expr);
-            children.add(searchCondition.getComparisonExpr());
-            expr = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, FunctionRegistry
-                    .getGenericUDFForAnd(), children);
-        }
-
-        return expr;
-    }
-
-    public void setAcceptsFields(boolean acceptsFields) {
-        this.acceptsFields = acceptsFields;
-    }
-
-    public static interface FieldValidator {
-        boolean validate(ExprNodeFieldDesc exprNodeDesc);
-    }
-
-    public static IndexPredicateAnalyzer createAnalyzer(boolean equalOnly) {
-        IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual");
-
-        if (equalOnly) {
-            return analyzer;
-        }
-
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic" +
-                ".GenericUDFOPEqualOrGreaterThan");
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic" +
-                ".GenericUDFOPEqualOrLessThan");
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan");
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan");
-
-        // apply !=
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual");
-        // apply (NOT) BETWEEN
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween");
-        // apply (NOT) IN
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn");
-        // apply IS NULL / IS NOT NULL
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull");
-        analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull");
-
-        return analyzer;
-    }
-}
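For context, the deleted IndexPredicateAnalyzer is driven in two steps: register the comparison operators and columns eligible for pushdown, then split a Hive predicate into pushable search conditions and a residual expression. A minimal sketch of that flow, using only methods defined in the class removed above; the hivePredicate argument and the "ID" column name are illustrative assumptions, not taken from the removed code:

    import java.util.List;

    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    import org.apache.phoenix.hive.ql.index.IndexPredicateAnalyzer;
    import org.apache.phoenix.hive.ql.index.IndexSearchCondition;

    public class PredicatePushdownSketch {
        // Returns the residual predicate that Hive must still evaluate; the
        // pushable conditions are collected into searchConditions.
        public static ExprNodeDesc decompose(ExprNodeDesc hivePredicate,
                                             List<IndexSearchCondition> searchConditions) {
            // Registers =, <, <=, >, >=, !=, BETWEEN, IN and IS [NOT] NULL.
            IndexPredicateAnalyzer analyzer = IndexPredicateAnalyzer.createAnalyzer(false);
            // Only columns registered here are considered for pushdown (assumed column name).
            analyzer.allowColumnName("ID");
            return analyzer.analyzePredicate(hivePredicate, searchConditions);
        }
    }

If needed, translateSearchConditions rebuilds the collected conditions into a single left-deep AND expression.
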
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/query/PhoenixQueryBuilder.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/query/PhoenixQueryBuilder.java
deleted file mode 100644
index bb32996..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/query/PhoenixQueryBuilder.java
+++ /dev/null
@@ -1,849 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.query;
-
-import com.google.common.base.CharMatcher;
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
-import com.google.common.base.Predicate;
-import com.google.common.base.Splitter;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import javax.annotation.Nullable;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
-import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
-import org.apache.phoenix.hive.util.ColumnMappingUtils;
-import org.apache.phoenix.hive.util.PhoenixStorageHandlerUtil;
-import org.apache.phoenix.hive.util.PhoenixUtil;
-import org.apache.phoenix.util.StringUtil;
-
-import static org.apache.phoenix.hive.util.ColumnMappingUtils.getColumnMappingMap;
-
-/**
- * Query builder. Produces a query from the column list and conditions.
- */
-public class PhoenixQueryBuilder {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixQueryBuilder.class);
-
-    private static final String QUERY_TEMPLATE = "select $HINT$ $COLUMN_LIST$ from $TABLE_NAME$";
-
-    private static final PhoenixQueryBuilder QUERY_BUILDER = new PhoenixQueryBuilder();
-
-    private PhoenixQueryBuilder() {
-        if (LOG.isInfoEnabled()) {
-            LOG.info("PhoenixQueryBuilder created");
-        }
-    }
-
-    public static PhoenixQueryBuilder getInstance() {
-        return QUERY_BUILDER;
-    }
-
-    private void addConditionColumnToReadColumn(List<String> readColumnList, List<String>
-            conditionColumnList) {
-        if (readColumnList.isEmpty()) {
-            return;
-        }
-
-        for (String conditionColumn : conditionColumnList) {
-            if (!readColumnList.contains(conditionColumn)) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Condition column " + conditionColumn + " does not exist in " +
-                            "read-columns.");
-                }
-
-                readColumnList.add(conditionColumn);
-            }
-        }
-    }
-
-    private String makeQueryString(JobConf jobConf, String tableName, List<String>
-            readColumnList, String whereClause, String queryTemplate, String hints, Map<String,
-            TypeInfo> columnTypeMap) throws IOException {
-        StringBuilder sql = new StringBuilder();
-        List<String> conditionColumnList = buildWhereClause(jobConf, sql, whereClause, columnTypeMap);
-        readColumnList  = replaceColumns(jobConf, readColumnList);
-
-        if (conditionColumnList.size() > 0) {
-            addConditionColumnToReadColumn(readColumnList, conditionColumnList);
-            sql.insert(0, queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
-                    getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
-                    tableName));
-        } else {
-            sql.append(queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
-                    getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
-                    tableName));
-        }
-
-        if (LOG.isInfoEnabled()) {
-            LOG.info("Input query : " + sql.toString());
-        }
-
-        return sql.toString();
-    }
-
-    private static String findReplacement(JobConf jobConf, String column) {
-        Map<String, String> columnMappingMap = getColumnMappingMap(jobConf.get
-                (PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING));
-        if (columnMappingMap != null && columnMappingMap.containsKey(column)) {
-            return columnMappingMap.get(column);
-        } else {
-            return column;
-        }
-    }
-    private static List<String> replaceColumns(JobConf jobConf, List<String> columnList) {
-        Map<String, String> columnMappingMap = getColumnMappingMap(jobConf.get
-                (PhoenixStorageHandlerConstants.PHOENIX_COLUMN_MAPPING));
-        if(columnMappingMap != null) {
-          List<String> newList = Lists.newArrayList();
-            for(String column:columnList) {
-                if(columnMappingMap.containsKey(column)) {
-                    newList.add(columnMappingMap.get(column));
-                } else {
-                    newList.add(column);
-                }
-            }
-            return newList;
-        }
-        return null;
-    }
-
-    private String makeQueryString(JobConf jobConf, String tableName, List<String>
-            readColumnList, List<IndexSearchCondition> searchConditions, String queryTemplate,
-                                   String hints) throws IOException {
-        StringBuilder query = new StringBuilder();
-        List<String> conditionColumnList = buildWhereClause(jobConf, query, searchConditions);
-
-        if (conditionColumnList.size() > 0) {
-            readColumnList  = replaceColumns(jobConf, readColumnList);
-            addConditionColumnToReadColumn(readColumnList, conditionColumnList);
-            query.insert(0, queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
-                    getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
-                    tableName));
-        } else {
-            readColumnList  = replaceColumns(jobConf, readColumnList);
-            query.append(queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
-                    getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
-                    tableName));
-        }
-
-        if (LOG.isInfoEnabled()) {
-            LOG.info("Input query : " + query.toString());
-        }
-
-        return query.toString();
-    }
-
-    private String getSelectColumns(JobConf jobConf, String tableName, List<String>
-            readColumnList) throws IOException {
-        String selectColumns = Joiner.on(PhoenixStorageHandlerConstants.COMMA).join(ColumnMappingUtils.quoteColumns(readColumnList));
-
-        if (PhoenixStorageHandlerConstants.EMPTY_STRING.equals(selectColumns)) {
-            selectColumns = "*";
-        } else {
-            if (PhoenixStorageHandlerUtil.isTransactionalTable(jobConf)) {
-                List<String> pkColumnList = PhoenixUtil.getPrimaryKeyColumnList(jobConf, tableName);
-                StringBuilder pkColumns = new StringBuilder();
-
-                for (String pkColumn : pkColumnList) {
-                    if (!readColumnList.contains(pkColumn)) {
-                        pkColumns.append("\"").append(pkColumn).append("\"" + PhoenixStorageHandlerConstants.COMMA);
-                    }
-                }
-
-                selectColumns = pkColumns.toString() + selectColumns;
-            }
-        }
-
-        return selectColumns;
-    }
-
-    public String buildQuery(JobConf jobConf, String tableName, List<String> readColumnList,
-                             String whereClause, Map<String, TypeInfo> columnTypeMap) throws
-            IOException {
-        String hints = getHint(jobConf, tableName);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Building query with columns : " + readColumnList + " table name : " +
-                    tableName + "  with where conditions : " + whereClause + "  hints : " + hints);
-        }
-
-        return makeQueryString(jobConf, tableName, Lists.newArrayList(readColumnList),
-                whereClause, QUERY_TEMPLATE, hints, columnTypeMap);
-    }
-
-    public String buildQuery(JobConf jobConf, String tableName, List<String> readColumnList,
-                             List<IndexSearchCondition> searchConditions) throws IOException {
-        String hints = getHint(jobConf, tableName);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Building query with columns : " + readColumnList + "  table name : " +
-                    tableName + " search conditions : " + searchConditions + "  hints : " + hints);
-        }
-
-        return makeQueryString(jobConf, tableName, Lists.newArrayList(readColumnList),
-                searchConditions, QUERY_TEMPLATE, hints);
-    }
-
-    private String getHint(JobConf jobConf, String tableName) {
-        StringBuilder hints = new StringBuilder("/*+ ");
-        if (!jobConf.getBoolean(PhoenixStorageHandlerConstants.HBASE_SCAN_CACHEBLOCKS, Boolean
-                .FALSE)) {
-            hints.append("NO_CACHE ");
-        }
-
-        String queryHint = jobConf.get(tableName + PhoenixStorageHandlerConstants
-                .PHOENIX_TABLE_QUERY_HINT);
-        if (queryHint != null) {
-            hints.append(queryHint);
-        }
-        hints.append(" */");
-
-        return hints.toString();
-    }
-
-    private List<String> buildWhereClause(JobConf jobConf, StringBuilder sql, String whereClause,
-                                          Map<String, TypeInfo> columnTypeMap) throws IOException {
-        if (whereClause == null || whereClause.isEmpty()) {
-            return Collections.emptyList();
-        }
-
-        List<String> conditionColumnList = Lists.newArrayList();
-        sql.append(" where ");
-
-        whereClause = StringUtils.replaceEach(whereClause, new String[]{"UDFToString"}, new
-                String[]{"to_char"});
-
-        for (String columnName : columnTypeMap.keySet()) {
-            if (whereClause.contains(columnName)) {
-                String column = findReplacement(jobConf, columnName);
-                whereClause = whereClause.replaceAll("\\b" + columnName + "\\b", "\"" + column + "\"");
-                conditionColumnList.add(column);
-
-                if (PhoenixStorageHandlerConstants.DATE_TYPE.equals(
-                        columnTypeMap.get(columnName).getTypeName())) {
-                    whereClause = applyDateFunctionUsingRegex(whereClause, column);
-                } else if (PhoenixStorageHandlerConstants.TIMESTAMP_TYPE.equals(
-                        columnTypeMap.get(columnName).getTypeName())) {
-                    whereClause = applyTimestampFunctionUsingRegex(whereClause, column);
-                }
-            }
-        }
-
-        sql.append(whereClause);
-
-        return conditionColumnList;
-    }
-
-    private String applyDateFunctionUsingRegex(String whereClause, String columnName) {
-        whereClause = applyFunctionForCommonOperator(whereClause, columnName, true);
-        whereClause = applyFunctionForBetweenOperator(whereClause, columnName, true);
-        whereClause = applyFunctionForInOperator(whereClause, columnName, true);
-
-        return whereClause;
-    }
-
-    private String applyTimestampFunctionUsingRegex(String whereClause, String columnName) {
-        whereClause = applyFunctionForCommonOperator(whereClause, columnName, false);
-        whereClause = applyFunctionForBetweenOperator(whereClause, columnName, false);
-        whereClause = applyFunctionForInOperator(whereClause, columnName, false);
-
-        return whereClause;
-    }
-
-    private String applyFunctionForCommonOperator(String whereClause, String columnName, boolean
-            isDate) {
-        String targetPattern = isDate ? PhoenixStorageHandlerConstants.DATE_PATTERN :
-                PhoenixStorageHandlerConstants.TIMESTAMP_PATTERN;
-        String pattern = StringUtils.replaceEach(PhoenixStorageHandlerConstants
-                        .COMMON_OPERATOR_PATTERN,
-                new String[]{PhoenixStorageHandlerConstants.COLUMNE_MARKER,
-                        PhoenixStorageHandlerConstants.PATERN_MARKER}, new String[]{columnName,
-                        targetPattern});
-
-        Matcher matcher = Pattern.compile(pattern).matcher(whereClause);
-
-        while (matcher.find()) {
-            String token = matcher.group(1);
-            String datePart = matcher.group(3);
-
-            String convertString = token.replace(datePart, applyFunction(isDate ?
-                    PhoenixStorageHandlerConstants.DATE_FUNCTION_TEMPLETE :
-                    PhoenixStorageHandlerConstants.TIMESTAMP_FUNCTION_TEMPLATE, datePart));
-            whereClause = whereClause.replaceAll(StringUtils.replaceEach(token, new String[]{"(",
-                    ")"}, new String[]{"\\(", "\\)"}), convertString);
-        }
-
-        return whereClause;
-    }
-
-    private String applyFunctionForBetweenOperator(String whereClause, String columnName, boolean
-            isDate) {
-        String targetPattern = isDate ? PhoenixStorageHandlerConstants.DATE_PATTERN :
-                PhoenixStorageHandlerConstants.TIMESTAMP_PATTERN;
-        String pattern = StringUtils.replaceEach(PhoenixStorageHandlerConstants
-                        .BETWEEN_OPERATOR_PATTERN,
-                new String[]{PhoenixStorageHandlerConstants.COLUMNE_MARKER,
-                        PhoenixStorageHandlerConstants.PATERN_MARKER}, new String[]{columnName,
-                        targetPattern});
-
-        Matcher matcher = Pattern.compile(pattern).matcher(whereClause);
-
-        while (matcher.find()) {
-            String token = matcher.group(1);
-            boolean isNot = matcher.group(2) != null;
-            String fromDate = matcher.group(3);
-            String toDate = matcher.group(4);
-
-            String convertString = StringUtils.replaceEach(token, new String[]{fromDate, toDate},
-                    new String[]{applyFunction(isDate ? PhoenixStorageHandlerConstants
-                            .DATE_FUNCTION_TEMPLETE : PhoenixStorageHandlerConstants
-                            .TIMESTAMP_FUNCTION_TEMPLATE, fromDate),
-                            applyFunction(isDate ? PhoenixStorageHandlerConstants
-                                    .DATE_FUNCTION_TEMPLETE : PhoenixStorageHandlerConstants
-                                    .TIMESTAMP_FUNCTION_TEMPLATE, toDate)});
-
-            whereClause = whereClause.replaceAll(pattern, convertString);
-        }
-
-        return whereClause;
-    }
-
-    private String applyFunctionForInOperator(String whereClause, String columnName, boolean
-            isDate) {
-        String targetPattern = isDate ? PhoenixStorageHandlerConstants.DATE_PATTERN :
-                PhoenixStorageHandlerConstants.TIMESTAMP_PATTERN;
-        String pattern = StringUtils.replaceEach(PhoenixStorageHandlerConstants.IN_OPERATOR_PATTERN,
-                new String[]{PhoenixStorageHandlerConstants.COLUMNE_MARKER,
-                        PhoenixStorageHandlerConstants.PATERN_MARKER}, new String[]{columnName,
-                        targetPattern});
-        String itemPattern = "(" + targetPattern + ")";
-
-        Matcher matcher = Pattern.compile(pattern).matcher(whereClause);
-
-        while (matcher.find()) {
-            String token = matcher.group(1);
-            Matcher itemMatcher = Pattern.compile(itemPattern).matcher(token);
-            while (itemMatcher.find()) {
-                String item = itemMatcher.group(1);
-
-                token = token.replace(item, applyFunction(isDate ? PhoenixStorageHandlerConstants
-                        .DATE_FUNCTION_TEMPLETE : PhoenixStorageHandlerConstants
-                        .TIMESTAMP_FUNCTION_TEMPLATE, item));
-            }
-
-            whereClause = whereClause.replaceAll(pattern, token);
-        }
-
-        return whereClause;
-    }
-
-    /**
-     * Substitutes the value into the placeholder of the given function template.
-     * If the pattern is to_date($value$) and the value is '2016-01-15', it returns to_date('2016-01-15').
-     * If the pattern is cast($value$ as date) and the value is '2016-01-15', it returns
-     * cast('2016-01-15' as date).
-     */
-    private String applyFunction(String pattern, String value) {
-        if (!value.startsWith(PhoenixStorageHandlerConstants.QUOTATION_MARK)) {
-            value = PhoenixStorageHandlerConstants.QUOTATION_MARK + value +
-                    PhoenixStorageHandlerConstants.QUOTATION_MARK;
-        }
-
-        return pattern.replace(PhoenixStorageHandlerConstants.FUNCTION_VALUE_MARKER, value);
-    }
-
-    private String getCompareValueForDateAndTimestampFunction(String compareValue) {
-        if (compareValue.startsWith(PhoenixStorageHandlerConstants.QUOTATION_MARK)) {
-            return compareValue;
-        } else {
-            return PhoenixStorageHandlerConstants.QUOTATION_MARK + compareValue +
-                    PhoenixStorageHandlerConstants.QUOTATION_MARK;
-        }
-    }
-
-    private String applyDateFunction(String whereClause, String columnName) {
-        StringBuilder whereCondition = new StringBuilder();
-        for (Iterator<String> iterator = Splitter.on(CharMatcher.WHITESPACE).omitEmptyStrings()
-                .split(whereClause).iterator(); iterator.hasNext(); whereCondition.append
-                (PhoenixStorageHandlerConstants.SPACE)) {
-            String token = iterator.next();
-            if (isMyCondition(columnName, token)) {
-                whereCondition.append(token);
-
-                String comparator = iterator.next();
-                whereCondition.append(PhoenixStorageHandlerConstants.SPACE);
-                whereCondition.append(comparator).append(PhoenixStorageHandlerConstants.SPACE);
-                if (PhoenixStorageHandlerConstants.BETWEEN_COMPARATOR.equalsIgnoreCase
-                        (comparator)) {
-                    whereCondition.append("to_date(").append
-                            (getCompareValueForDateAndTimestampFunction(iterator.next())).append
-                            (") ").append(iterator.next()).append(PhoenixStorageHandlerConstants
-                            .SPACE)
-                            .append("to_date(");
-
-                    String toCompareValue = iterator.next();
-                    if (toCompareValue.endsWith(PhoenixStorageHandlerConstants
-                            .RIGHT_ROUND_BRACKET)) {
-                        int rightBracketIndex = toCompareValue.indexOf
-                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
-                        whereCondition.append(getCompareValueForDateAndTimestampFunction
-                                (toCompareValue.substring(0, rightBracketIndex))).append
-                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
-                                (toCompareValue.substring(rightBracketIndex));
-                    } else {
-                        whereCondition.append(getCompareValueForDateAndTimestampFunction
-                                (toCompareValue)).append(PhoenixStorageHandlerConstants
-                                .RIGHT_ROUND_BRACKET);
-                    }
-                } else if (PhoenixStorageHandlerConstants.IN_COMPARATOR.equalsIgnoreCase
-                        (comparator)) {
-                    while (iterator.hasNext()) {
-                        String aToken = iterator.next();
-                        if (aToken.equals(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) ||
-                                aToken.equals(PhoenixStorageHandlerConstants.COMMA)) {
-                            whereCondition.append(aToken);
-                        } else if (aToken.equals(PhoenixStorageHandlerConstants
-                                .RIGHT_ROUND_BRACKET)) {
-                            whereCondition.append(aToken);
-                            break;
-                        } else if (aToken.endsWith(PhoenixStorageHandlerConstants
-                                .RIGHT_ROUND_BRACKET)) {
-                            int bracketIndex = aToken.indexOf(PhoenixStorageHandlerConstants
-                                    .RIGHT_ROUND_BRACKET);
-                            whereCondition.append("to_date(").append
-                                    (getCompareValueForDateAndTimestampFunction(aToken.substring
-                                            (0, bracketIndex))).append
-                                    (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
-                                    (aToken.substring(bracketIndex));
-                            break;
-                        } else if (aToken.endsWith(PhoenixStorageHandlerConstants.COMMA)) {
-                            if (aToken.startsWith(PhoenixStorageHandlerConstants
-                                    .LEFT_ROUND_BRACKET)) {
-                                int bracketIndex = aToken.lastIndexOf
-                                        (PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET);
-                                whereCondition.append(aToken.substring(0, bracketIndex + 1))
-                                        .append("to_date(").append
-                                        (getCompareValueForDateAndTimestampFunction(aToken
-                                                .substring(bracketIndex + 1, aToken.length() - 1)
-                                        )).append("),");
-                            } else {
-                                whereCondition.append("to_date(").append
-                                        (getCompareValueForDateAndTimestampFunction(aToken
-                                                .substring(0, aToken.length() - 1))).append("),");
-                            }
-                        }
-
-                        whereCondition.append(PhoenixStorageHandlerConstants.SPACE);
-                    }
-                } else if (PhoenixStorageHandlerConstants.COMMON_COMPARATOR.contains(comparator)) {
-                    String compareValue = getCompareValueForDateAndTimestampFunction(iterator
-                            .next());
-                    whereCondition.append("to_date(");
-                    if (compareValue.endsWith(PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET)) {
-                        int rightBracketIndex = compareValue.indexOf
-                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
-                        whereCondition.append(getCompareValueForDateAndTimestampFunction
-                                (compareValue.substring(0, rightBracketIndex))).append
-                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
-                                (compareValue.substring(rightBracketIndex));
-                    } else {
-                        whereCondition.append(getCompareValueForDateAndTimestampFunction
-                                (compareValue)).append(PhoenixStorageHandlerConstants
-                                .RIGHT_ROUND_BRACKET);
-                    }
-                }
-            } else {
-                whereCondition.append(token);
-            }
-        }
-
-        return whereCondition.toString();
-    }
-
-    // Assumes the timestamp value has the format yyyy-MM-dd HH:mm:ss.SSS.
-    private String applyTimestampFunction(String whereClause, String columnName) {
-        StringBuilder whereCondition = new StringBuilder();
-        for (Iterator<String> iterator = Splitter.on(CharMatcher.WHITESPACE).omitEmptyStrings()
-                .split(whereClause).iterator(); iterator.hasNext(); whereCondition.append
-                (PhoenixStorageHandlerConstants.SPACE)) {
-            String token = iterator.next();
-            if (isMyCondition(columnName, token)) {
-                whereCondition.append(token);
-
-                String comparator = iterator.next();
-                whereCondition.append(PhoenixStorageHandlerConstants.SPACE);
-                whereCondition.append(comparator).append(PhoenixStorageHandlerConstants.SPACE);
-                if (PhoenixStorageHandlerConstants.BETWEEN_COMPARATOR.equalsIgnoreCase
-                        (comparator)) {
-                    String fromCompareValue = iterator.next() + PhoenixStorageHandlerConstants
-                            .SPACE + iterator.next();
-                    whereCondition.append("to_timestamp(").append
-                            (getCompareValueForDateAndTimestampFunction(fromCompareValue)).append
-                            (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
-                    whereCondition.append(PhoenixStorageHandlerConstants.SPACE).append(iterator
-                            .next()).append(PhoenixStorageHandlerConstants.SPACE);
-                    whereCondition.append("to_timestamp(");
-
-                    String toCompareValue = iterator.next() + PhoenixStorageHandlerConstants
-                            .SPACE + iterator.next();
-                    if (toCompareValue.endsWith(PhoenixStorageHandlerConstants
-                            .RIGHT_ROUND_BRACKET)) {
-                        int rightBracketIndex = toCompareValue.indexOf
-                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
-                        whereCondition.append(getCompareValueForDateAndTimestampFunction
-                                (toCompareValue.substring(0, rightBracketIndex))).append
-                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
-                                (toCompareValue.substring(rightBracketIndex));
-                    } else {
-                        whereCondition.append(getCompareValueForDateAndTimestampFunction
-                                (toCompareValue)).append(PhoenixStorageHandlerConstants
-                                .RIGHT_ROUND_BRACKET);
-                    }
-                } else if (PhoenixStorageHandlerConstants.IN_COMPARATOR.equalsIgnoreCase
-                        (comparator)) {
-                    while (iterator.hasNext()) {
-                        String aToken = iterator.next();
-                        if (aToken.equals(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) ||
-                                aToken.equals(PhoenixStorageHandlerConstants.COMMA)) {
-                            whereCondition.append(aToken);
-                        } else if (aToken.equals(PhoenixStorageHandlerConstants
-                                .RIGHT_ROUND_BRACKET)) {
-                            whereCondition.append(aToken);
-                            break;
-                        } else {
-                            String compareValue = aToken + PhoenixStorageHandlerConstants.SPACE +
-                                    iterator.next();
-
-                            if (compareValue.startsWith(PhoenixStorageHandlerConstants
-                                    .LEFT_ROUND_BRACKET)) {
-                                int leftBracketIndex = compareValue.lastIndexOf
-                                        (PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET);
-                                whereCondition.append(compareValue.substring(0, leftBracketIndex
-                                        + 1)).append("to_timestamp(");
-
-                                if (compareValue.endsWith(PhoenixStorageHandlerConstants
-                                        .RIGHT_ROUND_BRACKET)) {
-                                    int rightBracketIndex = compareValue.indexOf
-                                            (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
-                                    whereCondition.append
-                                            (getCompareValueForDateAndTimestampFunction
-                                                    (compareValue.substring(leftBracketIndex + 1,
-                                                            rightBracketIndex)))
-                                            .append(PhoenixStorageHandlerConstants
-                                                    .RIGHT_ROUND_BRACKET).append(compareValue
-                                            .substring(rightBracketIndex));
-                                } else if (compareValue.endsWith(PhoenixStorageHandlerConstants
-                                        .COMMA)) {
-                                    whereCondition.append
-                                            (getCompareValueForDateAndTimestampFunction
-                                                    (compareValue.substring(leftBracketIndex + 1,
-                                                            compareValue.length() - 1)))
-                                            .append(PhoenixStorageHandlerConstants
-                                                    .RIGHT_ROUND_BRACKET).append
-                                            (PhoenixStorageHandlerConstants.COMMA);
-                                } else {
-                                    whereCondition.append
-                                            (getCompareValueForDateAndTimestampFunction
-                                                    (compareValue.substring(leftBracketIndex + 1)
-                                                    )).append(PhoenixStorageHandlerConstants
-                                            .RIGHT_ROUND_BRACKET);
-                                }
-                            } else if (compareValue.endsWith(PhoenixStorageHandlerConstants
-                                    .RIGHT_ROUND_BRACKET)) {
-                                int rightBracketIndex = compareValue.indexOf
-                                        (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
-                                whereCondition.append("to_timestamp(").append
-                                        (getCompareValueForDateAndTimestampFunction(compareValue
-                                                .substring(0, rightBracketIndex)))
-                                        .append(PhoenixStorageHandlerConstants
-                                                .RIGHT_ROUND_BRACKET).append(compareValue
-                                        .substring(rightBracketIndex));
-                                break;
-                            } else if (compareValue.endsWith(PhoenixStorageHandlerConstants
-                                    .COMMA)) {
-                                whereCondition.append("to_timestamp(").append
-                                        (getCompareValueForDateAndTimestampFunction(compareValue
-                                                .substring(0, compareValue.length() - 1))).append
-                                        ("),");
-                            }
-                        }
-
-                        whereCondition.append(PhoenixStorageHandlerConstants.SPACE);
-                    }
-                } else if (PhoenixStorageHandlerConstants.COMMON_COMPARATOR.contains(comparator)) {
-                    String timestampValue = iterator.next() + PhoenixStorageHandlerConstants
-                            .SPACE + iterator.next();
-                    whereCondition.append("to_timestamp(");
-                    if (timestampValue.endsWith(PhoenixStorageHandlerConstants
-                            .RIGHT_ROUND_BRACKET)) {
-                        int rightBracketIndex = timestampValue.indexOf
-                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET);
-                        whereCondition.append(getCompareValueForDateAndTimestampFunction
-                                (timestampValue.substring(0, rightBracketIndex))).append
-                                (PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET).append
-                                (timestampValue.substring(rightBracketIndex));
-                    } else {
-                        whereCondition.append(getCompareValueForDateAndTimestampFunction
-                                (timestampValue)).append(PhoenixStorageHandlerConstants
-                                .RIGHT_ROUND_BRACKET);
-                    }
-                }
-            } else {
-                whereCondition.append(token);
-            }
-        }
-
-        return whereCondition.toString();
-    }
-
-    private boolean isMyCondition(String columnName, String token) {
-        boolean itsMine = false;
-
-        if (columnName.equals(token)) {
-            itsMine = true;
-        } else if (token.startsWith(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) && token
-                .substring(token.lastIndexOf(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) +
-                        1).equals(columnName)) {
-            itsMine = true;
-        } else if (token.startsWith(PhoenixStorageHandlerConstants.LEFT_ROUND_BRACKET) && token
-                .endsWith(PhoenixStorageHandlerConstants.RIGHT_ROUND_BRACKET)
-                && token.substring(token.lastIndexOf(PhoenixStorageHandlerConstants
-                .LEFT_ROUND_BRACKET) + 1, token.indexOf(PhoenixStorageHandlerConstants
-                .RIGHT_ROUND_BRACKET)).equals(columnName)) {
-            itsMine = true;
-        }
-
-        return itsMine;
-    }
-
-    protected List<String> buildWhereClause(JobConf jobConf, StringBuilder sql,
-                                            List<IndexSearchCondition> conditions)
-            throws IOException {
-        if (conditions == null || conditions.size() == 0) {
-            return Collections.emptyList();
-        }
-
-        List<String> columns = Lists.newArrayList();
-        sql.append(" where ");
-
-        Iterator<IndexSearchCondition> iter = conditions.iterator();
-        appendExpression(jobConf, sql, iter.next(), columns);
-        while (iter.hasNext()) {
-            sql.append(" and ");
-            appendExpression(jobConf, sql, iter.next(), columns);
-        }
-
-        return columns;
-    }
-
-    private void appendExpression(JobConf jobConf, StringBuilder sql, IndexSearchCondition condition,
-                                  List<String> columns) {
-        Expression expr = findExpression(condition);
-        if (expr != null) {
-            sql.append(expr.buildExpressionStringFrom(jobConf, condition));
-            String column = condition.getColumnDesc().getColumn();
-            String rColumn = findReplacement(jobConf, column);
-            if(rColumn != null) {
-                column = rColumn;
-            }
-
-            columns.add(column);
-        }
-    }
-
-    private Expression findExpression(final IndexSearchCondition condition) {
-        return Iterables.tryFind(Arrays.asList(Expression.values()), new Predicate<Expression>() {
-            @Override
-            public boolean apply(@Nullable Expression expr) {
-                return expr.isFor(condition);
-            }
-        }).orNull();
-    }
-
-    private static final Joiner JOINER_COMMA = Joiner.on(", ");
-    private static final Joiner JOINER_AND = Joiner.on(" and ");
-    private static final Joiner JOINER_SPACE = Joiner.on(" ");
-
-    private enum Expression {
-        EQUAL("UDFOPEqual", "="),
-        GREATER_THAN_OR_EQUAL_TO("UDFOPEqualOrGreaterThan", ">="),
-        GREATER_THAN("UDFOPGreaterThan", ">"),
-        LESS_THAN_OR_EQUAL_TO("UDFOPEqualOrLessThan", "<="),
-        LESS_THAN("UDFOPLessThan", "<"),
-        NOT_EQUAL("UDFOPNotEqual", "!="),
-        BETWEEN("GenericUDFBetween", "between", JOINER_AND, true) {
-            public boolean checkCondition(IndexSearchCondition condition) {
-                return condition.getConstantDescs() != null;
-            }
-        },
-        IN("GenericUDFIn", "in", JOINER_COMMA, true) {
-            public boolean checkCondition(IndexSearchCondition condition) {
-                return condition.getConstantDescs() != null;
-            }
-
-            public String createConstants(final String typeName, ExprNodeConstantDesc[] desc) {
-                return "(" + super.createConstants(typeName, desc) + ")";
-            }
-        },
-        IS_NULL("GenericUDFOPNull", "is null") {
-            public boolean checkCondition(IndexSearchCondition condition) {
-                return true;
-            }
-        },
-        IS_NOT_NULL("GenericUDFOPNotNull", "is not null") {
-            public boolean checkCondition(IndexSearchCondition condition) {
-                return true;
-            }
-        };
-
-        private final String hiveCompOp;
-        private final String sqlCompOp;
-        private final Joiner joiner;
-        private final boolean supportNotOperator;
-
-        Expression(String hiveCompOp, String sqlCompOp) {
-            this(hiveCompOp, sqlCompOp, null);
-        }
-
-        Expression(String hiveCompOp, String sqlCompOp, Joiner joiner) {
-            this(hiveCompOp, sqlCompOp, joiner, false);
-        }
-
-        Expression(String hiveCompOp, String sqlCompOp, Joiner joiner, boolean supportNotOp) {
-            this.hiveCompOp = hiveCompOp;
-            this.sqlCompOp = sqlCompOp;
-            this.joiner = joiner;
-            this.supportNotOperator = supportNotOp;
-        }
-
-        public boolean checkCondition(IndexSearchCondition condition) {
-            return condition.getConstantDesc().getValue() != null;
-        }
-
-        public boolean isFor(IndexSearchCondition condition) {
-            return condition.getComparisonOp().endsWith(hiveCompOp) && checkCondition(condition);
-        }
-
-        public String buildExpressionStringFrom(JobConf jobConf, IndexSearchCondition condition) {
-            final String type = condition.getColumnDesc().getTypeString();
-            String column = condition.getColumnDesc().getColumn();
-            String rColumn = findReplacement(jobConf, column);
-            if(rColumn != null) {
-                column = rColumn;
-            }
-            return JOINER_SPACE.join(
-                    "\"" + column + "\"",
-                    getSqlCompOpString(condition),
-                    joiner != null ? createConstants(type, condition.getConstantDescs()) :
-                            createConstant(type, condition.getConstantDesc()));
-        }
-
-        public String getSqlCompOpString(IndexSearchCondition condition) {
-            return supportNotOperator ?
-                    (condition.isNot() ? "not " : "") + sqlCompOp : sqlCompOp;
-        }
-
-        public String createConstant(String typeName, ExprNodeConstantDesc constantDesc) {
-            if (constantDesc == null) {
-                return StringUtil.EMPTY_STRING;
-            }
-
-            return createConstantString(typeName, String.valueOf(constantDesc.getValue()));
-        }
-
-        public String createConstants(final String typeName, ExprNodeConstantDesc[] constantDesc) {
-            if (constantDesc == null) {
-                return StringUtil.EMPTY_STRING;
-            }
-
-            return joiner.join(Iterables.transform(Arrays.asList(constantDesc),
-                    new Function<ExprNodeConstantDesc, String>() {
-                        @Nullable
-                        @Override
-                        public String apply(@Nullable ExprNodeConstantDesc desc) {
-                            return createConstantString(typeName, String.valueOf(desc.getValue()));
-                        }
-                    }
-            ));
-        }
-
-        private static class ConstantStringWrapper {
-            private List<String> types;
-            private String prefix;
-            private String postfix;
-
-            ConstantStringWrapper(String type, String prefix, String postfix) {
-                this(Lists.newArrayList(type), prefix, postfix);
-            }
-
-            ConstantStringWrapper(List<String> types, String prefix, String postfix) {
-                this.types = types;
-                this.prefix = prefix;
-                this.postfix = postfix;
-            }
-
-            public String apply(final String typeName, String value) {
-                return Iterables.any(types, new Predicate<String>() {
-
-                    @Override
-                    public boolean apply(@Nullable String type) {
-                        return typeName.startsWith(type);
-                    }
-                }) ? prefix + value + postfix : value;
-            }
-        }
-
-        private static final String SINGLE_QUOTATION = "'";
-        private static List<ConstantStringWrapper> WRAPPERS = Lists.newArrayList(
-                new ConstantStringWrapper(Lists.newArrayList(
-                        serdeConstants.STRING_TYPE_NAME, serdeConstants.CHAR_TYPE_NAME,
-                        serdeConstants.VARCHAR_TYPE_NAME, serdeConstants.DATE_TYPE_NAME,
-                        serdeConstants.TIMESTAMP_TYPE_NAME
-                ), SINGLE_QUOTATION, SINGLE_QUOTATION),
-                new ConstantStringWrapper(serdeConstants.DATE_TYPE_NAME, "to_date(", ")"),
-                new ConstantStringWrapper(serdeConstants.TIMESTAMP_TYPE_NAME, "to_timestamp(", ")")
-        );
-
-        private String createConstantString(String typeName, String value) {
-            for (ConstantStringWrapper wrapper : WRAPPERS) {
-                value = wrapper.apply(typeName, value);
-            }
-
-            return value;
-        }
-    }
-}
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
deleted file mode 100644
index d5eb86f..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.util;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-
-/**
- * Set of methods to obtain a Connection, depending on the configuration
- */
-
-public class PhoenixConnectionUtil {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixConnectionUtil.class);
-
-    public static Connection getInputConnection(final Configuration conf, final Properties props)
-            throws SQLException {
-        String quorum = conf.get(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM);
-        quorum = quorum == null ? props.getProperty(PhoenixStorageHandlerConstants
-                .ZOOKEEPER_QUORUM, PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_QUORUM) :
-                quorum;
-
-        int zooKeeperClientPort = conf.getInt(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, 0);
-        zooKeeperClientPort = zooKeeperClientPort == 0 ?
-                Integer.parseInt(props.getProperty(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT,
-                        String.valueOf(PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PORT))) :
-                zooKeeperClientPort;
-
-        String zNodeParent = conf.get(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT);
-        zNodeParent = zNodeParent == null ? props.getProperty(PhoenixStorageHandlerConstants
-                .ZOOKEEPER_PARENT, PhoenixStorageHandlerConstants.DEFAULT_ZOOKEEPER_PARENT) :
-                zNodeParent;
-
-        return getConnection(quorum, zooKeeperClientPort, zNodeParent, PropertiesUtil
-                .combineProperties(props, conf));
-    }
-
-    public static Connection getConnection(final Table table) throws SQLException {
-        Map<String, String> tableParameterMap = table.getParameters();
-
-        String zookeeperQuorum = tableParameterMap.get(PhoenixStorageHandlerConstants
-                .ZOOKEEPER_QUORUM);
-        zookeeperQuorum = zookeeperQuorum == null ? PhoenixStorageHandlerConstants
-                .DEFAULT_ZOOKEEPER_QUORUM : zookeeperQuorum;
-
-        String clientPortString = tableParameterMap.get(PhoenixStorageHandlerConstants
-                .ZOOKEEPER_PORT);
-        int clientPort = clientPortString == null ? PhoenixStorageHandlerConstants
-                .DEFAULT_ZOOKEEPER_PORT : Integer.parseInt(clientPortString);
-
-        String zNodeParent = tableParameterMap.get(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT);
-        zNodeParent = zNodeParent == null ? PhoenixStorageHandlerConstants
-                .DEFAULT_ZOOKEEPER_PARENT : zNodeParent;
-        try {
-            Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
-        } catch (ClassNotFoundException e) {
-            LOG.warn("Unable to load PhoenixDriver class org.apache.phoenix.jdbc.PhoenixDriver", e);
-        }
-        return DriverManager.getConnection(QueryUtil.getUrl(zookeeperQuorum, clientPort,
-                zNodeParent));
-    }
-
-    private static Connection getConnection(final String quorum, final Integer clientPort, String
-            zNodeParent, Properties props) throws SQLException {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Connection attrs [quorum, port, znode] : " + quorum + ", " + clientPort +
-                    ", " +
-                    zNodeParent);
-        }
-
-        return DriverManager.getConnection(clientPort != null ? QueryUtil.getUrl(quorum,
-                clientPort, zNodeParent) : QueryUtil.getUrl(quorum), props);
-    }
-
-    public static Configuration getConfiguration(JobConf jobConf) {
-        Configuration conf = new Configuration(jobConf);
-        String quorum = conf.get(PhoenixStorageHandlerConstants.ZOOKEEPER_QUORUM);
-        if(quorum!=null) {
-            conf.set(HConstants.ZOOKEEPER_QUORUM, quorum);
-        }
-        int zooKeeperClientPort = conf.getInt(PhoenixStorageHandlerConstants.ZOOKEEPER_PORT, 0);
-        if(zooKeeperClientPort != 0) {
-            conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zooKeeperClientPort);
-        }
-        String zNodeParent = conf.get(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT);
-        if(zNodeParent != null) {
-            conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zNodeParent);
-        }
-        return conf;
-    }
-}
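
The removed PhoenixConnectionUtil ultimately hands QueryUtil.getUrl() a ZooKeeper quorum, client port and znode parent and lets the Phoenix JDBC driver do the rest. A rough illustrative sketch of equivalent standalone usage (the host, port and znode below are placeholder assumptions, not values taken from this code):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class PhoenixJdbcExample {
        public static void main(String[] args) throws SQLException {
            // URL shape: jdbc:phoenix:<quorum>:<port>:<znodeParent>
            String url = "jdbc:phoenix:localhost:2181:/hbase";
            // With a JDBC 4 driver on the classpath, Class.forName() is usually unnecessary.
            try (Connection conn = DriverManager.getConnection(url)) {
                System.out.println(conn.getMetaData().getDatabaseProductName());
            }
        }
    }
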
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
deleted file mode 100644
index 4b23103..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.util;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.lang.reflect.Array;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.math.BigDecimal;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicReference;
-
-import javax.naming.NamingException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options;
-import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.net.DNS;
-import org.apache.phoenix.hive.PrimaryKeyData;
-import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
-import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.Maps;
-
-/**
- * Misc utils for PhoenixStorageHandler
- */
-
-public class PhoenixStorageHandlerUtil {
-    private static final Log LOG = LogFactory.getLog(PhoenixStorageHandlerUtil.class);
-    private static final AtomicReference<Method> GET_BUCKET_METHOD_REF = new AtomicReference<>();
-    private static final AtomicReference<Method> GET_BUCKET_ID_METHOD_REF = new AtomicReference<>();
-
-    public static String getTargetTableName(Table table) {
-        Map<String, String> tableParameterMap = table.getParameters();
-        String tableName = tableParameterMap.get(PhoenixStorageHandlerConstants
-                .PHOENIX_TABLE_NAME);
-        if (tableName == null) {
-            tableName = table.getTableName();
-            tableParameterMap.put(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME, tableName);
-        }
-
-        return tableName;
-    }
-
-
-    public static Object[] toTypedValues(JobConf jobConf, String typeName, String[] values) throws
-            Exception {
-        Object[] results = new Object[values.length];
-        DateFormat df = null;
-
-        for (int i = 0, limit = values.length; i < limit; i++) {
-            if (serdeConstants.STRING_TYPE_NAME.equals(typeName) ||
-                    typeName.startsWith(serdeConstants.CHAR_TYPE_NAME) ||
-                    typeName.startsWith(serdeConstants.VARCHAR_TYPE_NAME)) {
-                results[i] = values[i];
-            } else if (serdeConstants.INT_TYPE_NAME.equals(typeName)) {
-                results[i] = new Integer(values[i]);
-            } else if (serdeConstants.BIGINT_TYPE_NAME.equals(typeName)) {
-                results[i] = new Long(values[i]);
-            } else if (serdeConstants.DOUBLE_TYPE_NAME.equals(typeName)) {
-                results[i] = new Double(values[i]);
-            } else if (serdeConstants.FLOAT_TYPE_NAME.equals(typeName)) {
-                results[i] = new Float(values[i]);
-            } else if (serdeConstants.SMALLINT_TYPE_NAME.equals(typeName)) {
-                results[i] = new Short(values[i]);
-            } else if (serdeConstants.TINYINT_TYPE_NAME.equals(typeName)) {
-                results[i] = new Byte(values[i]);
-            } else if (serdeConstants.DATE_TYPE_NAME.equals(typeName)) {
-                String dateFormat = jobConf.get(PhoenixStorageHandlerConstants.HBASE_DATE_FORMAT,
-                        PhoenixStorageHandlerConstants.DEFAULT_DATE_FORMAT);
-                df = new SimpleDateFormat(dateFormat);
-                results[i] = new Long(df.parse(values[i]).getTime());
-            } else if (serdeConstants.TIMESTAMP_TYPE_NAME.equals(typeName)) {
-                String timestampFormat = jobConf.get(PhoenixStorageHandlerConstants
-                        .HBASE_TIMESTAMP_FORMAT, PhoenixStorageHandlerConstants
-                        .DEFAULT_TIMESTAMP_FORMAT);
-                df = new SimpleDateFormat(timestampFormat);
-                results[i] = new Long(df.parse(values[i]).getTime());
-            } else if (typeName.contains(serdeConstants.DECIMAL_TYPE_NAME)) {
-                results[i] = new BigDecimal(values[i]);
-            }
-        }
-
-        return results;
-    }
-
-    public static String[] getConstantValues(IndexSearchCondition condition, String comparisonOp) {
-        String[] constantValues = null;
-
-        if (comparisonOp.endsWith("UDFOPEqual") || comparisonOp.endsWith("UDFOPNotEqual")) {
-            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
-        } else if (comparisonOp.endsWith("UDFOPEqualOrGreaterThan")) {    // key >= 1
-            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
-        } else if (comparisonOp.endsWith("UDFOPGreaterThan")) {        // key > 1
-            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
-        } else if (comparisonOp.endsWith("UDFOPEqualOrLessThan")) {    // key <= 1
-            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
-        } else if (comparisonOp.endsWith("UDFOPLessThan")) {    // key < 1
-            constantValues = new String[]{String.valueOf(condition.getConstantDesc().getValue())};
-        } else if (comparisonOp.endsWith("GenericUDFBetween")) {
-            constantValues = new String[]{String.valueOf(condition.getConstantDesc(0).getValue()),
-                    String.valueOf(condition.getConstantDesc(1).getValue())};
-        } else if (comparisonOp.endsWith("GenericUDFIn")) {
-            ExprNodeConstantDesc[] constantDescs = condition.getConstantDescs();
-            constantValues = new String[constantDescs.length];
-            for (int i = 0, limit = constantDescs.length; i < limit; i++) {
-                constantValues[i] = String.valueOf(condition.getConstantDesc(i).getValue());
-            }
-        }
-
-        return constantValues;
-    }
-
-    public static String getRegionLocation(HRegionLocation location, Log log) throws IOException {
-        InetSocketAddress isa = new InetSocketAddress(location.getHostname(), location.getPort());
-        if (isa.isUnresolved()) {
-            log.warn("Failed resolve " + isa);
-        }
-        InetAddress regionAddress = isa.getAddress();
-        String regionLocation = null;
-        try {
-            regionLocation = reverseDNS(regionAddress);
-        } catch (NamingException e) {
-            log.warn("Cannot resolve the host name for " + regionAddress + " because of " + e);
-            regionLocation = location.getHostname();
-        }
-
-        return regionLocation;
-    }
-
-    // Copy from org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.reverseDNS
-    private static final Map<InetAddress, String> reverseDNSCacheMap = Maps.newConcurrentMap();
-
-    private static String reverseDNS(InetAddress ipAddress) throws NamingException,
-            UnknownHostException {
-        String hostName = reverseDNSCacheMap.get(ipAddress);
-
-        if (hostName == null) {
-            String ipAddressString = null;
-            try {
-                ipAddressString = DNS.reverseDns(ipAddress, null);
-            } catch (Exception e) {
-                // Fall back to InetAddress if JNDI fails to pull up the reverse DNS entry
-                // from the name service. Also, for IPv6 we need to use InetAddress, since
-                // resolving reverse DNS through JNDI doesn't work well with IPv6 addresses.
-                ipAddressString = InetAddress.getByName(ipAddress.getHostAddress()).getHostName();
-            }
-
-            if (ipAddressString == null) {
-                throw new UnknownHostException("No host found for " + ipAddress);
-            }
-
-            hostName = Strings.domainNamePointerToHostName(ipAddressString);
-            reverseDNSCacheMap.put(ipAddress, hostName);
-        }
-
-        return hostName;
-    }
-
-    public static String getTableKeyOfSession(JobConf jobConf, String tableName) {
-
-        String sessionId = jobConf.get(PhoenixConfigurationUtil.SESSION_ID);
-        return new StringBuilder("[").append(sessionId).append("]-").append(tableName).toString();
-    }
-
-    public static Map<String, TypeInfo> createColumnTypeMap(JobConf jobConf) {
-        Map<String, TypeInfo> columnTypeMap = Maps.newHashMap();
-
-        String[] columnNames = jobConf.get(serdeConstants.LIST_COLUMNS).split
-                (PhoenixStorageHandlerConstants.COMMA);
-        List<TypeInfo> typeInfos =
-                TypeInfoUtils.getTypeInfosFromTypeString(jobConf.get(serdeConstants.LIST_COLUMN_TYPES));
-
-        for (int i = 0, limit = columnNames.length; i < limit; i++) {
-            columnTypeMap.put(columnNames[i], typeInfos.get(i));
-        }
-
-        return columnTypeMap;
-    }
-
-    public static List<String> getReadColumnNames(Configuration conf) {
-        String colNames = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR);
-        if (colNames != null && !colNames.isEmpty()) {
-            return Arrays.asList(colNames.split(PhoenixStorageHandlerConstants.COMMA));
-        }
-        return Collections.EMPTY_LIST;
-    }
-
-    public static boolean isTransactionalTable(Properties tableProperties) {
-        String tableIsTransactional = tableProperties.getProperty(hive_metastoreConstants
-                .TABLE_IS_TRANSACTIONAL);
-
-        return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
-    }
-
-    public static boolean isTransactionalTable(Configuration config) {
-        String tableIsTransactional = config.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
-
-        return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
-    }
-
-    public static void printConfiguration(Configuration config) {
-        if (Boolean.getBoolean("dev")) {
-            for (Iterator<Entry<String, String>> iterator = config.iterator(); iterator.hasNext();
-                    ) {
-                Entry<String, String> entry = iterator.next();
-
-                System.out.println(entry.getKey() + "=" + entry.getValue());
-            }
-        }
-    }
-
-    public static String toString(Object obj) {
-        String content = null;
-
-        if (obj instanceof Array) {
-            Object[] values = (Object[]) obj;
-
-            content = Joiner.on(PhoenixStorageHandlerConstants.COMMA).join(values);
-        } else {
-            content = obj.toString();
-        }
-
-        return content;
-    }
-
-    public static Map<?, ?> toMap(byte[] serialized) {
-        ByteArrayInputStream bais = new ByteArrayInputStream(serialized);
-
-        try {
-            return PrimaryKeyData.deserialize(bais).getData();
-        } catch (ClassNotFoundException | IOException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public static String getOptionsValue(Options options) {
-        StringBuilder content = new StringBuilder();
-
-        int bucket = getBucket(options);
-        String inspectorInfo = options.getInspector().getCategory() + ":" + options.getInspector()
-                .getTypeName();
-        long maxTxnId = options.getMaximumWriteId();
-        long minTxnId = options.getMinimumWriteId();
-        int recordIdColumn = options.getRecordIdColumn();
-        boolean isCompressed = options.isCompressed();
-        boolean isWritingBase = options.isWritingBase();
-
-        content.append("bucket : ").append(bucket).append(", inspectorInfo : ").append
-                (inspectorInfo).append(", minTxnId : ").append(minTxnId).append(", maxTxnId : ")
-                .append(maxTxnId).append(", recordIdColumn : ").append(recordIdColumn);
-        content.append(", isCompressed : ").append(isCompresses).append(", isWritingBase : ")
-                .append(isWritingBase);
-
-        return content.toString();
-    }
-
-    private static int getBucket(Options options) {
-        Method getBucketMethod = GET_BUCKET_METHOD_REF.get();
-        try {
-            if (getBucketMethod == null) {
-                getBucketMethod = Options.class.getMethod("getBucket");
-                GET_BUCKET_METHOD_REF.set(getBucketMethod);
-            }
-            return (int) getBucketMethod.invoke(options);
-        } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) {
-            LOG.trace("Failed to invoke Options.getBucket()", e);
-        }
-        Method getBucketIdMethod = GET_BUCKET_ID_METHOD_REF.get();
-        try {
-            if (getBucketIdMethod == null) {
-                getBucketIdMethod = Options.class.getMethod("getBucketId");
-                GET_BUCKET_ID_METHOD_REF.set(getBucketIdMethod);
-            }
-            return (int) getBucketIdMethod.invoke(options);
-        } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) {
-            throw new RuntimeException("Failed to invoke Options.getBucketId()", e);
-        }
-    }
-}
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java
deleted file mode 100644
index d763fae..0000000
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.util;
-
-import com.google.common.base.CharMatcher;
-import com.google.common.base.Splitter;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
-import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.schema.MetaDataClient;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.TableNotFoundException;
-import org.apache.phoenix.util.ColumnInfo;
-import org.apache.phoenix.util.PhoenixRuntime;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-/**
- * Misc utils
- */
-public class PhoenixUtil {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixUtil.class);
-
-    public static String getPhoenixType(String hiveTypeName) {
-        if (hiveTypeName.startsWith("array")) {
-            List<String> tokenList = Lists.newArrayList(Splitter.on(CharMatcher.is('<').or
-                    (CharMatcher.is('>'))).split(hiveTypeName));
-            return getPhoenixType(tokenList.get(1)) + "[]";
-        } else if (hiveTypeName.startsWith("int")) {
-            return "integer";
-        } else if (hiveTypeName.equals("string")) {
-            return "varchar";
-        } else {
-            return hiveTypeName;
-        }
-    }
-
-    public static boolean existTable(Connection conn, String tableName) throws SQLException {
-        boolean exist = false;
-        DatabaseMetaData dbMeta = conn.getMetaData();
-
-        String[] schemaInfo = getTableSchema(tableName.toUpperCase());
-        try (ResultSet rs = dbMeta.getTables(null, schemaInfo[0], schemaInfo[1], null)) {
-            exist = rs.next();
-
-            if (LOG.isDebugEnabled()) {
-                if (exist) {
-                    LOG.debug(rs.getString("TABLE_NAME") + " table exist. ");
-                } else {
-                    LOG.debug("table " + tableName + " doesn't exist.");
-                }
-            }
-        }
-
-        return exist;
-    }
-
-    public static List<String> getPrimaryKeyColumnList(Connection conn, String tableName) throws
-            SQLException {
-        Map<Short, String> primaryKeyColumnInfoMap = Maps.newHashMap();
-        DatabaseMetaData dbMeta = conn.getMetaData();
-
-        String[] schemaInfo = getTableSchema(tableName.toUpperCase());
-        try (ResultSet rs = dbMeta.getPrimaryKeys(null, schemaInfo[0], schemaInfo[1])) {
-            while (rs.next()) {
-                primaryKeyColumnInfoMap.put(rs.getShort("KEY_SEQ"), rs.getString("COLUMN_NAME"));
-            }
-
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("PK-columns : " + primaryKeyColumnInfoMap);
-            }
-        }
-
-        return Lists.newArrayList(primaryKeyColumnInfoMap.values());
-    }
-
-    public static List<String> getPrimaryKeyColumnList(Configuration config, String tableName) {
-        List<String> pkColumnNameList = null;
-
-        try (Connection conn = PhoenixConnectionUtil.getInputConnection(config, new Properties())) {
-            pkColumnNameList = getPrimaryKeyColumnList(conn, tableName);
-        } catch (SQLException e) {
-            throw new RuntimeException(e);
-        }
-
-        return pkColumnNameList;
-    }
-
-    public static void createTable(Connection conn, String createTableStatement) throws
-            SQLException {
-        conn.createStatement().execute(createTableStatement);
-    }
-
-    public static void dropTable(Connection conn, String tableName) throws SQLException {
-        conn.createStatement().execute("drop table " + tableName);
-    }
-
-    public static List<ColumnInfo> getColumnInfoList(Connection conn, String tableName) throws
-            SQLException {
-        List<ColumnInfo> columnInfoList = null;
-
-        try {
-            columnInfoList = PhoenixRuntime.generateColumnInfo(conn, tableName, null);
-        } catch (TableNotFoundException e) {
-            // Exception can occur while the table is being created.
-            columnInfoList = Collections.emptyList();
-        }
-
-        return columnInfoList;
-    }
-
-    public static String[] getTableSchema(String tableName) {
-        String[] schemaInfo = new String[2];
-        String[] tokens = tableName.split("\\.");
-
-        if (tokens.length == 2) {
-            schemaInfo = tokens;
-        } else {
-            schemaInfo[1] = tokens[0];
-        }
-
-        return schemaInfo;
-    }
-
-    public static boolean isDisabledWal(MetaDataClient metaDataClient, String tableName) throws
-            SQLException {
-        String[] schemaInfo = getTableSchema(tableName.toUpperCase());
-        MetaDataMutationResult result = metaDataClient.updateCache(schemaInfo[0], schemaInfo[1]);
-        PTable dataTable = result.getTable();
-
-        return dataTable.isWALDisabled();
-    }
-
-    public static void alterTableForWalDisable(Connection conn, String tableName, boolean
-            disableMode) throws SQLException {
-        conn.createStatement().execute("alter table " + tableName + " set disable_wal=" +
-                disableMode);
-    }
-
-    public static void flush(Connection conn, String tableName) throws SQLException {
-        try (Admin admin = ((PhoenixConnection) conn).getQueryServices().getAdmin()) {
-            admin.flush(TableName.valueOf(tableName));
-        } catch (IOException e) {
-            throw new SQLException(e);
-        }
-    }
-
-    public static String constructDeleteStatement(Connection conn, String tableName) throws
-            SQLException {
-        StringBuilder deleteQuery = new StringBuilder("delete from ").append(tableName).append(" " +
-                "where ");
-
-        List<String> primaryKeyColumnList = getPrimaryKeyColumnList(conn, tableName);
-        for (int i = 0, limit = primaryKeyColumnList.size(); i < limit; i++) {
-            String pkColumn = primaryKeyColumnList.get(i);
-            deleteQuery.append(pkColumn).append(PhoenixStorageHandlerConstants.EQUAL).append
-                    (PhoenixStorageHandlerConstants.QUESTION);
-
-            if ((i + 1) != primaryKeyColumnList.size()) {
-                deleteQuery.append(" and ");
-            }
-        }
-
-        return deleteQuery.toString();
-    }
-
-    public static void closeResource(Statement stmt) throws SQLException {
-        if (stmt != null && !stmt.isClosed()) {
-            stmt.close();
-        }
-    }
-
-    public static void closeResource(Connection conn) throws SQLException {
-        if (conn != null && !conn.isClosed()) {
-            conn.close();
-        }
-    }
-}
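
constructDeleteStatement() in the removed PhoenixUtil builds a parameterized DELETE keyed on the primary-key columns (e.g. "delete from MY_TABLE where ID=? and CREATED=?"). A hypothetical usage sketch with a standard JDBC PreparedStatement -- the table and column names here are invented for illustration:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class DeleteByPkExample {
        // Assumes a table MY_TABLE whose primary key is (ID, CREATED).
        static void deleteRow(Connection conn, long id, long created) throws SQLException {
            String sql = "delete from MY_TABLE where ID=? and CREATED=?";
            try (PreparedStatement stmt = conn.prepareStatement(sql)) {
                stmt.setLong(1, id);
                stmt.setLong(2, created);
                stmt.executeUpdate();
                // Commit explicitly; Phoenix connections typically have auto-commit off by default.
                conn.commit();
            }
        }
    }
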
diff --git a/phoenix-hive/src/test/java/org/apache/phoenix/hive/query/PhoenixQueryBuilderTest.java b/phoenix-hive/src/test/java/org/apache/phoenix/hive/query/PhoenixQueryBuilderTest.java
deleted file mode 100644
index e97b518..0000000
--- a/phoenix-hive/src/test/java/org/apache/phoenix/hive/query/PhoenixQueryBuilderTest.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hive.query;
-
-import com.google.common.collect.Lists;
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.List;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import static org.junit.Assert.assertEquals;
-
-public class PhoenixQueryBuilderTest {
-    private static final PhoenixQueryBuilder BUILDER = PhoenixQueryBuilder.getInstance();
-    private static final String TABLE_NAME = "TEST_TABLE";
-
-    private IndexSearchCondition mockedIndexSearchCondition(String comparisonOp,
-                                                            Object constantValue,
-                                                            Object[] constantValues,
-                                                            String columnName,
-                                                            String typeString,
-                                                            boolean isNot) {
-        IndexSearchCondition condition = mock(IndexSearchCondition.class);
-        when(condition.getComparisonOp()).thenReturn(comparisonOp);
-
-        if (constantValue != null) {
-            ExprNodeConstantDesc constantDesc = mock(ExprNodeConstantDesc.class);
-            when(constantDesc.getValue()).thenReturn(constantValue);
-            when(condition.getConstantDesc()).thenReturn(constantDesc);
-        }
-
-        ExprNodeColumnDesc columnDesc = mock(ExprNodeColumnDesc.class);
-        when(columnDesc.getColumn()).thenReturn(columnName);
-        when(columnDesc.getTypeString()).thenReturn(typeString);
-        when(condition.getColumnDesc()).thenReturn(columnDesc);
-
-
-        if (ArrayUtils.isNotEmpty(constantValues)) {
-            ExprNodeConstantDesc[] constantDescs = new ExprNodeConstantDesc[constantValues.length];
-            for (int i = 0; i < constantDescs.length; i++) {
-                constantDescs[i] = mock(ExprNodeConstantDesc.class);
-                when(condition.getConstantDesc(i)).thenReturn(constantDescs[i]);
-                when(constantDescs[i].getValue()).thenReturn(constantValues[i]);
-            }
-            when(condition.getConstantDescs()).thenReturn(constantDescs);
-        }
-
-        when(condition.isNot()).thenReturn(isNot);
-
-        return condition;
-    }
-
-    @Test
-    public void testBuildQueryWithCharColumns() throws IOException {
-        final String COLUMN_CHAR = "Column_Char";
-        final String COLUMN_VARCHAR = "Column_VChar";
-        final String expectedQueryPrefix = "select /*+ NO_CACHE  */ \"" + COLUMN_CHAR + "\",\"" + COLUMN_VARCHAR +
-                "\" from TEST_TABLE where ";
-
-        JobConf jobConf = new JobConf();
-        List<String> readColumnList = Lists.newArrayList(COLUMN_CHAR, COLUMN_VARCHAR);
-        List<IndexSearchCondition> searchConditions = Lists.newArrayList(
-                mockedIndexSearchCondition("GenericUDFOPEqual", "CHAR_VALUE", null, COLUMN_CHAR, "char(10)", false),
-                mockedIndexSearchCondition("GenericUDFOPEqual", "CHAR_VALUE2", null, COLUMN_VARCHAR, "varchar(10)", false)
-        );
-
-        assertEquals(expectedQueryPrefix + "\"Column_Char\" = 'CHAR_VALUE' and \"Column_VChar\" = 'CHAR_VALUE2'",
-                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
-
-        searchConditions = Lists.newArrayList(
-                mockedIndexSearchCondition("GenericUDFIn", null,
-                        new Object[]{"CHAR1", "CHAR2", "CHAR3"}, COLUMN_CHAR, "char(10)", false)
-        );
-
-        assertEquals(expectedQueryPrefix + "\"Column_Char\" in ('CHAR1', 'CHAR2', 'CHAR3')",
-                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
-
-        searchConditions = Lists.newArrayList(
-                mockedIndexSearchCondition("GenericUDFIn", null,
-                        new Object[]{"CHAR1", "CHAR2", "CHAR3"}, COLUMN_CHAR, "char(10)", true)
-        );
-
-        assertEquals(expectedQueryPrefix + "\"Column_Char\" not in ('CHAR1', 'CHAR2', 'CHAR3')",
-                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
-
-        searchConditions = Lists.newArrayList(
-                mockedIndexSearchCondition("GenericUDFBetween", null,
-                        new Object[]{"CHAR1", "CHAR2"}, COLUMN_CHAR, "char(10)", false)
-        );
-
-        assertEquals(expectedQueryPrefix + "\"Column_Char\" between 'CHAR1' and 'CHAR2'",
-                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
-
-        searchConditions = Lists.newArrayList(
-                mockedIndexSearchCondition("GenericUDFBetween", null,
-                        new Object[]{"CHAR1", "CHAR2"}, COLUMN_CHAR, "char(10)", true)
-        );
-
-        assertEquals(expectedQueryPrefix + "\"Column_Char\" not between 'CHAR1' and 'CHAR2'",
-                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
-    }
-
-    @Test
-    public void testBuildBetweenQueryWithDateColumns() throws IOException {
-        final String COLUMN_DATE = "Column_Date";
-        final String tableName = "TEST_TABLE";
-        final String expectedQueryPrefix = "select /*+ NO_CACHE  */ \"" + COLUMN_DATE +
-                "\" from " + tableName + " where ";
-
-        JobConf jobConf = new JobConf();
-        List<String> readColumnList = Lists.newArrayList(COLUMN_DATE);
-
-        List<IndexSearchCondition> searchConditions = Lists.newArrayList(
-                mockedIndexSearchCondition("GenericUDFBetween", null,
-                        new Object[]{"1992-01-02", "1992-02-02"}, COLUMN_DATE, "date", false)
-        );
-
-        assertEquals(expectedQueryPrefix +
-                        "\"" + COLUMN_DATE + "\" between to_date('1992-01-02') and to_date('1992-02-02')",
-                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
-
-        searchConditions = Lists.newArrayList(
-                mockedIndexSearchCondition("GenericUDFBetween", null,
-                        new Object[]{"1992-01-02", "1992-02-02"}, COLUMN_DATE, "date", true)
-        );
-
-        assertEquals(expectedQueryPrefix +
-                        "\"" + COLUMN_DATE + "\" not between to_date('1992-01-02') and to_date('1992-02-02')",
-                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
-    }
-
-    @Test
-    public void testBuildQueryWithNotNull() throws IOException {
-        final String COLUMN_DATE = "Column_Date";
-        final String tableName = "TEST_TABLE";
-        final String expectedQueryPrefix = "select /*+ NO_CACHE  */ \"" + COLUMN_DATE +
-                "\" from " + tableName + " where ";
-
-        JobConf jobConf = new JobConf();
-        List<String> readColumnList = Lists.newArrayList(COLUMN_DATE);
-
-        List<IndexSearchCondition> searchConditions = Lists.newArrayList(
-                mockedIndexSearchCondition("GenericUDFOPNotNull", null,
-                        null, COLUMN_DATE, "date", true)
-        );
-
-        assertEquals(expectedQueryPrefix +
-                        "\"" + COLUMN_DATE + "\" is not null ",
-                BUILDER.buildQuery(jobConf, TABLE_NAME, readColumnList, searchConditions));
-    }
-}
diff --git a/phoenix-hive/src/test/resources/hive-site.xml b/phoenix-hive/src/test/resources/hive-site.xml
deleted file mode 100644
index 143a829..0000000
--- a/phoenix-hive/src/test/resources/hive-site.xml
+++ /dev/null
@@ -1,123 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-<property>
-  <name>hive.in.test</name>
-  <value>true</value>
-  <description>Internal marker for test. Used for masking env-dependent values</description>
-</property>
-
-<property>
-  <name>hive.tez.container.size</name>
-  <value>128</value>
-  <description></description>
-</property>
-
-<property>
-  <name>phoenix.log.buffer.size</name>
-  <value>1024</value>
-  <description></description>
-</property>
-
-
-<property>
-  <name>datanucleus.schema.autoCreateAll</name>
-  <value>true</value>
-</property>
-
-
-<property>
-  <name>hive.metastore.schema.verification</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>hive.query.results.cache.enabled</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>hive.fetch.task.conversion</name>
-  <value>minimal</value>
-</property>
-
-<property>
-  <name>hive.auto.convert.join</name>
-  <value>false</value>
-  <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size</description>
-</property>
-
-<property>
-  <name>hive.ignore.mapjoin.hint</name>
-  <value>false</value>
-  <description>Whether Hive ignores the mapjoin hint</description>
-</property>
-
-
-<property>
-  <name>hive.exec.mode.local.auto</name>
-  <value>false</value>
-  <description>
-    Let hive determine whether to run in local mode automatically
-    Disabling this for tests so that minimr is not affected
-  </description>
-</property>
-
-
-<!-- MetaStore settings -->
-
-
-<property>
-  <name>javax.jdo.option.ConnectionURL</name>
-  <value>jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db;create=true</value>
-</property>
-
-<property>
-  <name>javax.jdo.option.ConnectionDriverName</name>
-  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-</property>
-
-<property>
-  <name>javax.jdo.option.ConnectionUserName</name>
-  <value>APP</value>
-</property>
-
-<property>
-  <name>javax.jdo.option.ConnectionPassword</name>
-  <value>mine</value>
-</property>
-
-<property>
-  <!--  this should eventually be deprecated since the metastore should supply this -->
-  <name>hive.metastore.warehouse.dir</name>
-  <value>${test.warehouse.dir}</value>
-  <description></description>
-</property>
-
-<property>
-  <name>hive.metastore.metadb.dir</name>
-  <value>file://${test.tmp.dir}/metadb/</value>
-  <description>
-  Required by metastore server or if the uris argument below is not supplied
-  </description>
-</property>
-
-</configuration>
diff --git a/phoenix-hive/src/test/resources/tez-site.xml b/phoenix-hive/src/test/resources/tez-site.xml
deleted file mode 100644
index 97ae8c5..0000000
--- a/phoenix-hive/src/test/resources/tez-site.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<configuration>
-
-<!-- Site specific YARN configuration properties -->
-<property>
-    <name>tez.am.resource.memory.mb</name>
-    <value>500</value>
-  </property>
-
-<property>
-    <name>tez.am.task.memory.mb</name>
-    <value>500</value>
-  </property>
-
-<property>
-    <name>hive.tez.container.size</name>
-    <value>500</value>
-</property>
-
-
-<property>
-    <name>hive.in.tez.test</name>
-    <value>true</value>
-</property>
-
-<property>
-    <name>tez.ignore.lib.uris</name>
-    <value>true</value>
-</property>
-
-
-<property>
-  <name>hive.tez.input.format</name>
-  <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
-  <description>The default input format for tez. Tez groups splits in the AM.</description>
-</property>
-
-
-<property>
-  <name>hive.input.format</name>
-  <value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
-  <description>The default input format; if it is not specified, the system assigns it. It is set to HiveInputFormat for Hadoop versions 17, 18 and 19, and to CombineHiveInputFormat for Hadoop 20. The user can always override it: if there is a bug in CombineHiveInputFormat, it can be set to HiveInputFormat manually.</description>
-</property>
-
-<property>
-  <name>hive.auto.convert.join</name>
-  <value>false</value>
-  <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size</description>
-</property>
-
-<property>
-  <name>hive.ignore.mapjoin.hint</name>
-  <value>true</value>
-  <description>Whether Hive ignores the mapjoin hint</description>
-</property>
-
-  
-</configuration>
\ No newline at end of file
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
deleted file mode 100644
index 6c0a229..0000000
--- a/phoenix-kafka/pom.xml
+++ /dev/null
@@ -1,421 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-	<modelVersion>4.0.0</modelVersion>
-	<parent>
-		<groupId>org.apache.phoenix</groupId>
-		<artifactId>phoenix</artifactId>
-		<version>5.1.0-HBase-2.0-SNAPSHOT</version>
-	</parent>
-	<artifactId>phoenix-kafka</artifactId>
-	<name>Phoenix - Kafka</name>
-
-	<licenses>
-		<license>
-			<name>The Apache Software License, Version 2.0</name>
-			<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
-			<distribution>repo</distribution>
-			<comments />
-		</license>
-	</licenses>
-
-	<organization>
-		<name>Apache Software Foundation</name>
-		<url>http://www.apache.org</url>
-	</organization>
-
-	<properties>
-		<top.dir>${project.basedir}/..</top.dir>
-	</properties>
-
-	<dependencies>
-		<!-- Make sure we have all the antlr dependencies -->
-		<dependency>
-			<groupId>org.antlr</groupId>
-			<artifactId>antlr-runtime</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>jline</groupId>
-			<artifactId>jline</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>sqlline</groupId>
-			<artifactId>sqlline</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>com.google.guava</groupId>
-			<artifactId>guava</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>joda-time</groupId>
-			<artifactId>joda-time</artifactId>
-		</dependency>
-		<!-- JSR-305 and jcip-annotations -->
-		<dependency>
-			<groupId>com.github.stephenc.findbugs</groupId>
-			<artifactId>findbugs-annotations</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>com.github.stephenc.jcip</groupId>
-			<artifactId>jcip-annotations</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.codehaus.jackson</groupId>
-			<artifactId>jackson-core-asl</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.codehaus.jackson</groupId>
-			<artifactId>jackson-mapper-asl</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.codehaus.jackson</groupId>
-			<artifactId>jackson-jaxrs</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.codehaus.jackson</groupId>
-			<artifactId>jackson-xc</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>junit</groupId>
-			<artifactId>junit</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.mockito</groupId>
-			<artifactId>mockito-all</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>com.google.protobuf</groupId>
-			<artifactId>protobuf-java</artifactId>
-			<version>${protobuf-java.version}</version>
-		</dependency>
-		<!-- Intentionally avoid an dependencyManagement entry because of conflict with thin-client -->
-		<dependency>
-			<groupId>org.apache.httpcomponents</groupId>
-			<artifactId>httpclient</artifactId>
-			<version>4.0.1</version>
-		</dependency>
-		<dependency>
-			<groupId>log4j</groupId>
-			<artifactId>log4j</artifactId>
-			<version>${log4j.version}</version>
-		</dependency>
-		<dependency>
-			<groupId>org.slf4j</groupId>
-			<artifactId>slf4j-api</artifactId>
-			<version>${slf4j.version}</version>
-		</dependency>
-		<dependency>
-			<groupId>org.iq80.snappy</groupId>
-			<artifactId>snappy</artifactId>
-			<version>${snappy.version}</version>
-		</dependency>
-		<dependency>
-			<groupId>com.github.stephenc.high-scale-lib</groupId>
-			<artifactId>high-scale-lib</artifactId>
-			<version>1.1.1</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>com.yammer.metrics</groupId>
-			<artifactId>metrics-core</artifactId>
-			<version>2.1.2</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.htrace</groupId>
-			<artifactId>htrace-core</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>commons-cli</groupId>
-			<artifactId>commons-cli</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>commons-codec</groupId>
-			<artifactId>commons-codec</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>commons-collections</groupId>
-			<artifactId>commons-collections</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.commons</groupId>
-			<artifactId>commons-csv</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>com.google.code.findbugs</groupId>
-			<artifactId>jsr305</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-testing-util</artifactId>
-			<scope>test</scope>
-			<optional>true</optional>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-it</artifactId>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-annotations</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-common</artifactId>
-		</dependency>
-    		<dependency>
-      			<groupId>org.apache.hbase</groupId>
-      			<artifactId>hbase-mapreduce</artifactId>
-    		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-common</artifactId>
-			<scope>test</scope>
-			<type>test-jar</type>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-protocol</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-client</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-server</artifactId>
-			<exclusions>
-				<exclusion>
-					<groupId>xom</groupId>
-					<artifactId>xom</artifactId>
-				</exclusion>
-			</exclusions>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-server</artifactId>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-hadoop-compat</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-hadoop-compat</artifactId>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-hadoop2-compat</artifactId>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-hadoop2-compat</artifactId>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hadoop</groupId>
-			<artifactId>hadoop-common</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hadoop</groupId>
-			<artifactId>hadoop-annotations</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hadoop</groupId>
-			<artifactId>hadoop-mapreduce-client-core</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hadoop</groupId>
-			<artifactId>hadoop-minicluster</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hadoop</groupId>
-			<artifactId>hadoop-minikdc</artifactId>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.jruby.joni</groupId>
-			<artifactId>joni</artifactId>
-			<version>${joni.version}</version>
-		</dependency>
-
-		<!-- To work with kafka with phoenix -->
-		<dependency>
-			<groupId>org.apache.phoenix</groupId>
-			<artifactId>phoenix-core</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.phoenix</groupId>
-			<artifactId>phoenix-core</artifactId>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.kafka</groupId>
-			<artifactId>kafka_2.11</artifactId>
-			<version>${kafka.version}</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.kafka</groupId>
-			<artifactId>kafka-clients</artifactId>
-			<version>${kafka.version}</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.kafka</groupId>
-			<artifactId>kafka_2.11</artifactId>
-			<version>${kafka.version}</version>
-			<classifier>test</classifier>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.kafka</groupId>
-			<artifactId>kafka-clients</artifactId>
-			<version>${kafka.version}</version>
-			<classifier>test</classifier>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.kafka</groupId>
-			<artifactId>kafka-tools</artifactId>
-			<version>${kafka.version}</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.phoenix</groupId>
-			<artifactId>phoenix-flume</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flume</groupId>
-			<artifactId>flume-ng-core</artifactId>
-		</dependency>
-                <dependency>
-                  <groupId>org.apache.tephra</groupId>
-                  <artifactId>tephra-core</artifactId>
-                  <type>test-jar</type>
-                  <scope>test</scope>
-                </dependency>
-	</dependencies>
-
-	<build>
-		<plugins>
-			<!-- Add the ant-generated sources to the source path -->
-			<plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-site-plugin</artifactId>
-				<dependencies>
-					<dependency>
-						<groupId>org.apache.maven.doxia</groupId>
-						<artifactId>doxia-module-markdown</artifactId>
-						<version>1.3</version>
-					</dependency>
-					<dependency>
-						<groupId>lt.velykis.maven.skins</groupId>
-						<artifactId>reflow-velocity-tools</artifactId>
-						<version>1.0.0</version>
-					</dependency>
-					<dependency>
-						<groupId>org.apache.velocity</groupId>
-						<artifactId>velocity</artifactId>
-						<version>1.7</version>
-					</dependency>
-				</dependencies>
-			</plugin>
-
-			<!-- Setup eclipse -->
-			<plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-eclipse-plugin</artifactId>
-				<configuration>
-					<buildcommands>
-						<buildcommand>org.jamon.project.templateBuilder</buildcommand>
-						<buildcommand>org.eclipse.jdt.core.javabuilder</buildcommand>
-					</buildcommands>
-				</configuration>
-			</plugin>
-
-			<plugin>
-				<groupId>org.codehaus.mojo</groupId>
-				<artifactId>build-helper-maven-plugin</artifactId>
-			</plugin>
-			<plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-failsafe-plugin</artifactId>
-			</plugin>
-			<plugin>
-				<artifactId>maven-dependency-plugin</artifactId>
-				<version>${maven-dependency-plugin.version}</version>
-			</plugin>
-			<plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-resources-plugin</artifactId>
-			</plugin>
-
-			<plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-shade-plugin</artifactId>
-				<executions>
-					<execution>
-						<phase>package</phase>
-						<goals>
-							<goal>shade</goal>
-						</goals>
-						<configuration>
-							<finalName>phoenix-kafka-${project.version}-minimal</finalName>
-							<shadedArtifactAttached>false</shadedArtifactAttached>
-							<promoteTransitiveDependencies>true</promoteTransitiveDependencies>
-							<shadeTestJar>false</shadeTestJar>
-							<artifactSet>
-								<includes>
-									<include>org.apache.phoenix:phoenix-kafka</include>
-									<include>org.apache.kafka:kafka-clients</include>
-									<include>org.apache.phoenix:phoenix-flume</include>
-								</includes>
-							</artifactSet>
-						</configuration>
-					</execution>
-				</executions>
-			</plugin>
-		</plugins>
-	</build>
-
-        <reporting>
-            <plugins>
-                <plugin>
-                    <groupId>org.apache.maven.plugins</groupId>
-                    <artifactId>maven-project-info-reports-plugin</artifactId>
-                </plugin>
-                <plugin>
-                    <groupId>org.codehaus.mojo</groupId>
-                    <artifactId>findbugs-maven-plugin</artifactId>
-                </plugin>
-            </plugins>
-        </reporting>
-</project>
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
deleted file mode 100644
index 551fdc5..0000000
--- a/phoenix-pig/pom.xml
+++ /dev/null
@@ -1,464 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>5.1.0-HBase-2.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>phoenix-pig</artifactId>
-  <name>Phoenix - Pig</name>
-
-  <properties>
-    <top.dir>${project.basedir}/..</top.dir>
-    <shaded.package>org.apache.phoenix.shaded</shaded.package>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>joda-time</groupId>
-      <artifactId>joda-time</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.pig</groupId>
-      <artifactId>pig</artifactId>
-      <classifier>h2</classifier>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-testing-util</artifactId>
-      <scope>test</scope>
-      <optional>true</optional>
-      <exclusions>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-it</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-mapreduce</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-protocol</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-client</artifactId>
-    </dependency>
-   <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-server</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>xom</groupId>
-          <artifactId>xom</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-server</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-client</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop-compat</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop-compat</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop2-compat</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop2-compat</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minicluster</artifactId>
-    </dependency>
-    <!-- Test Dependencies -->
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.tephra</groupId>
-      <artifactId>tephra-core</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <version>${maven-dependency-plugin.version}</version>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-            <configuration>
-              <finalName>phoenix-${project.version}-pig</finalName>
-              <shadedArtifactAttached>false</shadedArtifactAttached>
-              <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
-              <shadeTestJar>false</shadeTestJar>
-              <transformers>
-                <transformer
-                        implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>README.md</resource>
-                  <file>${project.basedir}/../README.md</file>
-                </transformer>
-                <transformer
-                        implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>LICENSE.txt</resource>
-                  <file>${project.basedir}/../LICENSE</file>
-                </transformer>
-                <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                  <resource>NOTICE</resource>
-                  <file>${project.basedir}/../NOTICE</file>
-                </transformer>
-              </transformers>
-              <artifactSet>
-                <includes>
-                  <include>*:*</include>
-                </includes>
-                <excludes>
-                  <exclude>org.apache.phoenix:phoenix-client</exclude>
-                  <exclude>org.apache.pig:pig</exclude>
-                  <exclude>joda-time:joda-time</exclude>
-                  <exclude>xom:xom</exclude>
-                </excludes>
-              </artifactSet>
-              <filters>
-                <filter>
-                  <artifact>*:*</artifact>
-                  <excludes>
-                    <exclude>META-INF/*.SF</exclude>
-                    <exclude>META-INF/*.DSA</exclude>
-                    <exclude>META-INF/*.RSA</exclude>
-                    <exclude>META-INF/license/*</exclude>
-                    <exclude>LICENSE.*</exclude>
-                    <exclude>NOTICE.*</exclude>
-                  </excludes>
-                </filter>
-              </filters>
-              <relocations>
-                <!-- COM relocation -->
-                <relocation>
-                  <pattern>com.codahale</pattern>
-                  <shadedPattern>${shaded.package}.com.codahale</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.fasterxml</pattern>
-                  <shadedPattern>${shaded.package}.com.fasterxml</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.google.common</pattern>
-                  <shadedPattern>${shaded.package}.com.google.common</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.jamesmurty</pattern>
-                  <shadedPattern>${shaded.package}.com.jamesmurty</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.jcraft</pattern>
-                  <shadedPattern>${shaded.package}.com.jcraft</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.lmax</pattern>
-                  <shadedPattern>${shaded.package}.com.lmax</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.sun.jersey</pattern>
-                  <shadedPattern>${shaded.package}.com.sun.jersey</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.thoughtworks</pattern>
-                  <shadedPattern>${shaded.package}.com.thoughtworks</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.yammer</pattern>
-                  <shadedPattern>${shaded.package}.com.yammer</shadedPattern>
-                </relocation>
-                <!-- IO relocations -->
-                <relocation>
-                  <pattern>io.netty</pattern>
-                  <shadedPattern>${shaded.package}.io.netty</shadedPattern>
-                </relocation>
-                <!-- ORG relocations -->
-                <relocation>
-                  <pattern>org.antlr</pattern>
-                  <shadedPattern>${shaded.package}.org.antlr</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.aopalliance</pattern>
-                  <shadedPattern>${shaded.package}.org.aopalliance</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.codehaus</pattern>
-                  <shadedPattern>${shaded.package}.org.codehaus</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.fusesource</pattern>
-                  <shadedPattern>${shaded.package}.org.fusesource</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.hamcrest</pattern>
-                  <shadedPattern>${shaded.package}.org.hamcrest</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.hsqldb</pattern>
-                  <shadedPattern>${shaded.package}.org.hsqldb</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.iq80</pattern>
-                  <shadedPattern>${shaded.package}.org.iq80</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.jamon</pattern>
-                  <shadedPattern>${shaded.package}.org.jamon</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.jboss</pattern>
-                  <shadedPattern>${shaded.package}.org.jboss</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.jcodings</pattern>
-                  <shadedPattern>${shaded.package}.org.jcodings</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.jets3t</pattern>
-                  <shadedPattern>${shaded.package}.org.jets3t</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.joni</pattern>
-                  <shadedPattern>${shaded.package}.org.joni</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.junit</pattern>
-                  <shadedPattern>${shaded.package}.org.junit</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.kosmix</pattern>
-                  <shadedPattern>${shaded.package}.org.kosmix</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.mortbay</pattern>
-                  <shadedPattern>${shaded.package}.org.mortbay</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.objectweb</pattern>
-                  <shadedPattern>${shaded.package}.org.objectweb</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.stringtemplate</pattern>
-                  <shadedPattern>${shaded.package}.org.stringtemplate</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.tukaani</pattern>
-                  <shadedPattern>${shaded.package}.org.tukaani</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.znerd</pattern>
-                  <shadedPattern>${shaded.package}.org.znerd</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.avro</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.avro</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.commons</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.commons</shadedPattern>
-                  <excludes>
-                    <exclude>org.apache.commons.csv.**</exclude>
-                    <exclude>org.apache.commons.logging.**</exclude>
-                  </excludes>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.directory</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.directory</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.http</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.http</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.jasper</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.jasper</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.jute</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.jute</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.mina</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.mina</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.oro</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.oro</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.taglibs</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.taglibs</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.thrift</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.thrift</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.tools</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.tools</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.twill</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.twill</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.velocity</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.velocity</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.zookeeper</pattern>
-                  <shadedPattern>${shaded.package}.org.apache.zookeeper</shadedPattern>
-                </relocation>
-                <!-- NET relocations -->
-                <relocation>
-                  <pattern>net</pattern>
-                  <shadedPattern>${shaded.package}.net</shadedPattern>
-                </relocation>
-                <!-- Misc relocations -->
-                <relocation>
-                  <pattern>antlr</pattern>
-                  <shadedPattern>${shaded.package}.antlr</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>it.unimi</pattern>
-                  <shadedPattern>${shaded.package}.it.unimi</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>jline</pattern>
-                  <shadedPattern>${shaded.package}.jline</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>junit</pattern>
-                  <shadedPattern>${shaded.package}.junit</shadedPattern>
-                </relocation>
-              </relocations>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java
deleted file mode 100644
index 30a92b4..0000000
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig;
-
-import static org.apache.commons.lang3.StringUtils.isEmpty;
-
-import java.io.IOException;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.phoenix.mapreduce.PhoenixInputFormat;
-import org.apache.phoenix.mapreduce.PhoenixRecordWritable;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType;
-import org.apache.phoenix.pig.util.PhoenixPigSchemaUtil;
-import org.apache.phoenix.pig.util.QuerySchemaParserFunction;
-import org.apache.phoenix.pig.util.TableSchemaParserFunction;
-import org.apache.phoenix.pig.util.TypeUtil;
-import org.apache.pig.Expression;
-import org.apache.pig.LoadFunc;
-import org.apache.pig.LoadMetadata;
-import org.apache.pig.PigException;
-import org.apache.pig.ResourceSchema;
-import org.apache.pig.ResourceStatistics;
-import org.apache.pig.backend.executionengine.ExecException;
-import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit;
-import org.apache.pig.data.Tuple;
-import org.apache.pig.impl.util.ObjectSerializer;
-import org.apache.pig.impl.util.UDFContext;
-
-import com.google.common.base.Preconditions;
-
-
-/**
- * LoadFunc to load data from HBase using Phoenix.
- *
- * Example usage:
- * a) TABLE
- *   i)   A = load 'hbase://table/HIRES' using
- *        org.apache.phoenix.pig.PhoenixHBaseLoader('localhost');
- *
- *        Loads all columns of the table 'HIRES'.
- *
- *   ii)  A = load 'hbase://table/HIRES/id,name' using
- *        org.apache.phoenix.pig.PhoenixHBaseLoader('localhost');
- *
- *        Loads only the id and name columns of the table 'HIRES'.
- *
- * b) QUERY
- *   i)   B = load 'hbase://query/SELECT fname, lname FROM HIRES' using
- *        org.apache.phoenix.pig.PhoenixHBaseLoader('localhost');
- *
- *        Loads the fname and lname columns returned by the query.
- *        (See the usage sketch that follows this class.)
- */
-public final class PhoenixHBaseLoader extends LoadFunc implements LoadMetadata {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixHBaseLoader.class);
-    private static final String PHOENIX_TABLE_NAME_SCHEME = "hbase://table/";
-    private static final String PHOENIX_QUERY_SCHEME      = "hbase://query/";
-    private static final String RESOURCE_SCHEMA_SIGNATURE = "phoenix.pig.schema";
-   
-    private Configuration config;
-    private String tableName;
-    private String selectQuery;
-    private String zkQuorum ;
-    private PhoenixInputFormat<PhoenixRecordWritable> inputFormat;
-    private RecordReader<NullWritable,PhoenixRecordWritable> reader;
-    private String contextSignature;
-    private ResourceSchema schema;
-       
-    /**
-     * @param zkQuorum the ZooKeeper quorum used to connect to HBase/Phoenix
-     */
-    public PhoenixHBaseLoader(String zkQuorum) {
-        super();
-        Preconditions.checkNotNull(zkQuorum);
-        Preconditions.checkState(zkQuorum.length() > 0, "Zookeeper quorum cannot be empty!");
-        this.zkQuorum = zkQuorum;
-    }
-    
-    @Override
-    public void setLocation(String location, Job job) throws IOException {
-        PhoenixConfigurationUtil.loadHBaseConfiguration(job);
-
-        final Configuration configuration = job.getConfiguration();
-        //explicitly turning off combining splits. 
-        configuration.setBoolean("pig.noSplitCombination", true);
-
-        this.initializePhoenixPigConfiguration(location, configuration);
-    }
-
-    /**
-     * Initialize PhoenixPigConfiguration if it is null. Called by {@link #setLocation} and {@link #getSchema}
-     * @param location the load location, either a table or query URI
-     * @param configuration the job configuration to populate
-     * @throws PigException if the location cannot be parsed
-     */
-    private void initializePhoenixPigConfiguration(final String location, final Configuration configuration) throws IOException {
-        if(this.config != null) {
-            return;
-        }
-        this.config = configuration;
-        this.config.set(HConstants.ZOOKEEPER_QUORUM,this.zkQuorum);
-        PhoenixConfigurationUtil.setInputClass(this.config, PhoenixRecordWritable.class);
-        Pair<String,String> pair = null;
-        try {
-            if (location.startsWith(PHOENIX_TABLE_NAME_SCHEME)) {
-                String tableSchema = location.substring(PHOENIX_TABLE_NAME_SCHEME.length());
-                final TableSchemaParserFunction parseFunction = new TableSchemaParserFunction();
-                pair =  parseFunction.apply(tableSchema);
-                PhoenixConfigurationUtil.setSchemaType(this.config, SchemaType.TABLE);
-             } else if (location.startsWith(PHOENIX_QUERY_SCHEME)) {
-                this.selectQuery = location.substring(PHOENIX_QUERY_SCHEME.length());
-                final QuerySchemaParserFunction queryParseFunction = new QuerySchemaParserFunction(this.config);
-                pair = queryParseFunction.apply(this.selectQuery);
-                PhoenixConfigurationUtil.setInputQuery(this.config, this.selectQuery);
-                PhoenixConfigurationUtil.setSchemaType(this.config, SchemaType.QUERY);
-            }
-            this.tableName = pair.getFirst();
-            final String selectedColumns = pair.getSecond();
-            
-            if(isEmpty(this.tableName) && isEmpty(this.selectQuery)) {
-                printUsage(location);
-            }
-            PhoenixConfigurationUtil.setInputTableName(this.config, this.tableName);
-            if(!isEmpty(selectedColumns)) {
-                PhoenixConfigurationUtil.setSelectColumnNames(this.config, selectedColumns.split(","));   
-            }
-        } catch(IllegalArgumentException iae) {
-            printUsage(location);
-        } 
-    }
-
-  
-    @Override
-    public String relativeToAbsolutePath(String location, Path curDir) throws IOException {
-        return location;
-    }
-
-    @Override
-    public InputFormat getInputFormat() throws IOException {
-        if(inputFormat == null) {
-            inputFormat = new PhoenixInputFormat<PhoenixRecordWritable>();
-            PhoenixConfigurationUtil.setInputClass(this.config, PhoenixRecordWritable.class);
-        }
-        return inputFormat;
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void prepareToRead(RecordReader reader, PigSplit split) throws IOException {
-        this.reader = reader;
-        final String resourceSchemaAsStr = getValueFromUDFContext(this.contextSignature,RESOURCE_SCHEMA_SIGNATURE);
-        if (resourceSchemaAsStr == null) {
-            throw new IOException("Could not find schema in UDF context");
-        }
-       schema = (ResourceSchema)ObjectSerializer.deserialize(resourceSchemaAsStr); 
-    }
-
-     /*
-     * @see org.apache.pig.LoadFunc#setUDFContextSignature(java.lang.String)
-     */
-    @Override
-    public void setUDFContextSignature(String signature) {
-        this.contextSignature = signature;
-    }
-    
-    @Override
-    public Tuple getNext() throws IOException {
-        try {
-            if(!reader.nextKeyValue()) {
-                return null; 
-             }
-            final PhoenixRecordWritable record = reader.getCurrentValue();
-            if(record == null) {
-                return null;
-            }
-            final Tuple tuple = TypeUtil.transformToTuple(record, schema.getFields());
-            return tuple;
-       } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            int errCode = 6018;
-            final String errMsg = "Error while reading input";
-            throw new ExecException(errMsg, errCode,PigException.REMOTE_ENVIRONMENT, e);
-       } 
-    }
-    
-    private void printUsage(final String location) throws PigException {
-        String locationErrMsg = String.format("The input location in load statement should be of the form " +
-                "%s<table name> or %s<query>. Got [%s] ",PHOENIX_TABLE_NAME_SCHEME,PHOENIX_QUERY_SCHEME,location);
-        LOG.error(locationErrMsg);
-        throw new PigException(locationErrMsg);
-    }
-    
-    @Override
-    public ResourceSchema getSchema(String location, Job job) throws IOException {
-        if(schema != null) {
-            return schema;
-        }
-
-        PhoenixConfigurationUtil.loadHBaseConfiguration(job);
-        final Configuration configuration = job.getConfiguration();
-        this.initializePhoenixPigConfiguration(location, configuration);
-        this.schema = PhoenixPigSchemaUtil.getResourceSchema(this.config);
-        if(LOG.isDebugEnabled()) {
-            LOG.debug(String.format("Resource Schema generated for location [%s] is [%s]", location, schema.toString()));
-        }
-        this.storeInUDFContext(this.contextSignature, RESOURCE_SCHEMA_SIGNATURE, ObjectSerializer.serialize(schema));
-        return schema;
-    }
-
-    @Override
-    public ResourceStatistics getStatistics(String location, Job job) throws IOException {
-       // not implemented
-        return null;
-    }
-
-    @Override
-    public String[] getPartitionKeys(String location, Job job) throws IOException {
-     // not implemented
-        return null;
-    }
-
-    @Override
-    public void setPartitionFilter(Expression partitionFilter) throws IOException {
-     // not implemented
-    }
- 
-    private void storeInUDFContext(final String signature,final String key,final String value) {
-        final UDFContext udfContext = UDFContext.getUDFContext();
-        final Properties props = udfContext.getUDFProperties(this.getClass(), new String[]{signature});
-        props.put(key, value);
-    }
-    
-    private String getValueFromUDFContext(final String signature,final String key) {
-        final UDFContext udfContext = UDFContext.getUDFContext();
-        final Properties props = udfContext.getUDFProperties(this.getClass(), new String[]{signature});
-        return props.getProperty(key);
-    }
-}
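
The loader's Javadoc above documents the 'hbase://table/...' and 'hbase://query/...' load locations. For reference, below is a minimal sketch of driving that LOAD from Java through Pig's PigServer API; the table name HIRES, the 'localhost' ZooKeeper quorum, and local execution mode are illustrative assumptions rather than anything defined by this module.

import java.util.Iterator;
import org.apache.pig.ExecType;
import org.apache.pig.PigServer;
import org.apache.pig.data.Tuple;

public class PhoenixLoaderExample {
    public static void main(String[] args) throws Exception {
        // Local Pig execution for illustration; a real job would typically use ExecType.MAPREDUCE.
        PigServer pigServer = new PigServer(ExecType.LOCAL);
        // Load all columns of the assumed Phoenix table HIRES through PhoenixHBaseLoader,
        // connecting via the assumed 'localhost' ZooKeeper quorum.
        pigServer.registerQuery("A = load 'hbase://table/HIRES' using "
                + "org.apache.phoenix.pig.PhoenixHBaseLoader('localhost');");
        // openIterator triggers execution and streams back the resulting tuples.
        Iterator<Tuple> rows = pigServer.openIterator("A");
        while (rows.hasNext()) {
            System.out.println(rows.next());
        }
    }
}

The same statement can of course be run directly from a Pig script or the grunt shell, exactly as in the Javadoc examples above.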
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtil.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtil.java
deleted file mode 100644
index 7e0203f..0000000
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtil.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType;
-import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.util.ColumnInfo;
-import org.apache.pig.ResourceSchema;
-import org.apache.pig.ResourceSchema.ResourceFieldSchema;
-
-import com.google.common.base.Preconditions;
-
-/**
- *
- * Utility to generate a Pig ResourceSchema from a list of {@link ColumnInfo} entries.
- *
- */
-public final class PhoenixPigSchemaUtil {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixPigSchemaUtil.class);
-    
-    private PhoenixPigSchemaUtil() {
-    }
-    
-    static class Dependencies {
-    	List<ColumnInfo> getSelectColumnMetadataList(Configuration configuration) throws SQLException {
-    		return PhoenixConfigurationUtil.getSelectColumnMetadataList(configuration);
-    	}
-    }
-    
-    public static ResourceSchema getResourceSchema(final Configuration configuration, Dependencies dependencies) throws IOException {
-        
-        final ResourceSchema schema = new ResourceSchema();
-        try {
-            List<ColumnInfo> columns = null;
-            final SchemaType schemaType = PhoenixConfigurationUtil.getSchemaType(configuration);
-            if(schemaType == SchemaType.QUERY) {
-                final String sqlQuery = PhoenixConfigurationUtil.getSelectStatement(configuration);
-                Preconditions.checkNotNull(sqlQuery, "No Sql Query exists within the configuration");
-                final SqlQueryToColumnInfoFunction function = new SqlQueryToColumnInfoFunction(configuration);
-                columns = function.apply(sqlQuery);
-            } else if (schemaType == SchemaType.TABLE) {
-                columns = dependencies.getSelectColumnMetadataList(configuration);
-            }
-            ResourceFieldSchema fields[] = new ResourceFieldSchema[columns.size()];
-            int i = 0;
-            for(ColumnInfo cinfo : columns) {
-                int sqlType = cinfo.getSqlType();
-                PDataType phoenixDataType = PDataType.fromTypeId(sqlType);
-                byte pigType = TypeUtil.getPigDataTypeForPhoenixType(phoenixDataType);
-                ResourceFieldSchema field = new ResourceFieldSchema();
-                field.setType(pigType).setName(cinfo.getDisplayName());
-                fields[i++] = field;
-            }
-            schema.setFields(fields);    
-        } catch(SQLException sqle) {
-            LOG.error(String.format("Error: SQLException [%s] ",sqle.getMessage()));
-            throw new IOException(sqle);
-        }
-        
-        return schema;
-    }
-    
-    public static ResourceSchema getResourceSchema(final Configuration configuration) throws IOException {
-        return getResourceSchema(configuration, new Dependencies());
-    }
-}
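
For orientation, here is a minimal sketch of invoking the schema utility above. It assumes a Phoenix table named HIRES reachable through a ZooKeeper quorum on localhost, and it uses only the PhoenixConfigurationUtil setters already referenced by this class; both the table name and the quorum are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType;
import org.apache.phoenix.pig.util.PhoenixPigSchemaUtil;
import org.apache.pig.ResourceSchema;

public class SchemaUtilExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost");        // assumed quorum
        PhoenixConfigurationUtil.setInputTableName(conf, "HIRES"); // assumed table
        PhoenixConfigurationUtil.setSchemaType(conf, SchemaType.TABLE);
        // Derive the Pig ResourceSchema from the Phoenix column metadata.
        ResourceSchema schema = PhoenixPigSchemaUtil.getResourceSchema(conf);
        System.out.println(schema);
    }
}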
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
deleted file mode 100644
index c71c92a..0000000
--- a/phoenix-spark/pom.xml
+++ /dev/null
@@ -1,607 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>5.1.0-HBase-2.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>phoenix-spark</artifactId>
-  <name>Phoenix - Spark</name>
-
-  <properties>
-    <top.dir>${project.basedir}/..</top.dir>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-    </dependency>
-
-    <!-- Force import of Spark's servlet API for unit tests -->
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
-      <version>3.1.0</version>
-      <scope>test</scope>
-    </dependency>
-
-    <!-- Mark Spark / Scala as provided -->
-    <dependency>
-      <groupId>org.scala-lang</groupId>
-      <artifactId>scala-library</artifactId>
-      <version>${scala.version}</version>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-core_${scala.binary.version}</artifactId>
-      <version>${spark.version}</version>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-sql_${scala.binary.version}</artifactId>
-      <version>${spark.version}</version>
-      <scope>provided</scope>
-    </dependency>
-
-    <!-- Test dependencies -->
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.scalatest</groupId>
-      <artifactId>scalatest_${scala.binary.version}</artifactId>
-      <version>2.2.4</version>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.scalamock</groupId>
-      <artifactId>scalamock-scalatest-support_${scala.binary.version}</artifactId>
-      <version>3.1.4</version>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-client</artifactId>
-      <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jboss.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jboss.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hadoop.version}</version>
-      <type>test-jar</type>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jboss.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <version>${hadoop.version}</version>
-      <type>test-jar</type>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jboss.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-testing-util</artifactId>
-      <scope>test</scope>
-      <optional>true</optional>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-client</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.thrift</groupId>
-          <artifactId>thrift</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-2.1</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-api-2.1</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>servlet-api-2.5</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-json</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-server</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jboss.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
-      <scope>test</scope>
-      <type>test-jar</type>
-      <exclusions>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.thrift</groupId>
-          <artifactId>thrift</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-2.1</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-api-2.1</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>servlet-api-2.5</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-json</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-server</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jboss.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
-      <scope>test</scope>
-      <type>test-jar</type>
-      <exclusions>
-        <exclusion>
-          <groupId>log4j</groupId>
-          <artifactId>log4j</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.thrift</groupId>
-          <artifactId>thrift</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-2.1</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jsp-api-2.1</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>servlet-api-2.5</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-json</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jersey</groupId>
-          <artifactId>jersey-server</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jruby</groupId>
-          <artifactId>jruby-complete</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.jboss.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-server</artifactId>
-      <version>${hbase.version}</version>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-it</artifactId>
-      <version>${hbase.version}</version>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-        <groupId>org.apache.tephra</groupId>
-        <artifactId>tephra-core</artifactId>
-        <type>test-jar</type>
-        <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <testSourceDirectory>src/it/scala</testSourceDirectory>
-    <testResources><testResource><directory>src/it/resources</directory></testResource></testResources>
-    <plugins>
-        <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-compiler-plugin</artifactId>
-            <configuration>
-                <source>1.8</source>
-                <target>1.8</target>
-            </configuration>
-        </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-compiler-plugin</artifactId>
-      </plugin>
-
-      <plugin>
-        <groupId>net.alchim31.maven</groupId>
-        <artifactId>scala-maven-plugin</artifactId>
-        <version>3.4.4</version>
-        <configuration>
-          <charset>${project.build.sourceEncoding}</charset>
-          <jvmArgs>
-            <jvmArg>-Xmx1024m</jvmArg>
-          </jvmArgs>
-          <scalaVersion>${scala.version}</scalaVersion>
-          <scalaCompatVersion>${scala.binary.version}</scalaCompatVersion>
-        </configuration>
-        <executions>
-          <execution>
-            <id>scala-compile-first</id>
-            <phase>process-resources</phase>
-            <goals>
-              <goal>add-source</goal>
-              <goal>compile</goal>
-            </goals>
-          </execution>
-          <execution>
-            <id>scala-test-compile</id>
-            <phase>process-test-resources</phase>
-            <goals>
-              <goal>testCompile</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.scalatest</groupId>
-        <artifactId>scalatest-maven-plugin</artifactId>
-        <version>1.0</version>
-        <configuration>
-          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
-          <junitxml>.</junitxml>
-          <filereports>WDF TestSuite.txt</filereports>
-        </configuration>
-        <executions>
-          <execution>
-            <id>test</id>
-            <phase>test</phase>
-            <goals>
-              <goal>test</goal>
-            </goals>
-            <configuration>
-              <skipTests>true</skipTests>
-            </configuration>
-          </execution>
-          <execution>
-            <id>integration-test</id>
-            <phase>integration-test</phase>
-            <goals>
-              <goal>test</goal>
-            </goals>
-            <configuration>
-              <!-- Parallel execution must remain false until we can switch to
-              JUnit 4.13; see https://github.com/junit-team/junit4/issues/1223
-              -->
-              <parallel>false</parallel>
-              <tagsToExclude>Integration-Test</tagsToExclude>
-              <argLine>-Xmx1536m -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>empty-javadoc-jar</id>
-            <phase>package</phase>
-            <goals>
-              <goal>jar</goal>
-            </goals>
-            <configuration>
-              <classifier>javadoc</classifier>
-              <classesDirectory>${basedir}/javadoc</classesDirectory>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/phoenix-spark/src/it/resources/hbase-site.xml b/phoenix-spark/src/it/resources/hbase-site.xml
deleted file mode 100644
index 326ef70..0000000
--- a/phoenix-spark/src/it/resources/hbase-site.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
... 419 lines suppressed ...