Posted to commits@phoenix.apache.org by st...@apache.org on 2023/04/24 10:44:52 UTC

[phoenix-connectors] branch master updated: PHOENIX-6934 Remove Phoenix Pig connector

This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix-connectors.git


The following commit(s) were added to refs/heads/master by this push:
     new 6e2601a  PHOENIX-6934 Remove Phoenix Pig connector
6e2601a is described below

commit 6e2601a23c76498d5ab1724e01a35ed135d27474
Author: Istvan Toth <st...@apache.org>
AuthorDate: Mon Apr 24 08:12:59 2023 +0200

    PHOENIX-6934 Remove Phoenix Pig connector
---
 README.md                                          |   2 +-
 examples/pig/test.pig                              |  19 -
 examples/pig/testdata                              |  18 -
 phoenix-pig-base/phoenix4-pig-shaded/pom.xml       |  83 --
 phoenix-pig-base/phoenix4-pig/pom.xml              |  52 --
 phoenix-pig-base/phoenix5-pig-shaded/pom.xml       |  95 ---
 phoenix-pig-base/phoenix5-pig/pom.xml              |  63 --
 phoenix-pig-base/pom.xml                           | 461 ------------
 .../it/java/org/apache/phoenix/pig/BasePigIT.java  |  92 ---
 .../apache/phoenix/pig/PhoenixHBaseLoaderIT.java   | 838 ---------------------
 .../apache/phoenix/pig/PhoenixHBaseStorerIT.java   | 292 -------
 .../phoenix/pig/udf/ReserveNSequenceTestIT.java    | 306 --------
 .../org/apache/phoenix/pig/PhoenixHBaseLoader.java | 268 -------
 .../apache/phoenix/pig/PhoenixHBaseStorage.java    | 236 ------
 .../apache/phoenix/pig/udf/ReserveNSequence.java   | 129 ----
 .../phoenix/pig/util/PhoenixPigSchemaUtil.java     |  90 ---
 .../pig/util/QuerySchemaParserFunction.java        | 119 ---
 .../pig/util/SqlQueryToColumnInfoFunction.java     |  84 ---
 .../pig/util/TableSchemaParserFunction.java        |  52 --
 .../java/org/apache/phoenix/pig/util/TypeUtil.java | 349 ---------
 .../phoenix/pig/util/PhoenixPigSchemaUtilTest.java |  93 ---
 .../pig/util/QuerySchemaParserFunctionTest.java    |  96 ---
 .../pig/util/SqlQueryToColumnInfoFunctionTest.java |  63 --
 .../pig/util/TableSchemaParserFunctionTest.java    |  56 --
 .../org/apache/phoenix/pig/util/TypeUtilTest.java  |  83 --
 phoenix4-connectors-assembly/pom.xml               |  28 -
 .../src/build/components/phoenix4-jars.xml         |   8 -
 phoenix5-connectors-assembly/pom.xml               |  28 -
 .../src/build/components/phoenix5-jars.xml         |   8 -
 pom.xml                                            |  36 -
 30 files changed, 1 insertion(+), 4146 deletions(-)

diff --git a/README.md b/README.md
index 58f9e71..c7487ff 100644
--- a/README.md
+++ b/README.md
@@ -22,4 +22,4 @@ limitations under the License.
 Copyright ©2019 [Apache Software Foundation](http://www.apache.org/). All Rights Reserved. 
 
 ## Introduction
-This repo contains the Flume, Pig, Kafka, Spark and Hive connectors for Phoenix.
\ No newline at end of file
+This repo contains the Flume, Kafka, Spark and Hive connectors for Phoenix.
\ No newline at end of file
diff --git a/examples/pig/test.pig b/examples/pig/test.pig
deleted file mode 100644
index 6835d00..0000000
--- a/examples/pig/test.pig
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-A = load 'examples/pig/testdata' as (a:chararray, b:chararray, c:int, d:chararray, e: datetime) ;
-STORE A into 'hbase://TESTPHX' using org.apache.phoenix.pig.PhoenixHBaseStorage('localhost','-batchSize 1000');
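
The deleted script above wrote tab-separated rows into the TESTPHX table through the removed PhoenixHBaseStorage, flushing every 1000 rows. The same write path remains available through plain Phoenix JDBC; the following is a minimal sketch under the assumptions the script also made (a Phoenix cluster reachable at localhost and an existing TESTPHX table whose five columns match the tuple; the phoenix-client jar must be on the classpath):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.Timestamp;

    public class PlainJdbcUpsert {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                     DriverManager.getConnection("jdbc:phoenix:localhost");
                 PreparedStatement ps = conn.prepareStatement(
                     "UPSERT INTO TESTPHX VALUES (?, ?, ?, ?, ?)")) {
                // One row shaped like the deleted examples/pig/testdata file.
                ps.setString(1, "00D300000000XHP");
                ps.setString(2, "124");
                ps.setInt(3, 123456);
                ps.setString(4, "weq");
                ps.setTimestamp(5, Timestamp.valueOf("2012-12-12 00:00:00"));
                ps.executeUpdate();
                // Phoenix buffers mutations until commit; committing every N rows
                // approximates the old storage's -batchSize behaviour.
                conn.commit();
            }
        }
    }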
diff --git a/examples/pig/testdata b/examples/pig/testdata
deleted file mode 100644
index 15f3f0b..0000000
--- a/examples/pig/testdata
+++ /dev/null
@@ -1,18 +0,0 @@
-00D300000000XHP	124	123456	weq	2012-12-12
-00D300000000XHP	111	123456	nab	2012-01-21
-00D300000000UIH	101	123456	ben	2014-01-01
-00D300000000XHP	124	123456	weq	2012-12-12
-00D300000000XHP	111	123456	nab	2012-01-21
-00D300000000UIH	101	123456	ben	2014-01-01
-00D300000000XHP	124	123456	weq	2012-12-12
-00D300000000XHP	111	123456	nab	2012-01-21
-00D300000000UIH	101	123456	ben	2014-01-01
-00D300000000XHP	124	123456	weq	2012-12-12
-00D300000000XHP	111	123456	nab	2012-01-21
-00D300000000UIH	101	123456	ben	2014-01-01
-00D300000000XHP	124	123456	weq	2012-12-12
-00D300000000XHP	111	123456	nab	2012-01-21
-00D300000000UIH	101	123456	ben	2014-01-01
-00D300000000XHP	124	123456	weq	2012-12-12
-00D300000000XHP	111	123456	nab	2012-01-21
-00D300000000UIH	101	123456	ben	2014-01-01
diff --git a/phoenix-pig-base/phoenix4-pig-shaded/pom.xml b/phoenix-pig-base/phoenix4-pig-shaded/pom.xml
deleted file mode 100644
index c825c9f..0000000
--- a/phoenix-pig-base/phoenix4-pig-shaded/pom.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <artifactId>phoenix-pig-base</artifactId>
-    <groupId>org.apache.phoenix</groupId>
-    <version>6.0.0-SNAPSHOT</version>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-
-  <artifactId>phoenix4-pig-shaded</artifactId>
-  <name>Shaded Phoenix Pig Connector for Phoenix 4</name>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <executions>
-          <execution>
-             <id>default-compile</id>
-             <phase>none</phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix4-pig</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hbase-compat-${hbase.compat.version}</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <!-- We want to take the implementation from Pig -->
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-</project>
\ No newline at end of file
diff --git a/phoenix-pig-base/phoenix4-pig/pom.xml b/phoenix-pig-base/phoenix4-pig/pom.xml
deleted file mode 100644
index fd5e713..0000000
--- a/phoenix-pig-base/phoenix4-pig/pom.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <artifactId>phoenix-pig-base</artifactId>
-    <groupId>org.apache.phoenix</groupId>
-    <version>6.0.0-SNAPSHOT</version>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-
-  <artifactId>phoenix4-pig</artifactId>
-  <name>Phoenix Pig Connector for Phoenix 4</name>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-resources-plugin</artifactId>
-      </plugin>
-    </plugins>
-  </build>
-
-</project>
\ No newline at end of file
diff --git a/phoenix-pig-base/phoenix5-pig-shaded/pom.xml b/phoenix-pig-base/phoenix5-pig-shaded/pom.xml
deleted file mode 100644
index 19e6845..0000000
--- a/phoenix-pig-base/phoenix5-pig-shaded/pom.xml
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <artifactId>phoenix-pig-base</artifactId>
-    <groupId>org.apache.phoenix</groupId>
-    <version>6.0.0-SNAPSHOT</version>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-
-  <artifactId>phoenix5-pig-shaded</artifactId>
-  <name>Shaded Phoenix Pig Connector for Phoenix 5</name>
-
-  <properties>
-    <top.dir>${project.basedir}/..</top.dir>
-    <phoenix.version>${phoenix-five.version}</phoenix.version>
-    <hbase.version>${hbase-two.version}</hbase.version>
-    <hbase.compat.version>${hbase-two.compat.version}</hbase.compat.version>
-    <hadoop.version>${hadoop-three.version}</hadoop.version>
-    <jdk.version>1.8</jdk.version>
-    <phoenix.main.version>5</phoenix.main.version>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <executions>
-          <execution>
-             <id>default-compile</id>
-             <phase>none</phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix5-pig</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hbase-compat-${hbase.compat.version}</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <!-- We want to take the implementation from Pig -->
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-
-</project>
\ No newline at end of file
diff --git a/phoenix-pig-base/phoenix5-pig/pom.xml b/phoenix-pig-base/phoenix5-pig/pom.xml
deleted file mode 100644
index 35fa1f6..0000000
--- a/phoenix-pig-base/phoenix5-pig/pom.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <artifactId>phoenix-pig-base</artifactId>
-    <groupId>org.apache.phoenix</groupId>
-    <version>6.0.0-SNAPSHOT</version>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-
-  <artifactId>phoenix5-pig</artifactId>
-  <name>Phoenix Pig Connector for Phoenix 5</name>
-
-  <properties>
-    <top.dir>${project.basedir}/..</top.dir>
-    <phoenix.version>${phoenix-five.version}</phoenix.version>
-    <hbase.version>${hbase-two.version}</hbase.version>
-    <hbase.compat.version>${hbase-two.compat.version}</hbase.compat.version>
-    <hadoop.version>${hadoop-three.version}</hadoop.version>
-    <jdk.version>1.8</jdk.version>
-    <phoenix.main.version>5</phoenix.main.version>
-
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-resources-plugin</artifactId>
-      </plugin>
-    </plugins>
-  </build>
-
-</project>
\ No newline at end of file
diff --git a/phoenix-pig-base/pom.xml b/phoenix-pig-base/pom.xml
deleted file mode 100644
index cd2fb28..0000000
--- a/phoenix-pig-base/pom.xml
+++ /dev/null
@@ -1,461 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix-connectors</artifactId>
-    <version>6.0.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>phoenix-pig-base</artifactId>
-  <name>Phoenix Pig Connector - Base</name>
-  <packaging>pom</packaging>
-  <modules>
-    <module>phoenix4-pig</module>
-    <module>phoenix4-pig-shaded</module>
-    <module>phoenix5-pig</module>
-    <module>phoenix5-pig-shaded</module>
-  </modules>
-
-  <properties>
-    <top.dir>${project.basedir}/..</top.dir>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hbase-compat-${hbase.compat.version}</artifactId>
-      <scope>runtime</scope>
-      <optional>true</optional>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.pig</groupId>
-      <artifactId>pig</artifactId>
-      <classifier>h2</classifier>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.github.stephenc.findbugs</groupId>
-      <artifactId>findbugs-annotations</artifactId>
-      <version>${findbugs-annotations.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-      <version>${commons-cli.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>joda-time</groupId>
-      <artifactId>joda-time</artifactId>
-      <version>${jodatime.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix.thirdparty</groupId>
-      <artifactId>phoenix-shaded-guava</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-core</artifactId>
-      <classifier>tests</classifier>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-it</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-  </dependencies>
-
-  <build>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-dependency-plugin</artifactId>
-          <configuration>
-            <!-- AFAICT these are bogus dependency problems -->
-            <ignoredUsedUndeclaredDependencies>
-              <ignoredUsedUndeclaredDependency>
-                com.github.spotbugs:spotbugs-annotations
-              </ignoredUsedUndeclaredDependency>
-            </ignoredUsedUndeclaredDependencies>
-            <ignoredUnusedDeclaredDependencies>
-              <ignoredUnusedDeclaredDependency>
-                com.github.stephenc.findbugs:findbugs-annotations
-              </ignoredUnusedDeclaredDependency>
-              <ignoredUnusedDeclaredDependency>
-                org.apache.hbase:hbase-it
-              </ignoredUnusedDeclaredDependency>
-              <ignoredUnusedDeclaredDependency>
-                org.apache.hadoop:hadoop-hdfs
-              </ignoredUnusedDeclaredDependency>
-              <ignoredUnusedDeclaredDependency>
-                org.apache.phoenix:phoenix-hbase-compat-${hbase.compat.version}
-              </ignoredUnusedDeclaredDependency>
-            </ignoredUnusedDeclaredDependencies>
-          </configuration>
-        </plugin>
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>build-helper-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <executions>
-            <execution>
-              <id>add-parent-source</id>
-              <phase>generate-sources</phase>
-              <goals>
-                <goal>add-source</goal>
-              </goals>
-              <configuration>
-                <sources>
-                  <source>${project.parent.basedir}/src/main/java</source>
-                </sources>
-              </configuration>
-            </execution>
-            <execution>
-              <id>add-parent-test-source</id>
-              <phase>generate-sources</phase>
-              <goals>
-                <goal>add-test-source</goal>
-              </goals>
-              <configuration>
-                <sources>
-                  <source>${project.parent.basedir}/src/test/java</source>
-                  <source>${project.parent.basedir}/src/it/java</source>
-                </sources>
-              </configuration>
-            </execution>
-          </executions>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-shade-plugin</artifactId>
-          <executions>
-            <execution>
-              <phase>package</phase>
-              <goals>
-                <goal>shade</goal>
-              </goals>
-              <configuration>
-                <shadedArtifactAttached>false</shadedArtifactAttached>
-                <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
-                <shadeTestJar>false</shadeTestJar>
-                <transformers>
-                  <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                    <resource>README.md</resource>
-                    <file>${project.basedir}/../README.md</file>
-                  </transformer>
-                  <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                    <resource>LICENSE.txt</resource>
-                    <file>${project.basedir}/../LICENSE</file>
-                  </transformer>
-                  <transformer
-                    implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
-                    <resource>NOTICE</resource>
-                    <file>${project.basedir}/../NOTICE</file>
-                  </transformer>
-                </transformers>
-                <artifactSet>
-                  <includes>
-                    <include>*:*</include>
-                  </includes>
-                  <excludes>
-                    <exclude>org.apache.phoenix:phoenix-client</exclude>
-                    <exclude>org.apache.pig:pig</exclude>
-                    <exclude>joda-time:joda-time</exclude>
-                    <exclude>xom:xom</exclude>
-                  </excludes>
-                </artifactSet>
-                <filters>
-                  <filter>
-                    <artifact>*:*</artifact>
-                    <excludes>
-                      <exclude>META-INF/*.SF</exclude>
-                      <exclude>META-INF/*.DSA</exclude>
-                      <exclude>META-INF/*.RSA</exclude>
-                      <exclude>META-INF/license/*</exclude>
-                      <exclude>LICENSE.*</exclude>
-                      <exclude>NOTICE.*</exclude>
-                    </excludes>
-                  </filter>
-                </filters>
-                <relocations>
-                  <!-- COM relocation -->
-                  <relocation>
-                    <pattern>com.codahale</pattern>
-                    <shadedPattern>${shaded.package}.com.codahale</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>com.fasterxml</pattern>
-                    <shadedPattern>${shaded.package}.com.fasterxml</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>com.google.common</pattern>
-                    <shadedPattern>${shaded.package}.com.google.common</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>com.jamesmurty</pattern>
-                    <shadedPattern>${shaded.package}.com.jamesmurty</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>com.jcraft</pattern>
-                    <shadedPattern>${shaded.package}.com.jcraft</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>com.lmax</pattern>
-                    <shadedPattern>${shaded.package}.com.lmax</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>com.sun.jersey</pattern>
-                    <shadedPattern>${shaded.package}.com.sun.jersey</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>com.thoughtworks</pattern>
-                    <shadedPattern>${shaded.package}.com.thoughtworks</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>com.yammer</pattern>
-                    <shadedPattern>${shaded.package}.com.yammer</shadedPattern>
-                  </relocation>
-                  <!-- IO relocations -->
-                  <relocation>
-                    <pattern>io.netty</pattern>
-                    <shadedPattern>${shaded.package}.io.netty</shadedPattern>
-                  </relocation>
-                  <!-- ORG relocations -->
-                  <relocation>
-                    <pattern>org.antlr</pattern>
-                    <shadedPattern>${shaded.package}.org.antlr</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.aopalliance</pattern>
-                    <shadedPattern>${shaded.package}.org.aopalliance</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.codehaus</pattern>
-                    <shadedPattern>${shaded.package}.org.codehaus</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.fusesource</pattern>
-                    <shadedPattern>${shaded.package}.org.fusesource</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.hamcrest</pattern>
-                    <shadedPattern>${shaded.package}.org.hamcrest</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.hsqldb</pattern>
-                    <shadedPattern>${shaded.package}.org.hsqldb</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.iq80</pattern>
-                    <shadedPattern>${shaded.package}.org.iq80</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.jamon</pattern>
-                    <shadedPattern>${shaded.package}.org.jamon</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.jboss</pattern>
-                    <shadedPattern>${shaded.package}.org.jboss</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.jcodings</pattern>
-                    <shadedPattern>${shaded.package}.org.jcodings</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.jets3t</pattern>
-                    <shadedPattern>${shaded.package}.org.jets3t</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.joni</pattern>
-                    <shadedPattern>${shaded.package}.org.joni</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.junit</pattern>
-                    <shadedPattern>${shaded.package}.org.junit</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.kosmix</pattern>
-                    <shadedPattern>${shaded.package}.org.kosmix</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.mortbay</pattern>
-                    <shadedPattern>${shaded.package}.org.mortbay</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.objectweb</pattern>
-                    <shadedPattern>${shaded.package}.org.objectweb</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.stringtemplate</pattern>
-                    <shadedPattern>${shaded.package}.org.stringtemplate</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.tukaani</pattern>
-                    <shadedPattern>${shaded.package}.org.tukaani</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.znerd</pattern>
-                    <shadedPattern>${shaded.package}.org.znerd</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.avro</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.avro</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.commons</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.commons</shadedPattern>
-                    <excludes>
-                      <exclude>org.apache.commons.csv.**</exclude>
-                      <exclude>org.apache.commons.logging.**</exclude>
-                    </excludes>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.directory</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.directory</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.http</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.http</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.jasper</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.jasper</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.jute</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.jute</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.mina</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.mina</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.oro</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.oro</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.taglibs</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.taglibs</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.thrift</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.thrift</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.tools</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.tools</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.twill</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.twill</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.velocity</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.velocity</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>org.apache.zookeeper</pattern>
-                    <shadedPattern>${shaded.package}.org.apache.zookeeper</shadedPattern>
-                  </relocation>
-                  <!-- NET relocations -->
-                  <relocation>
-                    <pattern>net</pattern>
-                    <shadedPattern>${shaded.package}.net</shadedPattern>
-                  </relocation>
-                  <!-- Misc relocations -->
-                  <relocation>
-                    <pattern>antlr</pattern>
-                    <shadedPattern>${shaded.package}.antlr</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>it.unimi</pattern>
-                    <shadedPattern>${shaded.package}.it.unimi</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>jline</pattern>
-                    <shadedPattern>${shaded.package}.jline</shadedPattern>
-                  </relocation>
-                  <relocation>
-                    <pattern>junit</pattern>
-                    <shadedPattern>${shaded.package}.junit</shadedPattern>
-                  </relocation>
-                </relocations>
-              </configuration>
-            </execution>
-          </executions>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-  </build>
-</project>
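
The deleted base pom above carried the shading logic shared by both shaded modules: the maven-shade-plugin execution relocates the bundled third-party packages under ${shaded.package} so the connector jar cannot clash with whatever versions already sit on a cluster classpath. Below is a minimal sketch of verifying a relocation against a shaded jar; the concrete prefix org.apache.phoenix.shaded is an assumption, since the property's value is defined outside this diff:

    public class RelocationCheckSketch {
        public static void main(String[] args) {
            String[] names = {
                // Original Guava location: should be absent from the shaded jar.
                "com.google.common.collect.ImmutableList",
                // Assumed relocated location under ${shaded.package}.
                "org.apache.phoenix.shaded.com.google.common.collect.ImmutableList"
            };
            for (String name : names) {
                try {
                    Class.forName(name);
                    System.out.println("present: " + name);
                } catch (ClassNotFoundException e) {
                    System.out.println("missing: " + name);
                }
            }
        }
    }

Run with only the shaded connector jar on the classpath, the first lookup should fail and the second succeed, confirming the relocation took effect.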
diff --git a/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/BasePigIT.java b/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/BasePigIT.java
deleted file mode 100644
index 8550fe0..0000000
--- a/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/BasePigIT.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig;
-
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
-import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
-import org.apache.pig.data.TupleFactory;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
-
-import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class BasePigIT extends BaseTest {
-    protected TupleFactory tupleFactory;
-    protected String zkQuorum;
-    protected Connection conn;
-    protected Configuration conf;
-    protected PigServer pigServer;
-
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        props.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
-        // Must update config before starting server
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-    }
-    
-    @Before
-    public void setUp() throws Exception {
-        conf = getTestClusterConfig();
-        conf.set(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
-        // Set CURRENT_SCN to confirm that it's ignored
-        conf.set(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(System.currentTimeMillis()+QueryConstants.MILLIS_IN_DAY));
-        pigServer = new PigServer(ExecType.LOCAL, conf);
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        conn = DriverManager.getConnection(getUrl(), props);
-        zkQuorum = LOCALHOST + JDBC_PROTOCOL_SEPARATOR + getZKClientPort(conf);
-        tupleFactory = TupleFactory.getInstance();
-    }
-
-    @After
-    public void tearDown() throws Exception {
-        if(conn != null) {
-            conn.close();
-        }
-        if (pigServer != null) {
-            pigServer.shutdown();
-        }
-    }
-
-    protected static Configuration getTestClusterConfig() {
-        // don't want callers to modify config.
-        return new Configuration(config);
-    }
-
-}
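
The deleted harness above pointed both Pig and the JDBC tests at the same mini-cluster: its zkQuorum field is assembled from LOCALHOST, the separator, and the cluster's ZooKeeper client port, which is the standard Phoenix connection target. A minimal sketch of connecting over JDBC with such a quorum string (the port 2181 is illustrative; the mini-cluster picks its own):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class QuorumConnectSketch {
        public static void main(String[] args) throws Exception {
            // Mirrors the zkQuorum string BasePigIT.setUp() builds:
            // LOCALHOST + ":" + getZKClientPort(conf); 2181 is illustrative.
            String zkQuorum = "localhost:2181";
            try (Connection conn =
                     DriverManager.getConnection("jdbc:phoenix:" + zkQuorum)) {
                System.out.println("connected via " + conn.getMetaData().getURL());
            }
        }
    }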
diff --git a/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java b/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java
deleted file mode 100644
index febae90..0000000
--- a/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/PhoenixHBaseLoaderIT.java
+++ /dev/null
@@ -1,838 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Array;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.pig.builtin.mock.Storage;
-import org.apache.pig.builtin.mock.Storage.Data;
-import org.apache.pig.data.DataType;
-import org.apache.pig.data.Tuple;
-import org.apache.pig.impl.logicalLayer.schema.Schema;
-import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
-import org.junit.Test;
-
-import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
-
-/**
- * 
- * Test class to run all the integration tests against a virtual map reduce cluster.
- */
-public class PhoenixHBaseLoaderIT extends BasePigIT {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixHBaseLoaderIT.class);
-    private static final String SCHEMA_NAME = "T";
-    private static final String TABLE_NAME = "A";
-    private static final String INDEX_NAME = "I";
-    private static final String TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, TABLE_NAME);
-    private static final String CASE_SENSITIVE_TABLE_NAME = SchemaUtil.getEscapedArgument("a");
-    private static final String CASE_SENSITIVE_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME,CASE_SENSITIVE_TABLE_NAME);
-
-    /**
-     * Validates the schema returned for a table with Pig data types.
-     * @throws Exception
-     */
-    @Test
-    public void testSchemaForTable() throws Exception {
-        final String TABLE = "TABLE1";
-        final String ddl = String.format("CREATE TABLE %s "
-                + "  (a_string varchar not null, a_binary varbinary not null, a_integer integer, cf1.a_float float"
-                + "  CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n", TABLE);
-        conn.createStatement().execute(ddl);
-        conn.commit();
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://table/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');", TABLE,
-                zkQuorum));
-
-        final Schema schema = pigServer.dumpSchema("A");
-        List<FieldSchema> fields = schema.getFields();
-        assertEquals(4, fields.size());
-        assertTrue(fields.get(0).alias.equalsIgnoreCase("a_string"));
-        assertTrue(fields.get(0).type == DataType.CHARARRAY);
-        assertTrue(fields.get(1).alias.equalsIgnoreCase("a_binary"));
-        assertTrue(fields.get(1).type == DataType.BYTEARRAY);
-        assertTrue(fields.get(2).alias.equalsIgnoreCase("a_integer"));
-        assertTrue(fields.get(2).type == DataType.INTEGER);
-        assertTrue(fields.get(3).alias.equalsIgnoreCase("a_float"));
-        assertTrue(fields.get(3).type == DataType.FLOAT);
-    }
-
-    /**
-     * Validates the schema returned when specific columns of a table are given as part of LOAD.
-     * @throws Exception
-     */
-    @Test
-    public void testSchemaForTableWithSpecificColumns() throws Exception {
-
-        //create the table
-        final String TABLE = "TABLE2";
-        final String ddl = "CREATE TABLE " + TABLE
-                + "  (ID INTEGER NOT NULL PRIMARY KEY,NAME VARCHAR, AGE INTEGER) ";
-        conn.createStatement().execute(ddl);
-
-        final String selectColumns = "ID,NAME";
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://table/%s/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');",
-                TABLE, selectColumns, zkQuorum));
-
-        Schema schema = pigServer.dumpSchema("A");
-        List<FieldSchema> fields = schema.getFields();
-        assertEquals(2, fields.size());
-        assertTrue(fields.get(0).alias.equalsIgnoreCase("ID"));
-        assertTrue(fields.get(0).type == DataType.INTEGER);
-        assertTrue(fields.get(1).alias.equalsIgnoreCase("NAME"));
-        assertTrue(fields.get(1).type == DataType.CHARARRAY);
-    }
-
-    /**
-     * Validates the schema returned when a SQL SELECT query is given as part of LOAD.
-     * @throws Exception
-     */
-    @Test
-    public void testSchemaForQuery() throws Exception {
-
-        //create the table.
-        final String TABLE = "TABLE3";
-        String ddl = String.format("CREATE TABLE " + TABLE +
-                "  (A_STRING VARCHAR NOT NULL, A_DECIMAL DECIMAL NOT NULL, CF1.A_INTEGER INTEGER, CF2.A_DOUBLE DOUBLE"
-                + "  CONSTRAINT pk PRIMARY KEY (A_STRING, A_DECIMAL))\n", TABLE);
-        conn.createStatement().execute(ddl);
-
-
-
-        //sql query for LOAD
-        final String sqlQuery = "SELECT A_STRING,CF1.A_INTEGER,CF2.A_DOUBLE FROM " + TABLE;
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://query/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');",
-                sqlQuery, zkQuorum));
-
-        //assert the schema.
-        Schema schema = pigServer.dumpSchema("A");
-        List<FieldSchema> fields = schema.getFields();
-        assertEquals(3, fields.size());
-        assertTrue(fields.get(0).alias.equalsIgnoreCase("a_string"));
-        assertTrue(fields.get(0).type == DataType.CHARARRAY);
-        assertTrue(fields.get(1).alias.equalsIgnoreCase("a_integer"));
-        assertTrue(fields.get(1).type == DataType.INTEGER);
-        assertTrue(fields.get(2).alias.equalsIgnoreCase("a_double"));
-        assertTrue(fields.get(2).type == DataType.DOUBLE);
-    }
-
-    /**
-     * Validates the schema when it is given as part of LOAD ... AS
-     * @throws Exception
-     */
-    @Test
-    public void testSchemaForTableWithAlias() throws Exception {
-
-        //create the table.
-        final String TABLE = "S.TABLE4";
-        String ddl = "CREATE TABLE  " + TABLE
-                + "  (A_STRING VARCHAR NOT NULL, A_DECIMAL DECIMAL NOT NULL, CF1.A_INTEGER INTEGER, CF2.A_DOUBLE DOUBLE"
-                + "  CONSTRAINT pk PRIMARY KEY (A_STRING, A_DECIMAL)) \n";
-        conn.createStatement().execute(ddl);
-
-        //select query given as part of LOAD.
-        final String sqlQuery = "SELECT A_STRING,A_DECIMAL,CF1.A_INTEGER,CF2.A_DOUBLE FROM " + TABLE;
-
-        LOG.info(String.format("Generated SQL Query [%s]",sqlQuery));
-
-        pigServer.registerQuery(String.format(
-                "raw = load 'hbase://query/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s') AS (a:chararray,b:bigdecimal,c:int,d:double);",
-                sqlQuery, zkQuorum));
-
-        //test the schema.
-        Schema schema = pigServer.dumpSchema("raw");
-        List<FieldSchema> fields = schema.getFields();
-        assertEquals(4, fields.size());
-        assertTrue(fields.get(0).alias.equalsIgnoreCase("a"));
-        assertTrue(fields.get(0).type == DataType.CHARARRAY);
-        assertTrue(fields.get(1).alias.equalsIgnoreCase("b"));
-        assertTrue(fields.get(1).type == DataType.BIGDECIMAL);
-        assertTrue(fields.get(2).alias.equalsIgnoreCase("c"));
-        assertTrue(fields.get(2).type == DataType.INTEGER);
-        assertTrue(fields.get(3).alias.equalsIgnoreCase("d"));
-        assertTrue(fields.get(3).type == DataType.DOUBLE);
-    }
-
-    /**
-     * @throws Exception
-     */
-    @Test
-    public void testDataForTable() throws Exception {
-
-        //create the table
-        String ddl = "CREATE TABLE  " + CASE_SENSITIVE_TABLE_FULL_NAME 
-                + "  (ID  INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, AGE INTEGER) ";
-
-        conn.createStatement().execute(ddl);
-
-        //prepare data with 10 rows having age 25 and the other 30.
-        final String dml = "UPSERT INTO " + CASE_SENSITIVE_TABLE_FULL_NAME + " VALUES(?,?,?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        int rows = 20;
-        for(int i = 0 ; i < rows; i++) {
-            stmt.setInt(1, i);
-            stmt.setString(2, "a"+i);
-            stmt.setInt(3, (i % 2 == 0) ? 25 : 30);
-            stmt.execute();    
-        }
-        conn.commit();
-
-        //load data and filter rows whose age is > 25
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://table/%s' using "  + PhoenixHBaseLoader.class.getName() + "('%s');", CASE_SENSITIVE_TABLE_FULL_NAME,
-                zkQuorum));
-        pigServer.registerQuery("B = FILTER A BY AGE > 25;");
-
-        final Iterator<Tuple> iterator = pigServer.openIterator("B");
-        int recordsRead = 0;
-        while (iterator.hasNext()) {
-            final Tuple each = iterator.next();
-            assertEquals(3, each.size());
-            recordsRead++;
-        }
-        assertEquals(rows/2, recordsRead);
-    }
-
-    /**
-     * @throws Exception
-     */
-    @Test
-    public void testDataForSQLQuery() throws Exception {
-
-        //create the table
-        String ddl = "CREATE TABLE  " + TABLE_FULL_NAME 
-                + "  (ID  INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, AGE INTEGER) ";
-
-        conn.createStatement().execute(ddl);
-
-        //prepare data with 10 rows having age 25 and the other 30.
-        final String dml = "UPSERT INTO " + TABLE_FULL_NAME + " VALUES(?,?,?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        int rows = 20;
-        for(int i = 0 ; i < rows; i++) {
-            stmt.setInt(1, i);
-            stmt.setString(2, "a"+i);
-            stmt.setInt(3, (i % 2 == 0) ? 25 : 30);
-            stmt.execute();    
-        }
-        conn.commit();
-
-        //sql query
-        final String sqlQuery = " SELECT ID,NAME,AGE FROM " + TABLE_FULL_NAME + " WHERE AGE > 25";
-        //load data and filter rows whose age is > 25
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://query/%s' using org.apache.phoenix.pig.PhoenixHBaseLoader('%s');", sqlQuery,
-                zkQuorum));
-
-        final Iterator<Tuple> iterator = pigServer.openIterator("A");
-        int recordsRead = 0;
-        while (iterator.hasNext()) {
-            iterator.next();
-            recordsRead++;
-        }
-        assertEquals(rows/2, recordsRead);
-    }
-
-    /**
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testForNonPKSQLQuery() throws Exception {
-
-        //create the table
-        final String TABLE = "TABLE5";
-        String ddl = "CREATE TABLE  " + TABLE
-                + " ( ID VARCHAR PRIMARY KEY, FOO VARCHAR, BAR INTEGER, BAZ UNSIGNED_INT)";
-
-        conn.createStatement().execute(ddl);
-
-        //upsert data.
-        final String dml = "UPSERT INTO " + TABLE + " VALUES(?,?,?,?) ";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        stmt.setString(1, "a");
-        stmt.setString(2, "a");
-        stmt.setInt(3,-1);
-        stmt.setInt(4,1);
-        stmt.execute();
-        stmt.setString(1, "b");
-        stmt.setString(2, "b");
-        stmt.setInt(3,-2);
-        stmt.setInt(4,2);
-        stmt.execute();
-
-        conn.commit();
-
-        //sql query
-        final String sqlQuery = String.format(" SELECT FOO, BAZ FROM %s WHERE BAR = -1 " , TABLE);
-
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://query/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');", sqlQuery,
-                zkQuorum));
-
-        final Iterator<Tuple> iterator = pigServer.openIterator("A");
-        int recordsRead = 0;
-        while (iterator.hasNext()) {
-            final Tuple tuple = iterator.next();
-            assertEquals("a", tuple.get(0));
-            assertEquals(1, tuple.get(1));
-            recordsRead++;
-        }
-        assertEquals(1, recordsRead);
-
-        //test the schema. Test for PHOENIX-1123
-        Schema schema = pigServer.dumpSchema("A");
-        List<FieldSchema> fields = schema.getFields();
-        assertEquals(2, fields.size());
-        assertTrue(fields.get(0).alias.equalsIgnoreCase("FOO"));
-        assertTrue(fields.get(0).type == DataType.CHARARRAY);
-        assertTrue(fields.get(1).alias.equalsIgnoreCase("BAZ"));
-        assertTrue(fields.get(1).type == DataType.INTEGER);
-    }
-
-    /**
-     * @throws Exception
-     */
-    @Test
-    public void testGroupingOfDataForTable() throws Exception {
-
-        //create the table
-        final String TABLE = "TABLE6";
-        String ddl = "CREATE TABLE  " + TABLE
-                + "  (ID  INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, AGE INTEGER, SAL INTEGER) ";
-
-        conn.createStatement().execute(ddl);
-
-        //prepare data with 10 rows having age 25 and the other 30.
-        final String dml = "UPSERT INTO " + TABLE + " VALUES(?,?,?,?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        int rows = 20;
-        int j = 0, k = 0;
-        for(int i = 0 ; i < rows; i++) {
-            stmt.setInt(1, i);
-            stmt.setString(2, "a"+i);
-            if(i % 2 == 0) {
-                stmt.setInt(3, 25);
-                stmt.setInt(4, 10 * 2 * j++);    
-            } else {
-                stmt.setInt(3, 30);
-                stmt.setInt(4, 10 * 3 * k++);
-            }
-
-            stmt.execute();    
-        }
-        conn.commit();
-
-        //prepare the mock storage with expected output
-        final Data data = Storage.resetData(pigServer);
-        List<Tuple> expectedList = new ArrayList<Tuple>();
-        expectedList.add(Storage.tuple(0,180));
-        expectedList.add(Storage.tuple(0,270));
-
-        //load data and filter rows whose age is > 25
-        pigServer.setBatchOn();
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://table/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');", TABLE,
-                zkQuorum));
-
-        pigServer.registerQuery("B = GROUP A BY AGE;");
-        pigServer.registerQuery("C = FOREACH B GENERATE MIN(A.SAL),MAX(A.SAL);");
-        pigServer.registerQuery("STORE C INTO 'out' using mock.Storage();");
-        pigServer.executeBatch();
-
-        List<Tuple> actualList = data.get("out");
-        assertEquals(expectedList, actualList);
-    }
-
-    @Test
-    public void testTimestampForSQLQuery() throws Exception {
-        //create the table
-        String ddl = "CREATE TABLE TIMESTAMP_T (MYKEY VARCHAR,DATE_STP TIMESTAMP CONSTRAINT PK PRIMARY KEY (MYKEY)) ";
-        conn.createStatement().execute(ddl);
-
-        final String dml = "UPSERT INTO TIMESTAMP_T VALUES('foo',TO_TIMESTAMP('2006-04-12 00:00:00'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        //sql query
-        final String sqlQuery = " SELECT mykey, year(DATE_STP) FROM TIMESTAMP_T ";
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://query/%s' using org.apache.phoenix.pig.PhoenixHBaseLoader('%s');", sqlQuery,
-                zkQuorum));
-
-        final Iterator<Tuple> iterator = pigServer.openIterator("A");
-        while (iterator.hasNext()) {
-            Tuple tuple = iterator.next();
-            assertEquals("foo", tuple.get(0));
-            assertEquals(2006, tuple.get(1));
-        }
-    }
-
-    @Test
-    public void testDateForSQLQuery() throws Exception {
-        //create the table
-        String ddl = "CREATE TABLE DATE_T (MYKEY VARCHAR,DATE_STP Date CONSTRAINT PK PRIMARY KEY (MYKEY)) ";
-        conn.createStatement().execute(ddl);
-
-        final String dml = "UPSERT INTO DATE_T VALUES('foo',TO_DATE('2004-03-10 10:00:00'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        //sql query
-        final String sqlQuery = " SELECT mykey, hour(DATE_STP) FROM DATE_T ";
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://query/%s' using org.apache.phoenix.pig.PhoenixHBaseLoader('%s');", sqlQuery,
-                zkQuorum));
-
-        final Iterator<Tuple> iterator = pigServer.openIterator("A");
-        int count = 0;
-        while (iterator.hasNext()) {
-            Tuple tuple = iterator.next();
-            assertEquals("foo", tuple.get(0));
-            assertEquals(10, tuple.get(1));
-            count++;
-        }
-        // guard against the loop body never running
-        assertEquals(1, count);
-    }
-
-    @Test
-    public void testTimeForSQLQuery() throws Exception {
-        //create the table
-        String ddl = "CREATE TABLE TIME_T (MYKEY VARCHAR,DATE_STP TIME CONSTRAINT PK PRIMARY KEY (MYKEY)) ";
-        conn.createStatement().execute(ddl);
-
-        final String dml = "UPSERT INTO TIME_T VALUES('foo',TO_TIME('2008-05-16 00:30:00'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        //sql query
-        final String sqlQuery = " SELECT mykey, minute(DATE_STP) FROM TIME_T ";
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://query/%s' using org.apache.phoenix.pig.PhoenixHBaseLoader('%s');", sqlQuery,
-                zkQuorum));
-
-        final Iterator<Tuple> iterator = pigServer.openIterator("A");
-        int count = 0;
-        while (iterator.hasNext()) {
-            Tuple tuple = iterator.next();
-            assertEquals("foo", tuple.get(0));
-            assertEquals(30, tuple.get(1));
-            count++;
-        }
-        // guard against the loop body never running
-        assertEquals(1, count);
-    }
-
-    /**
-     * Tests both {@link PhoenixHBaseLoader} and {@link PhoenixHBaseStorage} in a load-aggregate-store pipeline.
-     * @throws Exception
-     */
-    @Test
-    public void testLoadAndStore() throws Exception {
-
-        //create the tables
-        final String TABLE = "TABLE7";
-        final String sourceTableddl = "CREATE TABLE  " + TABLE
-                + "  (ID  INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, AGE INTEGER, SAL INTEGER) ";
-
-        final String targetTable = "AGGREGATE";
-        final String targetTableddl = "CREATE TABLE " + targetTable
-                +  "(AGE INTEGER NOT NULL PRIMARY KEY , MIN_SAL INTEGER , MAX_SAL INTEGER) ";
-
-        conn.createStatement().execute(sourceTableddl);
-        conn.createStatement().execute(targetTableddl);
-
-        //prepare data: 10 rows with age 25 and 10 rows with age 30.
-        final String dml = "UPSERT INTO " + TABLE + " VALUES(?,?,?,?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        int rows = 20;
-        int j = 0, k = 0;
-        for(int i = 0 ; i < rows; i++) {
-            stmt.setInt(1, i);
-            stmt.setString(2, "a"+i);
-            if(i % 2 == 0) {
-                stmt.setInt(3, 25);
-                stmt.setInt(4, 10 * 2 * j++);    
-            } else {
-                stmt.setInt(3, 30);
-                stmt.setInt(4, 10 * 3 * k++);
-            }
-
-            stmt.execute();    
-        }
-        conn.commit();
-
-
-        //load the data, group it by AGE, aggregate MIN/MAX of SAL, and store the result
-        pigServer.setBatchOn();
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://table/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');", TABLE,
-                zkQuorum));
-
-        pigServer.registerQuery("B = GROUP A BY AGE;");
-        pigServer.registerQuery("C = FOREACH B GENERATE group as AGE,MIN(A.SAL),MAX(A.SAL);");
-        pigServer.registerQuery("STORE C INTO 'hbase://" + targetTable 
-                + "' using " + PhoenixHBaseStorage.class.getName() + "('"
-                + zkQuorum + "', '-batchSize 1000');");
-        pigServer.executeBatch();
-
-        //validate the data with what is stored.
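-        //even rows (age 25) have SAL 0..180 and odd rows (age 30) have SAL 0..270, hence the expected MIN/MAX below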
-        final String selectQuery = "SELECT AGE , MIN_SAL ,MAX_SAL FROM " + targetTable + " ORDER BY AGE";
-        final ResultSet rs = conn.createStatement().executeQuery(selectQuery);
-        assertTrue(rs.next());
-        assertEquals(25, rs.getInt("AGE"));
-        assertEquals(0, rs.getInt("MIN_SAL"));
-        assertEquals(180, rs.getInt("MAX_SAL"));
-        assertTrue(rs.next());
-        assertEquals(30, rs.getInt("AGE"));
-        assertEquals(0, rs.getInt("MIN_SAL"));
-        assertEquals(270, rs.getInt("MAX_SAL"));
-    }
-
-    /**
-     * Tests loading through a SQL query that draws values from a sequence.
-     * @throws Exception
-     */
-    @Test
-    public void testDataForSQLQueryWithSequences() throws Exception {
-
-        //create the table
-        final String TABLE = "TABLE8";
-        String ddl = "CREATE TABLE " + TABLE
-                + " (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, AGE INTEGER) ";
-
-        conn.createStatement().execute(ddl);
-
-        String sequenceDdl = "CREATE SEQUENCE my_sequence";
-
-        conn.createStatement().execute(sequenceDdl);
-
-        //prepare data: 10 rows with age 25 and 10 rows with age 30.
-        final String dml = "UPSERT INTO " + TABLE + " VALUES(?,?,?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        int rows = 20;
-        for(int i = 0 ; i < rows; i++) {
-            stmt.setInt(1, i);
-            stmt.setString(2, "a"+i);
-            stmt.setInt(3, (i % 2 == 0) ? 25 : 30);
-            stmt.execute();
-        }
-        conn.commit();
-
-        //SQL query that loads only the rows whose AGE is > 25, assigning each a sequence value
-        final String sqlQuery = " SELECT NEXT VALUE FOR my_sequence AS my_seq,ID,NAME,AGE FROM " + TABLE + " WHERE AGE > 25";
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://query/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');", sqlQuery,
-                zkQuorum));
-
-
-        Iterator<Tuple> iterator = pigServer.openIterator("A");
-        int recordsRead = 0;
-        while (iterator.hasNext()) {
-            iterator.next();
-            recordsRead++;
-        }
-        assertEquals(rows/2, recordsRead);
-    }
-
-    @Test
-    public void testDataForSQLQueryWithFunctions() throws Exception {
-
-        //create the table
-        final String TABLE = "TABLE9";
-        String ddl = "CREATE TABLE " + TABLE
-                + " (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR) ";
-
-        conn.createStatement().execute(ddl);
-
-        final String dml = "UPSERT INTO " + TABLE + " VALUES(?,?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        int rows = 20;
-        for(int i = 0 ; i < rows; i++) {
-            stmt.setInt(1, i);
-            stmt.setString(2, "a"+i);
-            stmt.execute();
-        }
-        conn.commit();
-
-        //sql query
-        final String sqlQuery = " SELECT UPPER(NAME) AS n FROM " + TABLE + " ORDER BY ID" ;
-
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://query/%s' using "  + PhoenixHBaseLoader.class.getName() + "('%s');", sqlQuery,
-                zkQuorum));
-
-
-        Iterator<Tuple> iterator = pigServer.openIterator("A");
-        int i = 0;
-        while (iterator.hasNext()) {
-            Tuple tuple = iterator.next();
-            String name = (String)tuple.get(0);
-            assertEquals("A" + i, name);
-            i++;
-        }
-        // make sure the loop actually consumed every row
-        assertEquals(rows, i);
-    }
-
-    @Test
-    public void testDataFromIndexTable() throws Exception {
-        //create the table
-        String ddl = "CREATE TABLE " + TABLE_NAME
-                + " (ID INTEGER NOT NULL, NAME VARCHAR NOT NULL, EMPLID INTEGER CONSTRAINT pk PRIMARY KEY (ID, NAME)) IMMUTABLE_ROWS=true";
-
-        conn.createStatement().execute(ddl);
-
-        //create a index table
-        String indexDdl = " CREATE INDEX " + INDEX_NAME + " ON " + TABLE_NAME + " (EMPLID) INCLUDE (NAME) ";
-        conn.createStatement().execute(indexDdl);
-
-        //upsert the data.
-        final String dml = "UPSERT INTO " + TABLE_NAME + " VALUES(?,?,?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        int rows = 20;
-        for(int i = 0 ; i < rows; i++) {
-            stmt.setInt(1, i);
-            stmt.setString(2, "a"+i);
-            stmt.setInt(3, i * 5);
-            stmt.execute();
-        }
-        conn.commit();
-        pigServer.registerQuery("A = load 'hbase://query/SELECT NAME , EMPLID FROM " + TABLE_NAME
-                + " WHERE EMPLID = 25' using " + PhoenixHBaseLoader.class.getName() + "('" + zkQuorum + "');");
-        Iterator<Tuple> iterator = pigServer.openIterator("A");
-        int count = 0;
-        while (iterator.hasNext()) {
-            Tuple tuple = iterator.next();
-            assertEquals("a5", tuple.get(0));
-            assertEquals(25, tuple.get(1));
-            count++;
-        }
-        // exactly one row has EMPLID = 25
-        assertEquals(1, count);
-    }
-
-    @Test 
-    public void testLoadOfSaltTable() throws Exception {
-        final String TABLE = "TABLE11";
-        final String sourceTableddl = "CREATE TABLE  " + TABLE
-                + "  (ID  INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, AGE INTEGER, SAL INTEGER) SALT_BUCKETS=2  ";
-
-        conn.createStatement().execute(sourceTableddl);
-
-        //prepare data: 10 rows with age 25 and 10 rows with age 30.
-        final String dml = "UPSERT INTO " + TABLE + " VALUES(?,?,?,?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        int rows = 20;
-        int j = 0, k = 0;
-        for(int i = 0 ; i < rows; i++) {
-            stmt.setInt(1, i);
-            stmt.setString(2, "a"+i);
-            if(i % 2 == 0) {
-                stmt.setInt(3, 25);
-                stmt.setInt(4, 10 * 2 * j++);    
-            } else {
-                stmt.setInt(3, 30);
-                stmt.setInt(4, 10 * 3 * k++);
-            }
-
-            stmt.execute();    
-        }
-        conn.commit();
-
-        final Data data = Storage.resetData(pigServer);
-        List<Tuple> expectedList = new ArrayList<Tuple>();
-        expectedList.add(Storage.tuple(25,10));
-        expectedList.add(Storage.tuple(30,10));
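-        // the 20 rows split evenly into the two AGE groups, so each group has a COUNT of 10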
-
-        pigServer.setBatchOn();
-        pigServer.registerQuery(String.format(
-                "A = load 'hbase://table/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');", TABLE,
-                zkQuorum));
-
-        pigServer.registerQuery("B = GROUP A BY AGE;");
-        pigServer.registerQuery("C = FOREACH B GENERATE group,COUNT(A);");
-        pigServer.registerQuery("STORE C INTO 'out' using mock.Storage();");
-        pigServer.executeBatch();
-
-        List<Tuple> actualList = data.get("out");
-        assertEquals(expectedList.size(), actualList.size());
-    }
-    
-   /**
-    * Tests loading Phoenix ARRAY columns, plus a dynamic array produced by REGEXP_SPLIT, via a SQL query.
-    * @throws Exception
-    */
-    @Test
-    public void testLoadForArrayWithQuery() throws Exception {
-         //create the table
-        final String TABLE = "TABLE14";
-        String ddl = "CREATE TABLE  " + TABLE
-                + " ( ID INTEGER PRIMARY KEY, a_double_array double array[] , a_varchar_array varchar array, a_concat_str varchar, sep varchar)";
-                
-        conn.createStatement().execute(ddl);
-        
-        Double[] doubleArr = new Double[] { 2.2, 4.4, 6.6 };
-        Array doubleArray = conn.createArrayOf("DOUBLE", doubleArr);
-        Tuple doubleArrTuple = Storage.tuple(2.2d, 4.4d, 6.6d);
-        
-        Double[] doubleArr2 = new Double[] { 12.2, 22.2 };
-        Array doubleArray2 = conn.createArrayOf("DOUBLE", doubleArr2);
-        Tuple doubleArrTuple2 = Storage.tuple(12.2d, 22.2d);
-        
-        String[] strArr = new String[] { "ABC", "DEF", "GHI", "JKL" };
-        Array strArray  = conn.createArrayOf("VARCHAR", strArr);
-        Tuple strArrTuple = Storage.tuple("ABC", "DEF", "GHI", "JKL");
-        
-        String[] strArr2 = new String[] { "ABC", "XYZ" };
-        Array strArray2  = conn.createArrayOf("VARCHAR", strArr2);
-        Tuple strArrTuple2 = Storage.tuple("ABC", "XYZ");
-        
-        //upsert data.
-        final String dml = "UPSERT INTO " + TABLE + " VALUES(?, ?, ?, ?, ?) ";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        stmt.setInt(1, 1);
-        stmt.setArray(2, doubleArray);
-        stmt.setArray(3, strArray);
-        stmt.setString(4, "ONE,TWO,THREE");
-        stmt.setString(5, ",");
-        stmt.execute();
-        
-        stmt.setInt(1, 2);
-        stmt.setArray(2, doubleArray2);
-        stmt.setArray(3, strArray2);
-        stmt.setString(4, "FOUR:five:six");
-        stmt.setString(5, ":");
-        stmt.execute();
-       
-        conn.commit();
-        
-        Tuple dynArrTuple = Storage.tuple("ONE", "TWO", "THREE");
-        Tuple dynArrTuple2 = Storage.tuple("FOUR", "five", "six");
-        
-        //sql query
-        final String sqlQuery = String.format(" SELECT ID, A_DOUBLE_ARRAY, A_VARCHAR_ARRAY, REGEXP_SPLIT(a_concat_str, sep) AS flattened_str FROM %s ", TABLE);
-      
-        final Data data = Storage.resetData(pigServer);
-        List<Tuple> expectedList = new ArrayList<Tuple>();
-        expectedList.add(Storage.tuple(1, 3L, 4L, dynArrTuple));
-        expectedList.add(Storage.tuple(2, 2L, 2L, dynArrTuple2));
-        final String load = String.format("A = load 'hbase://query/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');",sqlQuery,zkQuorum);
-        pigServer.setBatchOn();
-        pigServer.registerQuery(load);
-        pigServer.registerQuery("B = FOREACH A GENERATE ID, SIZE(A_DOUBLE_ARRAY), SIZE(A_VARCHAR_ARRAY), FLATTEND_STR;");
-        pigServer.registerQuery("STORE B INTO 'out' using mock.Storage();");
-        pigServer.executeBatch();
-        
-        List<Tuple> actualList = data.get("out");
-        assertEquals(expectedList.size(), actualList.size());
-        assertEquals(expectedList, actualList);
-        
-        Schema schema = pigServer.dumpSchema("A");
-        List<FieldSchema> fields = schema.getFields();
-        assertEquals(4, fields.size());
-        assertTrue(fields.get(0).alias.equalsIgnoreCase("ID"));
-        assertTrue(fields.get(0).type == DataType.INTEGER);
-        assertTrue(fields.get(1).alias.equalsIgnoreCase("A_DOUBLE_ARRAY"));
-        assertTrue(fields.get(1).type == DataType.TUPLE);
-        assertTrue(fields.get(2).alias.equalsIgnoreCase("A_VARCHAR_ARRAY"));
-        assertTrue(fields.get(2).type == DataType.TUPLE);
-        assertTrue(fields.get(3).alias.equalsIgnoreCase("FLATTENED_STR"));
-        assertTrue(fields.get(3).type == DataType.TUPLE);
-        
-        Iterator<Tuple> iterator = pigServer.openIterator("A");
-        Tuple firstTuple = Storage.tuple(1, doubleArrTuple, strArrTuple, dynArrTuple);
-        Tuple secondTuple = Storage.tuple(2, doubleArrTuple2, strArrTuple2, dynArrTuple2);
-        List<Tuple> expectedRows = Lists.newArrayList(firstTuple, secondTuple);
-        List<Tuple> actualRows = Lists.newArrayList();
-        while (iterator.hasNext()) {
-            Tuple tuple = iterator.next();
-            actualRows.add(tuple);
-        }
-        assertEquals(expectedRows, actualRows);
-    }
-    
-    
-    /**
-     * Tests loading a Phoenix DOUBLE ARRAY column directly from a table.
-     * @throws Exception
-     */
-     @Test
-     public void testLoadForArrayWithTable() throws Exception {
-          //create the table
-         final String TABLE = "TABLE15";
-         String ddl = "CREATE TABLE  " + TABLE
-                 + " ( ID INTEGER PRIMARY KEY, a_double_array double array[])";
-                 
-         conn.createStatement().execute(ddl);
-         
-         Double[] doubleArr = new Double[] { 2.2, 4.4, 6.6 };
-         Array doubleArray = conn.createArrayOf("DOUBLE", doubleArr);
-         Tuple doubleArrTuple = Storage.tuple(2.2d, 4.4d, 6.6d);
-         
-         Double[] doubleArr2 = new Double[] { 12.2, 22.2 };
-         Array doubleArray2 = conn.createArrayOf("DOUBLE", doubleArr2);
-         Tuple doubleArrTuple2 = Storage.tuple(12.2d, 22.2d);
-         
-         //upsert data.
-         final String dml = "UPSERT INTO " + TABLE + " VALUES(?, ?) ";
-         PreparedStatement stmt = conn.prepareStatement(dml);
-         stmt.setInt(1, 1);
-         stmt.setArray(2, doubleArray);
-         stmt.execute();
-         
-         stmt.setInt(1, 2);
-         stmt.setArray(2, doubleArray2);
-         stmt.execute();
-        
-         conn.commit();
-         
-         final Data data = Storage.resetData(pigServer);
-         List<Tuple> expectedList = new ArrayList<Tuple>();
-         expectedList.add(Storage.tuple(1, doubleArrTuple));
-         expectedList.add(Storage.tuple(2, doubleArrTuple2));
-         
-         pigServer.setBatchOn();
-         pigServer.registerQuery(String.format(
-             "A = load 'hbase://table/%s' using " + PhoenixHBaseLoader.class.getName() + "('%s');", TABLE,
-             zkQuorum));
-
-         pigServer.registerQuery("STORE A INTO 'out' using mock.Storage();");
-         pigServer.executeBatch();
-         
-         List<Tuple> actualList = data.get("out");
-         assertEquals(expectedList.size(), actualList.size());
-         assertEquals(expectedList, actualList);
-     }
-}
diff --git a/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/PhoenixHBaseStorerIT.java b/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/PhoenixHBaseStorerIT.java
deleted file mode 100644
index d9aa87b..0000000
--- a/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/PhoenixHBaseStorerIT.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig;
-
-import static org.apache.pig.builtin.mock.Storage.tuple;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Array;
-import java.sql.ResultSet;
-import java.sql.Statement;
-import java.util.Collection;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.pig.backend.executionengine.ExecJob;
-import org.apache.pig.backend.executionengine.ExecJob.JOB_STATUS;
-import org.apache.pig.builtin.mock.Storage;
-import org.apache.pig.builtin.mock.Storage.Data;
-import org.apache.pig.data.DataByteArray;
-import org.apache.pig.data.Tuple;
-import org.joda.time.DateTime;
-import org.junit.Test;
-
-import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
-
-
-public class PhoenixHBaseStorerIT extends BasePigIT {
-    /**
-     * Basic test - writes data to a Phoenix table and compares the data written
-     * against the expected rows
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testStorer() throws Exception {
-        final String tableName = "TABLE1";
-        final Statement stmt = conn.createStatement();
-
-        stmt.execute("CREATE TABLE " + tableName +
-                 " (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR)");
-
-        final Data data = Storage.resetData(pigServer);
-        final Collection<Tuple> list = Lists.newArrayList();
-
-        // Create input dataset
-        int rows = 100;
-        for (int i = 0; i < rows; i++) {
-            Tuple t = tupleFactory.newTuple();
-            t.append(i);
-            t.append("a" + i);
-            list.add(t);
-        }
-        data.set("in", "id:int, name:chararray", list);
-
-        pigServer.setBatchOn();
-        pigServer.registerQuery("A = LOAD 'in' USING mock.Storage();");
-
-        pigServer.registerQuery("Store A into 'hbase://" + tableName
-                               + "' using " + PhoenixHBaseStorage.class.getName() + "('"
-                                + zkQuorum + "', '-batchSize 1000');");
-
-         // Now run the Pig script
-        ExecJob job = pigServer.executeBatch().get(0);
-        if (job.getStatus() != JOB_STATUS.COMPLETED) {
-            throw new RuntimeException("Job failed", job.getException());
-        }
-
-        // Compare data in Phoenix table to the expected
-        final ResultSet rs = stmt
-                .executeQuery("SELECT id, name FROM table1 ORDER BY id");
-
-        for (int i = 0; i < rows; i++) {
-            assertTrue(rs.next());
-            assertEquals(i, rs.getInt(1));
-            assertEquals("a" +  i, rs.getString(2));
-        }
-    }
-    
-    /**
-     * Basic test - writes data for specific columns to a Phoenix table and compares
-     * the data written against the expected rows
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testStorerForSpecificColumns() throws Exception {
-        final String tableName = SchemaUtil.getTableName("TABLE2", SchemaUtil.getEscapedArgument("zo2"));
-        final Statement stmt = conn.createStatement();
-
-        stmt.execute("CREATE TABLE " + tableName +
-                 " (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, AGE INTEGER)");
-        final Data data = Storage.resetData(pigServer);
-        final Collection<Tuple> list = Lists.newArrayList();
-
-        // Create input dataset
-        int rows = 100;
-        for (int i = 0; i < rows; i++) {
-            Tuple t = tupleFactory.newTuple();
-            t.append(i);
-            t.append("a" + i);
-            t.append(i * 2);
-            list.add(t);
-        }
-        data.set("in", "id:int, name:chararray,age:int", list);
-
-        pigServer.setBatchOn();
-        pigServer.registerQuery("A = LOAD 'in' USING mock.Storage();");
-        pigServer.registerQuery("B = FOREACH A GENERATE id,name;");
-        pigServer.registerQuery("Store B into 'hbase://" + tableName + "/ID,NAME"
-                               + "' using " + PhoenixHBaseStorage.class.getName() + "('"
-                                + zkQuorum + "', '-batchSize 1000');");
-
-         // Now run the Pig script
-        ExecJob job = pigServer.executeBatch().get(0);
-        if (job.getStatus() != JOB_STATUS.COMPLETED) {
-            throw new RuntimeException("Job failed", job.getException());
-        }
-
-        // Compare data in Phoenix table to the expected
-        final ResultSet rs = stmt
-                .executeQuery("SELECT id, name,age FROM " + tableName + " ORDER BY id");
-
-        for (int i = 0; i < rows; i++) {
-            assertTrue(rs.next());
-            assertEquals(i, rs.getInt(1));
-            assertEquals("a" +  i, rs.getString(2));
-            assertEquals(0, rs.getInt(3));
-        }
-    }
-    
-    /**
-     * Tests storage of DataByteArray columns to Phoenix.
-     * Maps each DataByteArray to the target Phoenix data type and persists it in HBase.
-     * @throws Exception
-     */
-    @Test
-    public void testStoreWithBinaryDataTypes() throws Exception {
-     
-    	final String tableName = "TABLE3";
-        final Statement stmt = conn.createStatement();
-
-        stmt.execute("CREATE TABLE " + tableName +
-                " (col1 BIGINT NOT NULL, col2 INTEGER , col3 FLOAT, col4 DOUBLE , col5 TINYINT , " +
-                "  col6 BOOLEAN , col7 VARBINARY CONSTRAINT my_pk PRIMARY KEY (col1))");
-
-        final Data data = Storage.resetData(pigServer);
-        final Collection<Tuple> list = Lists.newArrayList();
-
-        int rows = 10;
-        for (int i = 1; i <= rows; i++) {
-            Tuple t = tupleFactory.newTuple();
-            t.append(i);
-            t.append(new DataByteArray(Bytes.toBytes(i * 5)));
-            t.append(new DataByteArray(Bytes.toBytes(i * 10.0F)));
-            t.append(new DataByteArray(Bytes.toBytes(i * 15.0D)));
-            t.append(new DataByteArray(Bytes.toBytes(i)));
-            t.append(new DataByteArray(Bytes.toBytes( i % 2 == 0)));
-            t.append(new DataByteArray(Bytes.toBytes(i)));
-            list.add(t);
-        }
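-        // every non-key field is a Pig bytearray encoded with HBase Bytes; on upsert it is
-        // re-interpreted using the corresponding Phoenix column type declared in the DDL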
-        data.set("in", "col1:int,col2:bytearray,col3:bytearray,col4:bytearray,col5:bytearray,col6:bytearray,col7:bytearray ", list);
-
-        pigServer.setBatchOn();
-        pigServer.registerQuery("A = LOAD 'in' USING mock.Storage();");
-
-        pigServer.registerQuery("Store A into 'hbase://" + tableName
-                               + "' using " + PhoenixHBaseStorage.class.getName() + "('"
-                                + zkQuorum + "', '-batchSize 1000');");
-
-        ExecJob job = pigServer.executeBatch().get(0);
-        if (job.getStatus() != JOB_STATUS.COMPLETED) {
-            throw new RuntimeException("Job failed", job.getException());
-        }
-
-        final ResultSet rs = stmt
-                .executeQuery(String.format("SELECT col1 , col2 , col3 , col4 , col5 , col6, col7  FROM %s ORDER BY col1" , tableName));
-
-        int count = 0;
-        for (int i = 1; i <= rows; i++) {
-            assertTrue(rs.next());
-            assertEquals(i, rs.getInt(1));
-            assertEquals(i * 5, rs.getInt(2));
-            assertEquals(i * 10.0F, rs.getFloat(3),0.0);
-            assertEquals(i * 15.0D, rs.getDouble(4), 0.0);
-            assertEquals(i,rs.getInt(5));
-            assertEquals(i % 2 == 0, rs.getBoolean(6));
-            assertArrayEquals(Bytes.toBytes(i), rs.getBytes(7));
-            count++;
-        }
-        assertEquals(rows, count);
-     }
-    
-    @Test
-    public void testStoreWithDateTime() throws Exception {
-     
-    	final String tableName = "TABLE4";
-        final Statement stmt = conn.createStatement();
-
-        stmt.execute("CREATE TABLE " + tableName +
-                " (col1 BIGINT NOT NULL, col2 DATE , col3 TIME, " +
-                " col4 TIMESTAMP CONSTRAINT my_pk PRIMARY KEY (col1))");
-
-        long now = System.currentTimeMillis();
-        final DateTime dt = new DateTime(now);
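-        // the same Joda DateTime instant feeds the DATE, TIME and TIMESTAMP columns, and is
-        // compared below against getDate/getTime/getTimestamp on the same millisecond value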
-        
-        final Data data = Storage.resetData(pigServer);
-        final Collection<Tuple> list = Lists.newArrayList();
-        Tuple t = tupleFactory.newTuple();
-        
-        t.append(1);
-        t.append(dt);
-        t.append(dt);
-        t.append(dt);
-       
-        list.add(t);
-        
-        data.set("in", "col1:int,col2:datetime,col3:datetime,col4:datetime", list);
-
-        pigServer.setBatchOn();
-        pigServer.registerQuery("A = LOAD 'in' USING mock.Storage();");
-
-        pigServer.registerQuery("Store A into 'hbase://" + tableName
-                               + "' using " + PhoenixHBaseStorage.class.getName() + "('"
-                                + zkQuorum + "', '-batchSize 1000');");
-
-        ExecJob job = pigServer.executeBatch().get(0);
-        if (job.getStatus() != JOB_STATUS.COMPLETED) {
-            throw new RuntimeException("Job failed", job.getException());
-        }
-
-        final ResultSet rs = stmt
-                .executeQuery(String.format("SELECT col1 , col2 , col3 , col4 FROM %s " , tableName));
-
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        assertEquals(now, rs.getDate(2).getTime());
-        assertEquals(now, rs.getTime(3).getTime());
-        assertEquals(now, rs.getTimestamp(4).getTime());
-     
-    }
-    
-    @Test
-    public void testStoreForArray() throws Exception {
-     
-        final String tableName = "TABLE5";
-        final Statement stmt = conn.createStatement();
-        String ddl = "CREATE TABLE  " + tableName
-                + " ( ID INTEGER PRIMARY KEY, dbl double array[], a_varchar_array varchar array)";
-      
-        stmt.execute(ddl);
-      
-        final Data data = Storage.resetData(pigServer);
-        data.set("in",  tuple(1, tuple(2.2)),
-                        tuple(2, tuple(2.4, 2.5)),
-                        tuple(3, tuple(2.3)));
-
-        pigServer.setBatchOn();
-        pigServer.registerQuery("A = LOAD 'in' USING mock.Storage() as (id:int, dbl:tuple());");
-        pigServer.registerQuery("Store A into 'hbase://" + tableName + "/ID,DBL"
-                               + "' using " + PhoenixHBaseStorage.class.getName() + "('"
-                                + zkQuorum + "', '-batchSize 1000');");
-
-        ExecJob job = pigServer.executeBatch().get(0);
-        if (job.getStatus() != JOB_STATUS.COMPLETED) {
-            throw new RuntimeException("Job failed", job.getException());
-        }
-
-        final ResultSet rs = stmt
-                .executeQuery(String.format("SELECT id , dbl FROM %s where id = 2" , tableName));
-
-        assertTrue(rs.next());
-        assertEquals(2, rs.getInt(1));
-        Array expectedDoubleArr = conn.createArrayOf("DOUBLE", new Double[] { 2.4, 2.5 });
-        assertEquals(expectedDoubleArr,rs.getArray(2));
-    }
-}
diff --git a/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/udf/ReserveNSequenceTestIT.java b/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/udf/ReserveNSequenceTestIT.java
deleted file mode 100644
index 98f46f0..0000000
--- a/phoenix-pig-base/src/it/java/org/apache/phoenix/pig/udf/ReserveNSequenceTestIT.java
+++ /dev/null
@@ -1,306 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.udf;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.Properties;
-
-import org.apache.phoenix.pig.BasePigIT;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.pig.data.Tuple;
-import org.apache.pig.impl.util.UDFContext;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * Test class to run all the Pig Sequence UDF integration tests against a virtual map reduce cluster.
- */
-public class ReserveNSequenceTestIT extends BasePigIT {
-
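-    /*
-     * For orientation, a sketch of how this UDF is typically invoked from Pig Latin.
-     * The DEFINE alias and relation names are illustrative only; the constructor
-     * arguments (zkQuorum, tenantId) and the (numToReserve, sequenceName) input
-     * tuple mirror what the tests below pass to exec():
-     *
-     *   DEFINE ReserveN org.apache.phoenix.pig.udf.ReserveNSequence('zkQuorum', 'tenantId');
-     *   B = FOREACH A GENERATE ReserveN(5L, 'my_schema.my_sequence') AS first_reserved_id;
-     */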
-    private static final String CREATE_SEQUENCE_SYNTAX = "CREATE SEQUENCE %s START WITH %s INCREMENT BY %s MINVALUE %s MAXVALUE %s CACHE %s";
-    private static final String SEQUENCE_NAME = "my_schema.my_sequence";
-    private static final long MAX_VALUE = 10;
-
-    private static UDFContext udfContext;
-
-    @Rule
-    public ExpectedException thrown = ExpectedException.none();
-
-    @Override
-    @Before
-    public void setUp() throws Exception {
-        super.setUp();
-        createSequence(conn);
-        createUdfContext();
-    }
-
-    @Override
-    @After
-    public void tearDown() throws Exception {
-        udfContext.reset();
-        dropSequence(conn);
-        super.tearDown();
-    }
-
-    @Test
-    public void testReserve() throws Exception {
-        doTest(new UDFTestProperties(1));
-    }
-
-    @Test
-    public void testReserveN() throws Exception {
-        doTest(new UDFTestProperties(5));
-    }
-
-    @Test
-    public void testReserveNwithPreviousAllocations() throws Exception {
-        UDFTestProperties props = new UDFTestProperties(5);
-        props.setCurrentValue(4);
-        doTest(props);
-    }
-
-    @Test
-    public void testReserveWithZero() throws Exception {
-        UDFTestProperties props = new UDFTestProperties(0);
-        props.setExceptionExpected(true);
-        props.setExceptionClass(IllegalArgumentException.class);
-        props.setErrorMessage(ReserveNSequence.INVALID_NUMBER_MESSAGE);
-        doTest(props);
-    }
-
-    @Test
-    public void testReserveWithNegativeNumber() throws Exception {
-        UDFTestProperties props = new UDFTestProperties(-1);
-        props.setExceptionExpected(true);
-        props.setExceptionClass(IllegalArgumentException.class);
-        props.setErrorMessage(ReserveNSequence.INVALID_NUMBER_MESSAGE);
-        doTest(props);
-    }
-
-    @Test
-    public void testReserveMaxLimit() throws Exception {
-        UDFTestProperties props = new UDFTestProperties(MAX_VALUE);
-        props.setExceptionExpected(true);
-        props.setExceptionClass(IOException.class);
-        props.setErrorMessage("Reached MAXVALUE of sequence");
-        doTest(props);
-    }
-
-    @Test
-    public void testNoSequenceName() throws Exception {
-        UDFTestProperties props = new UDFTestProperties(1);
-        props.setExceptionExpected(true);
-        props.setSequenceName(null);
-        props.setExceptionClass(NullPointerException.class);
-        props.setErrorMessage(ReserveNSequence.EMPTY_SEQUENCE_NAME_MESSAGE);
-        doTest(props);
-    }
-
-    @Test
-    public void testSequenceNotExisting() throws Exception {
-        UDFTestProperties props = new UDFTestProperties(1);
-        props.setExceptionExpected(true);
-        props.setSequenceName("foo.bar");
-        props.setExceptionClass(IOException.class);
-        props.setErrorMessage("Sequence undefined");
-        doTest(props);
-    }
-    
-    /**
-     * Tests reserving sequence values with a tenant id passed to the UDF.
-     * @throws Exception
-     */
-    @Test
-    public void testTenantSequence() throws Exception {
-        Properties tenantProps = new Properties();
-        String tenantId = "TENANT";
-        tenantProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
-        Connection tenantConn = DriverManager.getConnection(getUrl(), tenantProps);
-        createSequence(tenantConn);
-
-        try {
-            UDFTestProperties props = new UDFTestProperties(3);
-
-            // validate that the UDF reservation applies to that tenant
-            doTest(tenantConn, props);
-
-            // validate global sequence value is still set to 1
-            assertEquals(1L, getNextSequenceValue(conn));
-        } finally {
-            dropSequence(tenantConn);
-        }
-    }
-    
-    /**
-     * Tests using the UDF to reserve sequence values across multiple input tuples.
-     * 
-     * @throws Exception
-     */
-    @Test
-    public void testMultipleTuples() throws Exception {
-        Tuple tuple = tupleFactory.newTuple(2);
-        tuple.set(0, 2L);
-        tuple.set(1, SEQUENCE_NAME);
-
-        final String tenantId = conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB);
-        ReserveNSequence udf = new ReserveNSequence(zkQuorum, tenantId);
-
-        for (int i = 0; i < 2; i++) {
-            udf.exec(tuple);
-        }
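-        // two reservations of 2 consume values 1..4 of the sequence, so the next value is 5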
-        long nextValue = getNextSequenceValue(conn);
-        assertEquals(5L, nextValue);
-    }
-    
-    private void doTest(UDFTestProperties props) throws Exception {
-        doTest(conn, props);
-    }
-
-    private void doTest(Connection conn, UDFTestProperties props) throws Exception {
-        setCurrentValue(conn, props.getCurrentValue());
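-        // the UDF input tuple carries (numToReserve, sequenceName, zkQuorum)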
-        Tuple tuple = tupleFactory.newTuple(3);
-        tuple.set(0, props.getNumToReserve());
-        tuple.set(1, props.getSequenceName());
-        tuple.set(2, zkQuorum);
-        Long result = null;
-        try {
-            final String tenantId = conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB);
-            ReserveNSequence udf = new ReserveNSequence(zkQuorum, tenantId);
-            result = udf.exec(tuple);
-            validateReservedSequence(conn, props.getCurrentValue(), props.getNumToReserve(), result);
-            // Calling this to cleanup for the udf. To close the connection
-            udf.finish();
-        } catch (Exception e) {
-            if (props.isExceptionExpected()) {
-                assertEquals(props.getExceptionClass(), e.getClass());
-                assertTrue(e.getMessage().contains(props.getErrorMessage()));
-            } else {
-                throw e;
-            }
-        }
-    }
-
-    private void createUdfContext() {
-        udfContext = UDFContext.getUDFContext();
-        udfContext.addJobConf(conf);
-    }
-
-    private void validateReservedSequence(Connection conn, Long currentValue, long count, Long result) throws SQLException {
-        Long startIndex = currentValue + 1;
-        assertEquals("Start index is incorrect", startIndex, result);
-        final long newNextSequenceValue = getNextSequenceValue(conn);
-        assertEquals(startIndex + count, newNextSequenceValue);
-    }
-
-    private void createSequence(Connection conn) throws SQLException {
-        conn.createStatement().execute(String.format(CREATE_SEQUENCE_SYNTAX, SEQUENCE_NAME, 1, 1, 1, MAX_VALUE, 1));
-        conn.commit();
-    }
-
-    private void setCurrentValue(Connection conn, long currentValue) throws SQLException {
-        for (int i = 1; i <= currentValue; i++) {
-            getNextSequenceValue(conn);
-        }
-    }
-
-    private long getNextSequenceValue(Connection conn) throws SQLException {
-        String ddl = new StringBuilder().append("SELECT NEXT VALUE FOR ").append(SEQUENCE_NAME).toString();
-        ResultSet rs = conn.createStatement().executeQuery(ddl);
-        assertTrue(rs.next());
-        conn.commit();
-        return rs.getLong(1);
-    }
-
-    private void dropSequence(Connection conn) throws Exception {
-        String ddl = new StringBuilder().append("DROP SEQUENCE ").append(SEQUENCE_NAME).toString();
-        conn.createStatement().execute(ddl);
-        conn.commit();
-    }
-
-    /**
-     * Static class to define properties for the test
-     */
-    private static class UDFTestProperties {
-        private final Long numToReserve;
-        private Long currentValue = 1L;
-        private String sequenceName = SEQUENCE_NAME;
-        private boolean exceptionExpected = false;
-        private Class<? extends Exception> exceptionClass = null;
-        private String errorMessage = null;
-
-        public UDFTestProperties(long numToReserve) {
-            this.numToReserve = numToReserve;
-        }
-
-        public Long getCurrentValue() {
-            return currentValue;
-        }
-
-        public void setCurrentValue(long currentValue) {
-            this.currentValue = currentValue;
-        }
-
-        public String getSequenceName() {
-            return sequenceName;
-        }
-
-        public void setSequenceName(String sequenceName) {
-            this.sequenceName = sequenceName;
-        }
-
-        public boolean isExceptionExpected() {
-            return exceptionExpected;
-        }
-
-        public void setExceptionExpected(boolean shouldThrowException) {
-            this.exceptionExpected = shouldThrowException;
-        }
-
-        public String getErrorMessage() {
-            return errorMessage;
-        }
-
-        public void setErrorMessage(String errorMessage) {
-            this.errorMessage = errorMessage;
-        }
-
-        public Long getNumToReserve() {
-            return numToReserve;
-        }
-
-        public Class<? extends Exception> getExceptionClass() {
-            return exceptionClass;
-        }
-
-        public void setExceptionClass(Class<? extends Exception> exceptionClass) {
-            this.exceptionClass = exceptionClass;
-        }
-
-    }
-
-}
diff --git a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java b/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java
deleted file mode 100644
index 08c2646..0000000
--- a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/PhoenixHBaseLoader.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig;
-
-import java.io.IOException;
-import java.util.Properties;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.phoenix.mapreduce.PhoenixInputFormat;
-import org.apache.phoenix.mapreduce.PhoenixRecordWritable;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType;
-import org.apache.phoenix.pig.util.PhoenixPigSchemaUtil;
-import org.apache.phoenix.pig.util.QuerySchemaParserFunction;
-import org.apache.phoenix.pig.util.TableSchemaParserFunction;
-import org.apache.phoenix.pig.util.TypeUtil;
-import org.apache.pig.Expression;
-import org.apache.pig.LoadFunc;
-import org.apache.pig.LoadMetadata;
-import org.apache.pig.PigException;
-import org.apache.pig.ResourceSchema;
-import org.apache.pig.ResourceStatistics;
-import org.apache.pig.backend.executionengine.ExecException;
-import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit;
-import org.apache.pig.data.Tuple;
-import org.apache.pig.impl.util.ObjectSerializer;
-import org.apache.pig.impl.util.UDFContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * LoadFunc to load data from HBase using Phoenix.
- * 
- * Example usage: 
- * a) TABLE
- *   i)   A = load 'hbase://table/HIRES'  using
- * org.apache.phoenix.pig.PhoenixHBaseLoader('localhost');
- *               
- *       The above loads the data from a table 'HIRES'
- *       
- *   ii)  A = load 'hbase://table/HIRES/id,name' using
- *       org.apache.phoenix.pig.PhoenixHBaseLoader('localhost');
- *       
- *       Here, only id, name are returned from the table HIRES as part of LOAD.
- * 
- * b)  QUERY
- *   i)   B = load 'hbase://query/SELECT fname, lname FROM HIRES' using
- *             org.apache.phoenix.pig.PhoenixHBaseLoader('localhost');
- *       
- *        The above loads fname and lname columns from 'HIRES' table.
- * 
- */
-public final class PhoenixHBaseLoader extends LoadFunc implements LoadMetadata {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixHBaseLoader.class);
-    private static final String PHOENIX_TABLE_NAME_SCHEME = "hbase://table/";
-    private static final String PHOENIX_QUERY_SCHEME      = "hbase://query/";
-    private static final String RESOURCE_SCHEMA_SIGNATURE = "phoenix.pig.schema";
-   
-    private Configuration config;
-    private String tableName;
-    private String selectQuery;
-    private String zkQuorum;
-    private PhoenixInputFormat<PhoenixRecordWritable> inputFormat;
-    private RecordReader<NullWritable,PhoenixRecordWritable> reader;
-    private String contextSignature;
-    private ResourceSchema schema;
-       
-    /**
-     * @param zkQuorum the ZooKeeper quorum used to connect to the Phoenix/HBase cluster
-     */
-    public PhoenixHBaseLoader(String zkQuorum) {
-        super();
-        if (zkQuorum == null) {
-            throw new NullPointerException("Zookeeper quorum cannot be null!");
-        }
-
-        if (zkQuorum.isEmpty()) {
-            throw new IllegalStateException("Zookeeper quorum cannot be empty!");
-        }
-
-        this.zkQuorum = zkQuorum;
-    }
-    
-    @Override
-    public void setLocation(String location, Job job) throws IOException {
-        PhoenixConfigurationUtil.loadHBaseConfiguration(job);
-
-        final Configuration configuration = job.getConfiguration();
-        //explicitly turning off combining splits. 
-        configuration.setBoolean("pig.noSplitCombination", true);
-
-        this.initializePhoenixPigConfiguration(location, configuration);
-    }
-
-    /**
-     * Initializes the Phoenix configuration if it has not been set up yet. Called by {@link #setLocation} and {@link #getSchema}.
-     * @param location
-     * @param configuration
-     * @throws IOException
-     */
-    private void initializePhoenixPigConfiguration(final String location, final Configuration configuration) throws IOException {
-        if(this.config != null) {
-            return;
-        }
-        this.config = configuration;
-        this.config.set(HConstants.ZOOKEEPER_QUORUM,this.zkQuorum);
-        PhoenixConfigurationUtil.setInputClass(this.config, PhoenixRecordWritable.class);
-        Pair<String,String> pair = null;
-        try {
-            if (location.startsWith(PHOENIX_TABLE_NAME_SCHEME)) {
-                String tableSchema = location.substring(PHOENIX_TABLE_NAME_SCHEME.length());
-                final TableSchemaParserFunction parseFunction = new TableSchemaParserFunction();
-                pair =  parseFunction.apply(tableSchema);
-                PhoenixConfigurationUtil.setSchemaType(this.config, SchemaType.TABLE);
-             } else if (location.startsWith(PHOENIX_QUERY_SCHEME)) {
-                this.selectQuery = location.substring(PHOENIX_QUERY_SCHEME.length());
-                final QuerySchemaParserFunction queryParseFunction = new QuerySchemaParserFunction(this.config);
-                pair = queryParseFunction.apply(this.selectQuery);
-                PhoenixConfigurationUtil.setInputQuery(this.config, this.selectQuery);
-                PhoenixConfigurationUtil.setSchemaType(this.config, SchemaType.QUERY);
-            } else {
-                // neither the table nor the query scheme matched; fail fast here
-                // instead of dereferencing the null pair below
-                printUsage(location);
-            }
-            this.tableName = pair.getFirst();
-            final String selectedColumns = pair.getSecond();
-            
-            if ((this.tableName == null || this.tableName.isEmpty()) &&
-                    (this.selectQuery == null || this.selectQuery.isEmpty())) {
-                printUsage(location);
-            }
-            PhoenixConfigurationUtil.setInputTableName(this.config, this.tableName);
-            if(selectedColumns != null && !selectedColumns.isEmpty()) {
-                PhoenixConfigurationUtil.setSelectColumnNames(this.config, selectedColumns.split(","));   
-            }
-        } catch(IllegalArgumentException iae) {
-            printUsage(location);
-        } 
-    }
-
-  
-    @Override
-    public String relativeToAbsolutePath(String location, Path curDir) throws IOException {
-        return location;
-    }
-
-    @Override
-    public InputFormat getInputFormat() throws IOException {
-        if(inputFormat == null) {
-            inputFormat = new PhoenixInputFormat<PhoenixRecordWritable>();
-            PhoenixConfigurationUtil.setInputClass(this.config, PhoenixRecordWritable.class);
-        }
-        return inputFormat;
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void prepareToRead(RecordReader reader, PigSplit split) throws IOException {
-        this.reader = reader;
-        final String resourceSchemaAsStr = getValueFromUDFContext(this.contextSignature,RESOURCE_SCHEMA_SIGNATURE);
-        if (resourceSchemaAsStr == null) {
-            throw new IOException("Could not find schema in UDF context");
-        }
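-        // the schema was serialized into the UDFContext by getSchema() on the Pig front end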
-        schema = (ResourceSchema) ObjectSerializer.deserialize(resourceSchemaAsStr);
-    }
-
-     /*
-     * @see org.apache.pig.LoadFunc#setUDFContextSignature(java.lang.String)
-     */
-    @Override
-    public void setUDFContextSignature(String signature) {
-        this.contextSignature = signature;
-    }
-    
-    @Override
-    public Tuple getNext() throws IOException {
-        try {
-            if(!reader.nextKeyValue()) {
-                return null; 
-             }
-            final PhoenixRecordWritable record = reader.getCurrentValue();
-            if(record == null) {
-                return null;
-            }
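-            // convert the Phoenix record into a Pig tuple using the schema captured in prepareToRead()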
-            final Tuple tuple = TypeUtil.transformToTuple(record, schema.getFields());
-            return tuple;
-       } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            int errCode = 6018;
-            final String errMsg = "Error while reading input";
-            throw new ExecException(errMsg, errCode,PigException.REMOTE_ENVIRONMENT, e);
-       } 
-    }
-    
-    private void printUsage(final String location) throws PigException {
-        String locationErrMsg = String.format("The input location in load statement should be of the form " +
-                "%s<table name> or %s<query>. Got [%s] ",PHOENIX_TABLE_NAME_SCHEME,PHOENIX_QUERY_SCHEME,location);
-        LOG.error(locationErrMsg);
-        throw new PigException(locationErrMsg);
-    }
-    
-    @Override
-    public ResourceSchema getSchema(String location, Job job) throws IOException {
-        if(schema != null) {
-            return schema;
-        }
-
-        PhoenixConfigurationUtil.loadHBaseConfiguration(job);
-        final Configuration configuration = job.getConfiguration();
-        this.initializePhoenixPigConfiguration(location, configuration);
-        this.schema = PhoenixPigSchemaUtil.getResourceSchema(this.config);
-        if(LOG.isDebugEnabled()) {
-            LOG.debug(String.format("Resource Schema generated for location [%s] is [%s]", location, schema.toString()));
-        }
-        this.storeInUDFContext(this.contextSignature, RESOURCE_SCHEMA_SIGNATURE, ObjectSerializer.serialize(schema));
-        return schema;
-    }
-
-    @Override
-    public ResourceStatistics getStatistics(String location, Job job) throws IOException {
-       // not implemented
-        return null;
-    }
-
-    @Override
-    public String[] getPartitionKeys(String location, Job job) throws IOException {
-     // not implemented
-        return null;
-    }
-
-    @Override
-    public void setPartitionFilter(Expression partitionFilter) throws IOException {
-     // not implemented
-    }
- 
-    private void storeInUDFContext(final String signature,final String key,final String value) {
-        final UDFContext udfContext = UDFContext.getUDFContext();
-        final Properties props = udfContext.getUDFProperties(this.getClass(), new String[]{signature});
-        props.put(key, value);
-    }
-    
-    private String getValueFromUDFContext(final String signature,final String key) {
-        final UDFContext udfContext = UDFContext.getUDFContext();
-        final Properties props = udfContext.getUDFProperties(this.getClass(), new String[]{signature});
-        return props.getProperty(key);
-    }
-}
diff --git a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/PhoenixHBaseStorage.java b/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/PhoenixHBaseStorage.java
deleted file mode 100644
index e061c1c..0000000
--- a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/PhoenixHBaseStorage.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig;
-
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.List;
-import java.util.Properties;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.Arrays;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.hadoop.mapreduce.RecordWriter;
-import org.apache.phoenix.mapreduce.PhoenixOutputFormat;
-import org.apache.phoenix.mapreduce.PhoenixRecordWritable;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.pig.util.TableSchemaParserFunction;
-import org.apache.phoenix.pig.util.TypeUtil;
-import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.util.ColumnInfo;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.pig.ResourceSchema;
-import org.apache.pig.ResourceSchema.ResourceFieldSchema;
-import org.apache.pig.StoreFuncInterface;
-import org.apache.pig.data.DataType;
-import org.apache.pig.data.Tuple;
-import org.apache.pig.impl.util.ObjectSerializer;
-import org.apache.pig.impl.util.UDFContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * StoreFunc that uses Phoenix to store data into HBase.
- * 
- * Example usage: A = load 'testdata' as (a:chararray, b:chararray, c:chararray,
- * d:chararray, e: datetime); STORE A into 'hbase://CORE.ENTITY_HISTORY' using
- * org.apache.phoenix.pig.PhoenixHBaseStorage('localhost','-batchSize 5000');
- * 
- * The above reads a file 'testdata' and writes the elements to HBase. First
- * argument to this StoreFunc is the server, the 2nd argument is the batch size
- * for upserts via Phoenix.
- * 
- * Alternative usage: A = load 'testdata' as (a:chararray, b:chararray,
- *  e: datetime); STORE A into 'hbase://CORE.ENTITY_HISTORY/ID,F.B,F.E' using
- * org.apache.phoenix.pig.PhoenixHBaseStorage('localhost','-batchSize 5000');
- * 
- * The above reads a file 'testdata' and writes the elements ID, F.B, and F.E to HBase. 
- * In this example, ID is the row key, and F is the column family for the data elements.  
- * First argument to this StoreFunc is the server, the 2nd argument is the batch size
- * for upserts via Phoenix. In this case, less than the full table row is required.
- * For configuration message, look in the info log file.
- *
- * Note that Pig types must be in sync with the target Phoenix data types. This
- * StoreFunc tries its best to cast based on input Pig types and target Phoenix
- * data types, but it is recommended to supply an appropriate schema.
- */
-@SuppressWarnings("rawtypes")
-public class PhoenixHBaseStorage implements StoreFuncInterface {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixHBaseStorage.class);
-    private static final Set<String> PROPS_TO_IGNORE = new HashSet<>(Arrays.asList(PhoenixRuntime.CURRENT_SCN_ATTRIB));
-
-    private Configuration config;
-    private RecordWriter<NullWritable, PhoenixRecordWritable> writer;
-    private List<ColumnInfo> columnInfo = null;
-    private String contextSignature = null;
-    private ResourceSchema schema;  
-    private long batchSize;
-    private final PhoenixOutputFormat outputFormat = new PhoenixOutputFormat<PhoenixRecordWritable>(PROPS_TO_IGNORE);
-    // Set of options permitted
-    private final static Options validOptions = new Options();
-    private final static CommandLineParser parser = new GnuParser();
-    private final static String SCHEMA = "_schema";
-    private final static String PHOENIX_TABLE_NAME_SCHEME = "hbase://";
-    
-    private final CommandLine configuredOptions;
-    private final String server;
-
-    public PhoenixHBaseStorage(String server) throws ParseException {
-        this(server, null);
-    }
-
-    public PhoenixHBaseStorage(String server, String optString)
-            throws ParseException {
-        populateValidOptions();
-        this.server = server;
-
-        String[] optsArr = optString == null ? new String[0] : optString.split(" ");
-        try {
-            configuredOptions = parser.parse(validOptions, optsArr);
-        } catch (ParseException e) {
-            HelpFormatter formatter = new HelpFormatter();
-            formatter.printHelp("[-batchSize]", validOptions);
-            throw e;
-        }
-        if (!configuredOptions.hasOption("batchSize")) {
-            // getOptionValue() would return null here, and Long.parseLong(null)
-            // throws an unhelpful NumberFormatException, so fail with a clear message.
-            throw new ParseException("Missing required option: -batchSize");
-        }
-        batchSize = Long.parseLong(configuredOptions.getOptionValue("batchSize"));
-    }
-
-    private static void populateValidOptions() {
-        validOptions.addOption("batchSize", true, "Specify upsert batch size");
-    }
-
-    /**
-     * Returns UDFProperties based on <code>contextSignature</code>.
-     */
-    private Properties getUDFProperties() {
-        return UDFContext.getUDFContext().getUDFProperties(this.getClass(), new String[] { contextSignature });
-    }
-
-    
-    /**
-     * Parse the HBase table name and configure job
-     */
-    @Override
-    public void setStoreLocation(String location, Job job) throws IOException {
-        String tableSchema = location.substring(PHOENIX_TABLE_NAME_SCHEME.length());
-        final TableSchemaParserFunction parseFunction = new TableSchemaParserFunction();
-        Pair<String,String> pair =  parseFunction.apply(tableSchema);
-        PhoenixConfigurationUtil.loadHBaseConfiguration(job);
-        config = job.getConfiguration();
-        config.set(HConstants.ZOOKEEPER_QUORUM, server);
-        String tableName = pair.getFirst();
-        String columns = pair.getSecond(); 
-        if(columns != null && columns.length() > 0) {
-            PhoenixConfigurationUtil.setUpsertColumnNames(config, columns.split(","));
-        }
-        PhoenixConfigurationUtil.setPhysicalTableName(config,tableName);
-        PhoenixConfigurationUtil.setOutputTableName(config,tableName);
-        PhoenixConfigurationUtil.setBatchSize(config,batchSize);
-        String serializedSchema = getUDFProperties().getProperty(contextSignature + SCHEMA);
-        if (serializedSchema != null) {
-            schema = (ResourceSchema) ObjectSerializer.deserialize(serializedSchema);
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void prepareToWrite(RecordWriter writer) throws IOException {
-        this.writer = writer;
-        try {
-            this.columnInfo = PhoenixConfigurationUtil.getUpsertColumnMetadataList(this.config);
-        } catch(SQLException sqle) {
-            throw new IOException(sqle);
-        }
-    }
-
-    @Override
-    public void putNext(Tuple t) throws IOException {
-        ResourceFieldSchema[] fieldSchemas = (schema == null) ? null : schema.getFields();
-        PhoenixRecordWritable record = new PhoenixRecordWritable(this.columnInfo);
-        try {
-            for(int i=0; i<t.size(); i++) {
-                Object value = t.get(i);
-                if(value == null) {
-                    record.add(null);
-                    continue;
-                }
-                ColumnInfo cinfo = this.columnInfo.get(i);
-                byte type = (fieldSchemas == null) ? DataType.findType(value) : fieldSchemas[i].getType();
-                PDataType pDataType = PDataType.fromTypeId(cinfo.getSqlType());
-                Object v =  TypeUtil.castPigTypeToPhoenix(value, type, pDataType);
-                record.add(v);
-            }
-            this.writer.write(null, record);
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw new RuntimeException(e);
-        } catch (SQLException e) {
-            LOG.error("Error on tuple {} .",t);
-            throw new IOException(e);
-        }
-        
-    }
-
-    @Override
-    public void setStoreFuncUDFContextSignature(String signature) {
-        this.contextSignature = signature;
-    }
-
-    @Override
-    public void cleanupOnFailure(String location, Job job) throws IOException {
-    }
-
-    @Override
-    public void cleanupOnSuccess(String location, Job job) throws IOException {
-    }
-
-    @Override
-    public String relToAbsPathForStoreLocation(String location, Path curDir) throws IOException {
-        return location;
-    }
-
-    @Override
-    public OutputFormat getOutputFormat() throws IOException {
-        return outputFormat;
-    }
-
-    @Override
-    public void checkSchema(ResourceSchema s) throws IOException {
-        schema = s;
-        getUDFProperties().setProperty(contextSignature + SCHEMA, ObjectSerializer.serialize(schema));
-    }
-}
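
For reference, the store location strings in the Javadoc above follow the
'hbase://TABLE' or 'hbase://TABLE/COLUMNS' convention that setStoreLocation()
decomposes. A minimal standalone Java sketch of that decomposition (the table
and column names are illustrative; the real parsing is done by
TableSchemaParserFunction, shown further below):

    public class StoreLocationSketch {
        public static void main(String[] args) {
            String location = "hbase://CORE.ENTITY_HISTORY/ID,F.B,F.E";
            // Strip the "hbase://" scheme, then split off the optional column list.
            String tableSchema = location.substring("hbase://".length());
            String[] tokens = tableSchema.split("/", 2);
            String tableName = tokens[0];                          // CORE.ENTITY_HISTORY
            String columns = tokens.length > 1 ? tokens[1] : null; // ID,F.B,F.E
            System.out.println(tableName + " -> " + columns);
        }
    }
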
diff --git a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/udf/ReserveNSequence.java b/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/udf/ReserveNSequence.java
deleted file mode 100644
index f1cef1d..0000000
--- a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/udf/ReserveNSequence.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.udf;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.mapreduce.util.ConnectionUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.pig.EvalFunc;
-import org.apache.pig.data.Tuple;
-import org.apache.pig.impl.util.UDFContext;
-
-import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
-import org.apache.phoenix.thirdparty.com.google.common.base.Strings;
-
-import edu.umd.cs.findbugs.annotations.NonNull;
-import edu.umd.cs.findbugs.annotations.Nullable;
-
-/**
- * UDF to reserve a chunk of numbers for a given sequence.
- *
- * Note: a connection is opened lazily for the first tuple of each task and reused
- *       for subsequent tuples; it is closed in finish().
- */
-public class ReserveNSequence extends EvalFunc<Long> {
-
-    public static final String INVALID_TUPLE_MESSAGE = "Tuple should have the correct fields (NumToReserve, SequenceName).";
-    public static final String EMPTY_SEQUENCE_NAME_MESSAGE = "Sequence name must not be null";
-    public static final String EMPTY_ZK_MESSAGE = "ZKQuorum must not be null";
-    public static final String INVALID_NUMBER_MESSAGE = "Number of sequence values to reserve must be greater than 0";
-
-    private final String zkQuorum;
-    private final String tenantId;
-    private Configuration configuration;
-    Connection connection;
-    
-    public ReserveNSequence(@NonNull String zkQuorum, @Nullable String tenantId) {
-        Preconditions.checkNotNull(zkQuorum, EMPTY_ZK_MESSAGE);
-        this.zkQuorum = zkQuorum;
-        this.tenantId = tenantId;
-    }
-
-    /**
-     * Reserves the next N values for a sequence. N is the first field in the tuple;
-     * the sequence name is the second field.
-     */
-    @Override
-    public Long exec(Tuple input) throws IOException {
-        Preconditions.checkArgument(input != null && input.size() >= 2, INVALID_TUPLE_MESSAGE);
-        Long numToReserve = (Long)(input.get(0));
-        Preconditions.checkArgument(numToReserve > 0, INVALID_NUMBER_MESSAGE);
-        String sequenceName = (String)input.get(1);
-        Preconditions.checkNotNull(sequenceName, EMPTY_SEQUENCE_NAME_MESSAGE);
-        // It will create a connection when called for the first Tuple per task.
-        // The connection gets cleaned up in finish() method
-        if (connection == null) {
-            initConnection();
-        }
-        String sql = getNextNSequenceSelectStatement(numToReserve, sequenceName);
-        // try-with-resources ensures the ResultSet is closed even on failure
-        try (ResultSet rs = connection.createStatement().executeQuery(sql)) {
-            Preconditions.checkArgument(rs.next());
-            Long startIndex = rs.getLong(1);
-            connection.commit();
-            return startIndex;
-        } catch (SQLException e) {
-            throw new IOException("Caught exception while processing row: " + e.getMessage(), e);
-        }
-    }
-    
-    /**
-     * Cleanup to be performed at the end.
-     * Close connection
-     */
-    @Override
-    public void finish() {
-        if (connection != null) {
-            try {
-                connection.close();
-            } catch (SQLException e) {
-                throw new RuntimeException("Caught exception while closing connection", e);
-            }
-        }
-    }
-    
-    private void initConnection() throws IOException {
-        // Create correct configuration to be used to make phoenix connections
-        UDFContext context = UDFContext.getUDFContext();
-        configuration = new Configuration(context.getJobConf());
-        configuration.set(HConstants.ZOOKEEPER_QUORUM, this.zkQuorum);
-        if (Strings.isNullOrEmpty(tenantId)) {
-            configuration.unset(PhoenixRuntime.TENANT_ID_ATTRIB);
-        } else {
-            configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
-        }
-        try {
-            connection = ConnectionUtil.getOutputConnection(configuration);
-        } catch (SQLException e) {
-            throw new IOException("Caught exception while creating connection", e);
-        }
-    }
-
-    private String getNextNSequenceSelectStatement(Long numToReserve, String sequenceName) {
-        return "SELECT NEXT " + numToReserve + " VALUES FOR " + sequenceName;
-    }
-
-}
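
The UDF above boils down to one Phoenix statement per call, "SELECT NEXT <n>
VALUES FOR <sequence>". A plain JDBC sketch of the same reservation, assuming a
local quorum and an existing sequence MY_SEQ (both illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ReserveSequenceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 // Reserve a block of 100 values from the sequence MY_SEQ.
                 ResultSet rs = stmt.executeQuery("SELECT NEXT 100 VALUES FOR MY_SEQ")) {
                if (rs.next()) {
                    // The first value of the reserved block, as the UDF returns it.
                    System.out.println("Block starts at " + rs.getLong(1));
                }
                conn.commit();
            }
        }
    }
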
diff --git a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtil.java b/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtil.java
deleted file mode 100644
index 0f6bcd7..0000000
--- a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtil.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType;
-import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.util.ColumnInfo;
-import org.apache.pig.ResourceSchema;
-import org.apache.pig.ResourceSchema.ResourceFieldSchema;
-
-import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
-
-/**
- * Utility to generate the ResourceSchema from a list of {@link ColumnInfo}.
- */
-public final class PhoenixPigSchemaUtil {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PhoenixPigSchemaUtil.class);
-    
-    private PhoenixPigSchemaUtil() {
-    }
-    
-    static class Dependencies {
-    	List<ColumnInfo> getSelectColumnMetadataList(Configuration configuration) throws SQLException {
-    		return PhoenixConfigurationUtil.getSelectColumnMetadataList(configuration);
-    	}
-    }
-    
-    public static ResourceSchema getResourceSchema(final Configuration configuration, Dependencies dependencies) throws IOException {
-        
-        final ResourceSchema schema = new ResourceSchema();
-        try {
-            List<ColumnInfo> columns = null;
-            final SchemaType schemaType = PhoenixConfigurationUtil.getSchemaType(configuration);
-            if(schemaType == SchemaType.QUERY) {
-                final String sqlQuery = PhoenixConfigurationUtil.getSelectStatement(configuration);
-                Preconditions.checkNotNull(sqlQuery, "No Sql Query exists within the configuration");
-                final SqlQueryToColumnInfoFunction function = new SqlQueryToColumnInfoFunction(configuration);
-                columns = function.apply(sqlQuery);
-            } else if (schemaType == SchemaType.TABLE) {
-                columns = dependencies.getSelectColumnMetadataList(configuration);
-            }
-            ResourceFieldSchema[] fields = new ResourceFieldSchema[columns.size()];
-            int i = 0;
-            for(ColumnInfo cinfo : columns) {
-                int sqlType = cinfo.getSqlType();
-                PDataType phoenixDataType = PDataType.fromTypeId(sqlType);
-                byte pigType = TypeUtil.getPigDataTypeForPhoenixType(phoenixDataType);
-                ResourceFieldSchema field = new ResourceFieldSchema();
-                field.setType(pigType).setName(cinfo.getDisplayName());
-                fields[i++] = field;
-            }
-            schema.setFields(fields);    
-        } catch(SQLException sqle) {
-            LOG.error(String.format("Error: SQLException [%s] ",sqle.getMessage()));
-            throw new IOException(sqle);
-        }
-        
-        return schema;
-    }
-    
-    public static ResourceSchema getResourceSchema(final Configuration configuration) throws IOException {
-        return getResourceSchema(configuration, new Dependencies());
-    }
-}
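
The per-column translation above (JDBC sqlType -> Phoenix PDataType -> Pig type
code) can be exercised in isolation. A sketch assuming the Phoenix and Pig jars
are on the classpath; the column name is illustrative:

    import java.sql.Types;

    import org.apache.phoenix.pig.util.TypeUtil;
    import org.apache.phoenix.schema.types.PDataType;
    import org.apache.pig.ResourceSchema;
    import org.apache.pig.ResourceSchema.ResourceFieldSchema;

    public class SchemaMappingSketch {
        public static void main(String[] args) {
            // A BIGINT column resolves to PLong, which TypeUtil maps to Pig's LONG.
            PDataType phoenixType = PDataType.fromTypeId(Types.BIGINT);
            byte pigType = TypeUtil.getPigDataTypeForPhoenixType(phoenixType);
            ResourceFieldSchema field =
                    new ResourceFieldSchema().setName("ID").setType(pigType);
            // Prints a schema equivalent to ID:long
            System.out.println(new ResourceSchema().setFields(
                    new ResourceFieldSchema[] { field }));
        }
    }
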
diff --git a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/QuerySchemaParserFunction.java b/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/QuerySchemaParserFunction.java
deleted file mode 100644
index a5f4e62..0000000
--- a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/QuerySchemaParserFunction.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.List;
-
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.compile.ColumnProjector;
-import org.apache.phoenix.compile.QueryPlan;
-import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.mapreduce.util.ConnectionUtil;
-
-import org.apache.phoenix.thirdparty.com.google.common.base.Function;
-import org.apache.phoenix.thirdparty.com.google.common.base.Joiner;
-import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A function to parse the select query passed to LOAD into a {@code Pair<tableName, columns>}.
- */
-public class QuerySchemaParserFunction implements Function<String,Pair<String,String>> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(QuerySchemaParserFunction.class);
-    private final Configuration configuration;
-    
-    public QuerySchemaParserFunction(Configuration configuration) {
-        Preconditions.checkNotNull(configuration);
-        this.configuration = configuration;
-    }
-    
-    @Override
-    public Pair<String, String> apply(final String selectStatement) {
-        Preconditions.checkNotNull(selectStatement);
-        Preconditions.checkArgument(!selectStatement.isEmpty(), "Select query is empty");
-        Connection connection = null;
-        try {
-            connection = ConnectionUtil.getInputConnection(this.configuration);
-            final Statement  statement = connection.createStatement();
-            final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
-            final QueryPlan queryPlan = pstmt.compileQuery(selectStatement);
-            isValidStatement(queryPlan);
-            final String tableName = queryPlan.getTableRef().getTable().getName().getString();
-            final List<? extends ColumnProjector> projectedColumns = queryPlan.getProjector().getColumnProjectors();
-            final List<String> columns = Lists.transform(projectedColumns,
-                                                            new Function<ColumnProjector,String>() {
-                                                                @Override
-                                                                public String apply(ColumnProjector column) {
-                                                                    return column.getName();
-                                                                }
-                                                            });
-            final String columnsAsStr = Joiner.on(",").join(columns);
-            return new Pair<String, String>(tableName, columnsAsStr);
-        } catch (SQLException e) {
-            LOG.error(String.format(" Error [%s] parsing SELECT query [%s] ",e.getMessage(),selectStatement));
-            throw new RuntimeException(e);
-        } finally {
-            if(connection != null) {
-                try {
-                    connection.close();
-                } catch(SQLException sqle) {
-                    LOG.error(" Error closing connection ");
-                    throw new RuntimeException(sqle);
-                }
-            }
-        }
-    }
-    
-    /**
-     * Validates the statement behind the given query plan. The conditions are:
-     * <ol>
-     *   <li>it is a SELECT statement</li>
-     *   <li>it doesn't contain an ORDER BY expression</li>
-     *   <li>it doesn't contain a LIMIT</li>
-     *   <li>it doesn't contain a GROUP BY expression</li>
-     *   <li>it doesn't contain DISTINCT</li>
-     *   <li>it doesn't contain aggregate functions</li>
-     * </ol>
-     * @param queryPlan the compiled plan of the statement to validate
-     * @return true if the statement is valid
-     */
-    private boolean isValidStatement(final QueryPlan queryPlan) {
-        if(queryPlan.getStatement().getOperation() != PhoenixStatement.Operation.QUERY) {
-            throw new IllegalArgumentException("Query passed isn't a SELECT statement");
-        }
-        if(!queryPlan.getOrderBy().getOrderByExpressions().isEmpty() 
-                || queryPlan.getLimit() != null 
-                || (queryPlan.getGroupBy() != null && !queryPlan.getGroupBy().isEmpty()) 
-                || queryPlan.getStatement().isDistinct()
-                || queryPlan.getStatement().isAggregate()) {
-            throw new IllegalArgumentException("SELECT statement shouldn't contain DISTINCT or ORDER BY or LIMIT or GROUP BY expressions");
-        }
-        return true;
-    }
-
-}
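
Its contract, as exercised by the unit tests further below: a plain projection
comes back as the table name plus the projected column names, while aggregates,
DISTINCT, GROUP BY, ORDER BY and LIMIT are rejected. A sketch assuming a
Configuration whose ZooKeeper quorum points at a cluster with an EMPLOYEE table:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.util.Pair;
    import org.apache.phoenix.pig.util.QuerySchemaParserFunction;

    public class QueryParseSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration(); // quorum setup omitted
            QuerySchemaParserFunction fn = new QuerySchemaParserFunction(conf);
            Pair<String, String> pair =
                    fn.apply("SELECT name, age, location FROM EMPLOYEE");
            System.out.println(pair.getFirst());  // EMPLOYEE
            System.out.println(pair.getSecond()); // NAME,AGE,LOCATION
            // fn.apply("SELECT MAX(id) FROM EMPLOYEE") throws IllegalArgumentException
        }
    }
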
diff --git a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunction.java b/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunction.java
deleted file mode 100644
index 01e1277..0000000
--- a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunction.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.List;
-
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.compile.ColumnProjector;
-import org.apache.phoenix.compile.QueryPlan;
-import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.mapreduce.util.ConnectionUtil;
-import org.apache.phoenix.util.ColumnInfo;
-
-import org.apache.phoenix.thirdparty.com.google.common.base.Function;
-import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class SqlQueryToColumnInfoFunction implements Function<String,List<ColumnInfo>> {
-    
-    private static final Logger LOG = LoggerFactory.getLogger(SqlQueryToColumnInfoFunction.class);
-    private final Configuration configuration;
-
-    public SqlQueryToColumnInfoFunction(final Configuration configuration) {
-        this.configuration = configuration;
-    }
-
-    @Override
-    public List<ColumnInfo> apply(String sqlQuery) {
-        Preconditions.checkNotNull(sqlQuery);
-        Connection connection = null;
-        List<ColumnInfo> columnInfos = null;
-        try {
-            connection = ConnectionUtil.getInputConnection(this.configuration);
-            final Statement  statement = connection.createStatement();
-            final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
-            final QueryPlan queryPlan = pstmt.compileQuery(sqlQuery);
-            final List<? extends ColumnProjector> projectedColumns = queryPlan.getProjector().getColumnProjectors();
-            columnInfos = Lists.transform(projectedColumns, new Function<ColumnProjector,ColumnInfo>() {
-                @Override
-                public ColumnInfo apply(final ColumnProjector columnProjector) {
-                    return new ColumnInfo(columnProjector.getName(), columnProjector.getExpression().getDataType().getSqlType());
-                }
-                
-            });
-       } catch (SQLException e) {
-            LOG.error(String.format(" Error [%s] parsing SELECT query [%s] ",e.getMessage(),sqlQuery));
-            throw new RuntimeException(e);
-        } finally {
-            if(connection != null) {
-                try {
-                    connection.close();
-                } catch(SQLException sqle) {
-                    LOG.error("Error closing connection!!");
-                    throw new RuntimeException(sqle);
-                }
-            }
-        }
-        return columnInfos;
-    }
-
-}
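
As its unit test below shows, column aliases become the projected names in the
resulting ColumnInfo list. A sketch under the same assumption as above (an
EMPLOYEE table reachable through the Configuration):

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.phoenix.pig.util.SqlQueryToColumnInfoFunction;
    import org.apache.phoenix.util.ColumnInfo;

    public class ColumnInfoSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration(); // quorum setup omitted
            SqlQueryToColumnInfoFunction fn = new SqlQueryToColumnInfoFunction(conf);
            List<ColumnInfo> cols = fn.apply(
                    "SELECT name AS a, age AS b, UPPER(location) AS c FROM EMPLOYEE");
            for (ColumnInfo col : cols) {
                // Prints A 12, B 4, C 12 (java.sql.Types codes for VARCHAR/INTEGER)
                System.out.println(col.getDisplayName() + " " + col.getSqlType());
            }
        }
    }
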
diff --git a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/TableSchemaParserFunction.java b/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/TableSchemaParserFunction.java
deleted file mode 100644
index e944b00..0000000
--- a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/TableSchemaParserFunction.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-
-
-import org.apache.hadoop.hbase.util.Pair;
-
-import org.apache.phoenix.thirdparty.com.google.common.base.Function;
-import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
-import org.apache.phoenix.thirdparty.com.google.common.base.Splitter;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables;
-
-/**
- * A function to parse the table schema passed to LOAD/STORE into a {@code Pair<tableName, columns>}.
- */
-public final class TableSchemaParserFunction implements Function<String,Pair<String,String>> {
-
-    private static final char TABLE_COLUMN_DELIMITER    = '/';
-    
-    @Override
-    public Pair<String, String> apply(final String tableSchema) {
-        Preconditions.checkNotNull(tableSchema);
-        Preconditions.checkArgument(!tableSchema.isEmpty(), "HBase table name is empty");
-        
-        final String[] tokens = Iterables.toArray(Splitter.on(TABLE_COLUMN_DELIMITER)
-                .trimResults().omitEmptyStrings().split(tableSchema), String.class);
-        final String tableName = tokens[0];
-        String columns = null;
-        if(tokens.length > 1) {
-            columns = tokens[1];    
-        }
-        return new Pair<String, String>(tableName, columns);
-    }
-}
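
Its behavior, mirrored by the unit tests below: the token before '/' is the
table name and everything after it is the optional column list.

    import org.apache.hadoop.hbase.util.Pair;
    import org.apache.phoenix.pig.util.TableSchemaParserFunction;

    public class TableSchemaParseSketch {
        public static void main(String[] args) {
            TableSchemaParserFunction fn = new TableSchemaParserFunction();
            Pair<String, String> pair = fn.apply("EMPLOYEE/col1,col2");
            System.out.println(pair.getFirst());  // EMPLOYEE
            System.out.println(pair.getSecond()); // col1,col2
            // With no '/', the column part is null:
            System.out.println(fn.apply("EMPLOYEE").getSecond()); // null
        }
    }
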
diff --git a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java b/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
deleted file mode 100644
index aa47fed..0000000
--- a/phoenix-pig-base/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.pig.util;
-
-import java.io.IOException;
-import java.sql.Date;
-import java.sql.SQLException;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.phoenix.mapreduce.PhoenixRecordWritable;
-import org.apache.phoenix.schema.types.PArrayDataType;
-import org.apache.phoenix.schema.types.PBinary;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PDate;
-import org.apache.phoenix.schema.types.PDecimal;
-import org.apache.phoenix.schema.types.PDouble;
-import org.apache.phoenix.schema.types.PFloat;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.schema.types.PLong;
-import org.apache.phoenix.schema.types.PSmallint;
-import org.apache.phoenix.schema.types.PTime;
-import org.apache.phoenix.schema.types.PTimestamp;
-import org.apache.phoenix.schema.types.PTinyint;
-import org.apache.phoenix.schema.types.PUnsignedDate;
-import org.apache.phoenix.schema.types.PUnsignedDouble;
-import org.apache.phoenix.schema.types.PUnsignedFloat;
-import org.apache.phoenix.schema.types.PUnsignedInt;
-import org.apache.phoenix.schema.types.PUnsignedLong;
-import org.apache.phoenix.schema.types.PUnsignedSmallint;
-import org.apache.phoenix.schema.types.PUnsignedTime;
-import org.apache.phoenix.schema.types.PUnsignedTimestamp;
-import org.apache.phoenix.schema.types.PUnsignedTinyint;
-import org.apache.phoenix.schema.types.PVarbinary;
-import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.schema.types.PhoenixArray;
-import org.apache.pig.PigException;
-import org.apache.pig.ResourceSchema.ResourceFieldSchema;
-import org.apache.pig.backend.hadoop.hbase.HBaseBinaryConverter;
-import org.apache.pig.data.DataByteArray;
-import org.apache.pig.data.DataType;
-import org.apache.pig.data.Tuple;
-import org.apache.pig.data.TupleFactory;
-import org.joda.time.DateTime;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
-import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap;
-import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap.Builder;
-
-public final class TypeUtil {
-
-    private static final Logger LOG = LoggerFactory.getLogger(TypeUtil.class);
-    private static final HBaseBinaryConverter BINARY_CONVERTER = new HBaseBinaryConverter();
-    private static final ImmutableMap<PDataType, Byte> PHOENIX_TO_PIG_TYPE = init();
-    private static final TupleFactory TUPLE_FACTORY = TupleFactory.getInstance();
-    
-    private TypeUtil() {}
-
-    /**
-     * @return map of Phoenix to Pig data types.
-     */
-    private static ImmutableMap<PDataType, Byte> init() {
-        final ImmutableMap.Builder<PDataType, Byte> builder = new Builder<PDataType, Byte>();
-        builder.put(PLong.INSTANCE, DataType.LONG);
-        builder.put(PVarbinary.INSTANCE, DataType.BYTEARRAY);
-        builder.put(PChar.INSTANCE, DataType.CHARARRAY);
-        builder.put(PVarchar.INSTANCE, DataType.CHARARRAY);
-        builder.put(PDouble.INSTANCE, DataType.DOUBLE);
-        builder.put(PFloat.INSTANCE, DataType.FLOAT);
-        builder.put(PInteger.INSTANCE, DataType.INTEGER);
-        builder.put(PTinyint.INSTANCE, DataType.INTEGER);
-        builder.put(PSmallint.INSTANCE, DataType.INTEGER);
-        builder.put(PDecimal.INSTANCE, DataType.BIGDECIMAL);
-        builder.put(PTime.INSTANCE, DataType.DATETIME);
-        builder.put(PTimestamp.INSTANCE, DataType.DATETIME);
-        builder.put(PBoolean.INSTANCE, DataType.BOOLEAN);
-        builder.put(PDate.INSTANCE, DataType.DATETIME);
-        builder.put(PUnsignedDate.INSTANCE, DataType.DATETIME);
-        builder.put(PUnsignedDouble.INSTANCE, DataType.DOUBLE);
-        builder.put(PUnsignedFloat.INSTANCE, DataType.FLOAT);
-        builder.put(PUnsignedInt.INSTANCE, DataType.INTEGER);
-        builder.put(PUnsignedLong.INSTANCE, DataType.LONG);
-        builder.put(PUnsignedSmallint.INSTANCE, DataType.INTEGER);
-        builder.put(PUnsignedTime.INSTANCE, DataType.DATETIME);
-        builder.put(PUnsignedTimestamp.INSTANCE, DataType.DATETIME);
-        builder.put(PUnsignedTinyint.INSTANCE, DataType.INTEGER);
-        return builder.build();
-    }
-
-    /**
-     * Returns the most appropriate PDataType associated with the incoming Pig type. Note that for
-     * the Pig DataType DATETIME this returns DATE, which is later used to cast to the target
-     * Phoenix type accordingly. See {@link #castPigTypeToPhoenix(Object, byte, PDataType)}.
-     *
-     * @param obj the Pig value
-     * @param type the Pig DataType code of the value
-     * @return the inferred PDataType, or null if obj is null
-     */
-    public static PDataType getType(Object obj, byte type) {
-        if (obj == null) { return null; }
-        PDataType sqlType;
-
-        switch (type) {
-        case DataType.BYTEARRAY:
-            sqlType = PVarbinary.INSTANCE;
-            break;
-        case DataType.CHARARRAY:
-            sqlType = PVarchar.INSTANCE;
-            break;
-        case DataType.DOUBLE:
-        case DataType.BIGDECIMAL:
-            sqlType = PDouble.INSTANCE;
-            break;
-        case DataType.FLOAT:
-            sqlType = PFloat.INSTANCE;
-            break;
-        case DataType.INTEGER:
-            sqlType = PInteger.INSTANCE;
-            break;
-        case DataType.LONG:
-        case DataType.BIGINTEGER:
-            sqlType = PLong.INSTANCE;
-            break;
-        case DataType.BOOLEAN:
-            sqlType = PBoolean.INSTANCE;
-            break;
-        case DataType.DATETIME:
-            sqlType = PDate.INSTANCE;
-            break;
-        case DataType.BYTE:
-            sqlType = PTinyint.INSTANCE;
-            break;
-        default:
-            throw new RuntimeException("Unknown type " + obj.getClass().getName() + " passed to PhoenixHBaseStorage");
-        }
-
-        return sqlType;
-
-    }
-
-    /**
-     * Encodes a value with the given Phoenix data type. It first checks whether the object is a
-     * TUPLE; a {@link Tuple} is mapped to a {@link PArrayDataType}. It then checks whether the
-     * value is BINARY and calls {@link #castBytes(Object, PDataType)} to convert the bytes to
-     * the target Phoenix type. A {@link RuntimeException} is thrown when the object cannot be
-     * coerced.
-     *
-     * @param o the Pig value
-     * @param objectType the Pig DataType code of the value
-     * @param targetPhoenixType the Phoenix type of the target column
-     * @return the value coerced to the target Phoenix type
-     * @throws SQLException
-     */
-    public static Object castPigTypeToPhoenix(Object o, byte objectType, PDataType targetPhoenixType) throws SQLException {
-        
-        if(DataType.TUPLE == objectType) {
-            Tuple tuple = (Tuple)o;
-            List<Object> data = tuple.getAll();
-            return data.toArray();
-        }
-        
-        PDataType inferredPType = getType(o, objectType);
-
-        if (inferredPType == null) { return null; }
-
-        if (inferredPType == PVarbinary.INSTANCE) {
-            try {
-                o = castBytes(o, targetPhoenixType);
-                if (targetPhoenixType != PVarbinary.INSTANCE && targetPhoenixType != PBinary.INSTANCE) {
-                    inferredPType = getType(o, DataType.findType(o));
-                }
-            } catch (IOException e) {
-                throw new RuntimeException("Error while casting bytes for object " + o);
-            }
-        }
-        if (inferredPType == PDate.INSTANCE) {
-            int inferredSqlType = targetPhoenixType.getSqlType();
-
-            if (inferredSqlType == Types.DATE) { return new Date(((DateTime)o).getMillis()); }
-            if (inferredSqlType == Types.TIME) { return new Time(((DateTime)o).getMillis()); }
-            if (inferredSqlType == Types.TIMESTAMP) { return new Timestamp(((DateTime)o).getMillis()); }
-        }
-
-        if (targetPhoenixType == inferredPType || inferredPType.isCoercibleTo(targetPhoenixType)) { return inferredPType
-                .toObject(o, targetPhoenixType); }
-
-        throw new RuntimeException(o.getClass().getName() + " cannot be coerced to " + targetPhoenixType.toString());
-    }
-
-    /**
-     * This method converts bytes to the target type required for Phoenix. It uses {@link HBaseBinaryConverter} for the
-     * conversion.
-     * 
-     * @param o
-     * @param targetPhoenixType
-     * @return Object
-     * @throws IOException
-     */
-    private static Object castBytes(Object o, PDataType targetPhoenixType) throws IOException {
-        byte[] bytes = ((DataByteArray)o).get();
-
-        if (PDataType.equalsAny(targetPhoenixType, PChar.INSTANCE, PVarchar.INSTANCE)) {
-            return BINARY_CONVERTER.bytesToCharArray(bytes);
-        } else if (PDataType.equalsAny(targetPhoenixType, PUnsignedSmallint.INSTANCE, PSmallint.INSTANCE)) {
-            return BINARY_CONVERTER.bytesToInteger(bytes).shortValue();
-        } else if (PDataType.equalsAny(targetPhoenixType, PUnsignedTinyint.INSTANCE, PTinyint.INSTANCE)) {
-            return BINARY_CONVERTER.bytesToInteger(bytes).byteValue();
-        } else if (PDataType.equalsAny(targetPhoenixType, PUnsignedInt.INSTANCE, PInteger.INSTANCE)) {
-            return BINARY_CONVERTER.bytesToInteger(bytes);
-        } else if (targetPhoenixType.equals(PBoolean.INSTANCE)) {
-            return BINARY_CONVERTER.bytesToBoolean(bytes);
-        } else if (PDataType.equalsAny(targetPhoenixType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE)) {
-            return BINARY_CONVERTER.bytesToFloat(bytes);
-        } else if (PDataType.equalsAny(targetPhoenixType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) {
-            return BINARY_CONVERTER.bytesToDouble(bytes);
-        } else if (PDataType.equalsAny(targetPhoenixType, PUnsignedLong.INSTANCE, PLong.INSTANCE)) {
-            return BINARY_CONVERTER.bytesToLong(bytes);
-        } else if (PDataType.equalsAny(targetPhoenixType, PVarbinary.INSTANCE, PBinary.INSTANCE)) {
-            return bytes;
-        } else {
-            return o;
-        }
-    }
-
-    /**
-     * Transforms the PhoenixRecordWritable to a Pig {@link Tuple}.
-     *
-     * @param record the record read from Phoenix
-     * @param projectedColumns the Pig field schemas of the projected columns
-     * @return the populated Tuple, or null if the record and projection don't match
-     * @throws IOException
-     */
-    public static Tuple transformToTuple(final PhoenixRecordWritable record, final ResourceFieldSchema[] projectedColumns) 
-            throws IOException {
-
-        Map<String, Object> columnValues = record.getResultMap();
-        
-        if (columnValues == null || columnValues.size() == 0 || projectedColumns == null
-                || projectedColumns.length != columnValues.size()) { return null; }
-        int numColumns = columnValues.size();
-        Tuple tuple = TUPLE_FACTORY.newTuple(numColumns);
-        try {
-            int i = 0;
-            for (Map.Entry<String,Object> entry : columnValues.entrySet()) {
-                final ResourceFieldSchema fieldSchema = projectedColumns[i];
-                Object object = entry.getValue();
-                if (object == null) {
-                    tuple.set(i++, null);
-                    continue;
-                }
-
-                switch (fieldSchema.getType()) {
-                case DataType.BYTEARRAY:
-                    byte[] bytes = PDataType.fromTypeId(PBinary.INSTANCE.getSqlType()).toBytes(object);
-                    tuple.set(i, new DataByteArray(bytes, 0, bytes.length));
-                    break;
-                case DataType.CHARARRAY:
-                    tuple.set(i, DataType.toString(object));
-                    break;
-                case DataType.DOUBLE:
-                    tuple.set(i, DataType.toDouble(object));
-                    break;
-                case DataType.FLOAT:
-                    tuple.set(i, DataType.toFloat(object));
-                    break;
-                case DataType.INTEGER:
-                    tuple.set(i, DataType.toInteger(object));
-                    break;
-                case DataType.LONG:
-                    tuple.set(i, DataType.toLong(object));
-                    break;
-                case DataType.BOOLEAN:
-                    tuple.set(i, DataType.toBoolean(object));
-                    break;
-                case DataType.DATETIME:
-                    if (object instanceof java.sql.Timestamp)
-                        tuple.set(i,new DateTime(((java.sql.Timestamp)object).getTime()));
-                    else
-                        tuple.set(i,new DateTime(object));
-                    break;
-                case DataType.BIGDECIMAL:
-                    tuple.set(i, DataType.toBigDecimal(object));
-                    break;
-                case DataType.BIGINTEGER:
-                    tuple.set(i, DataType.toBigInteger(object));
-                    break;
-                case DataType.TUPLE:
-                {
-                    PhoenixArray array = (PhoenixArray)object;
-                    Tuple t = TUPLE_FACTORY.newTuple(array.getDimensions());
-                    for(int j = 0 ; j < array.getDimensions() ; j++) {
-                        t.set(j,array.getElement(j));
-                    }
-                    tuple.set(i, t);
-                    break;
-                }
-                default:
-                    throw new RuntimeException(String.format("Unsupported Pig type [%s]", fieldSchema));
-                }
-                i++;
-            }
-        } catch (Exception ex) {
-            final String errorMsg = String.format("Error transforming PhoenixRecord to Tuple [%s]", ex.getMessage());
-            LOG.error(errorMsg, ex);
-            throw new PigException(errorMsg, ex);
-        }
-        return tuple;
-    }
-    
-    /**
-     * Returns the Pig data type mapped to the given Phoenix data type.
-     *
-     * @param phoenixDataType the Phoenix type to map
-     * @return the corresponding Pig DataType code
-     */
-    public static Byte getPigDataTypeForPhoenixType(final PDataType phoenixDataType) {
-        Preconditions.checkNotNull(phoenixDataType);
-        if(phoenixDataType instanceof PArrayDataType) {
-            return DataType.TUPLE;
-        }
-        final Byte pigDataType = PHOENIX_TO_PIG_TYPE.get(phoenixDataType);
-        if (LOG.isDebugEnabled()) {
-            LOG.debug(String.format(" For PhoenixDataType [%s] , pigDataType is [%s] ",
-                    phoenixDataType.getSqlTypeName(), DataType.findTypeName(pigDataType)));
-        }
-        return pigDataType;
-    }
-
-}
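
One concrete instance of the DATETIME handling in castPigTypeToPhoenix() above:
a Pig DateTime is inferred as DATE and then materialized as the java.sql type
matching the target column. A sketch using the epoch instant as illustrative
input:

    import java.sql.Types;

    import org.apache.phoenix.pig.util.TypeUtil;
    import org.apache.phoenix.schema.types.PDataType;
    import org.apache.pig.data.DataType;
    import org.joda.time.DateTime;

    public class CastSketch {
        public static void main(String[] args) throws Exception {
            // The target column is a Phoenix TIMESTAMP.
            PDataType target = PDataType.fromTypeId(Types.TIMESTAMP);
            Object v = TypeUtil.castPigTypeToPhoenix(
                    new DateTime(0L), DataType.DATETIME, target);
            System.out.println(v.getClass().getName()); // java.sql.Timestamp
        }
    }
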
diff --git a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtilTest.java b/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtilTest.java
deleted file mode 100644
index 2e6794a..0000000
--- a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/PhoenixPigSchemaUtilTest.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.sql.SQLException;
-import java.sql.Types;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType;
-import org.apache.phoenix.pig.util.PhoenixPigSchemaUtil.Dependencies;
-import org.apache.phoenix.schema.IllegalDataException;
-import org.apache.phoenix.util.ColumnInfo;
-import org.apache.pig.ResourceSchema;
-import org.apache.pig.ResourceSchema.ResourceFieldSchema;
-import org.apache.pig.data.DataType;
-import org.junit.Test;
-
-
-/**
- * Tests for PhoenixPigSchemaUtil.
- */
-public class PhoenixPigSchemaUtilTest {
-    private static final ColumnInfo ID_COLUMN = new ColumnInfo("ID", Types.BIGINT);
-    private static final ColumnInfo NAME_COLUMN = new ColumnInfo("NAME", Types.VARCHAR);
-    private static final ColumnInfo LOCATION_COLUMN = new ColumnInfo("LOCATION", Types.ARRAY);
-    
-    
-    @Test
-    public void testSchema() throws SQLException, IOException {
-        
-        final Configuration configuration = mock(Configuration.class);
-        when(configuration.get(PhoenixConfigurationUtil.SCHEMA_TYPE)).thenReturn(SchemaType.TABLE.name());
-		final ResourceSchema actual = PhoenixPigSchemaUtil.getResourceSchema(
-				configuration, new Dependencies() {
-					List<ColumnInfo> getSelectColumnMetadataList(
-							Configuration configuration) throws SQLException {
-						return new ArrayList<>(Arrays.asList(ID_COLUMN, NAME_COLUMN));
-					}
-				});        
-        // expected schema.
-        final ResourceFieldSchema[] fields = new ResourceFieldSchema[2];
-        fields[0] = new ResourceFieldSchema().setName("ID")
-                                                .setType(DataType.LONG);
-
-        fields[1] = new ResourceFieldSchema().setName("NAME")
-                                                .setType(DataType.CHARARRAY);
-        final ResourceSchema expected = new ResourceSchema().setFields(fields);
-        
-        assertEquals(expected.toString(), actual.toString());
-        
-    }
-    
-    @Test(expected=IllegalDataException.class)
-    public void testUnSupportedTypes() throws SQLException, IOException {
-        
-        final Configuration configuration = mock(Configuration.class);
-        when(configuration.get(PhoenixConfigurationUtil.SCHEMA_TYPE)).thenReturn(SchemaType.TABLE.name());
-		PhoenixPigSchemaUtil.getResourceSchema(
-				configuration, new Dependencies() {
-					List<ColumnInfo> getSelectColumnMetadataList(
-							Configuration configuration) throws SQLException {
-						return new ArrayList<>(Arrays.asList(ID_COLUMN, LOCATION_COLUMN));
-					}
-				});  
-        fail("We currently don't support Array type yet. WIP!!");
-    }
-}
diff --git a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/QuerySchemaParserFunctionTest.java b/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/QuerySchemaParserFunctionTest.java
deleted file mode 100644
index 4b13691..0000000
--- a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/QuerySchemaParserFunctionTest.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.sql.SQLException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.query.BaseConnectionlessQueryTest;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-
-/**
- * Unit tests to validate the query passed to LOAD.
- */
-public class QuerySchemaParserFunctionTest extends BaseConnectionlessQueryTest {
-
-    private Configuration configuration;
-    private QuerySchemaParserFunction function;
-    
-    @Before
-    public void setUp() throws SQLException {
-        configuration = Mockito.mock(Configuration.class);
-        Mockito.when(configuration.get(HConstants.ZOOKEEPER_QUORUM)).thenReturn(getUrl());
-        function = new QuerySchemaParserFunction(configuration);
-    }
-    
-    @Test(expected=RuntimeException.class)
-    public void testSelectQuery() {
-        final String selectQuery = "SELECT col1 FROM test";
-        function.apply(selectQuery);
-        fail("Should fail as the table [test] doesn't exist");
-   }
-    
-    @Test
-    public void testValidSelectQuery() throws SQLException {
-        String ddl = "CREATE TABLE EMPLOYEE " +
-                "  (id integer not null, name varchar, age integer,location varchar " +
-                "  CONSTRAINT pk PRIMARY KEY (id))\n";
-        createTestTable(getUrl(), ddl);
-  
-        final String selectQuery = "SELECT name,age,location FROM EMPLOYEE";
-        Pair<String,String> pair = function.apply(selectQuery);
-         
-        assertEquals("EMPLOYEE", pair.getFirst());
-        assertEquals(String.join(",", "NAME", "AGE", "LOCATION"), pair.getSecond());
-    }
-    
-    @Test(expected=RuntimeException.class)
-    public void testUpsertQuery() throws SQLException {
-        String ddl = "CREATE TABLE EMPLOYEE " +
-                "  (id integer not null, name varchar, age integer,location varchar " +
-                "  CONSTRAINT pk PRIMARY KEY (id))\n";
-        createTestTable(getUrl(), ddl);
-  
-        final String upsertQuery = "UPSERT INTO EMPLOYEE (ID, NAME) VALUES (?, ?)";
-        
-        function.apply(upsertQuery);
-        fail(" Function call successful despite passing an UPSERT query");
-    }
-    
-    @Test(expected=IllegalArgumentException.class)
-    public void testAggregationQuery() throws SQLException {
-        String ddl = "CREATE TABLE EMPLOYEE " +
-                "  (id integer not null, name varchar, age integer,location varchar " +
-                "  CONSTRAINT pk PRIMARY KEY (id))\n";
-        createTestTable(getUrl(), ddl);
-  
-        final String selectQuery = "SELECT MAX(ID) FROM EMPLOYEE";
-        function.apply(selectQuery);
-        fail(" Function call successful despite passing an aggreagate query");
-    }
-}
diff --git a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunctionTest.java b/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunctionTest.java
deleted file mode 100644
index 7b51ca5..0000000
--- a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunctionTest.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-import java.sql.SQLException;
-import java.sql.Types;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.query.BaseConnectionlessQueryTest;
-import org.apache.phoenix.util.ColumnInfo;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
-
-public class SqlQueryToColumnInfoFunctionTest  extends BaseConnectionlessQueryTest {
-
-    private Configuration configuration;
-    private SqlQueryToColumnInfoFunction function;
-    
-    @Before
-    public void setUp() throws SQLException {
-        configuration = Mockito.mock(Configuration.class);
-        Mockito.when(configuration.get(HConstants.ZOOKEEPER_QUORUM)).thenReturn(getUrl());
-        function = new SqlQueryToColumnInfoFunction(configuration);
-    }
-    
-    @Test
-    public void testValidSelectQuery() throws SQLException {
-        String ddl = "CREATE TABLE EMPLOYEE " +
-                "  (id integer not null, name varchar, age integer,location varchar " +
-                "  CONSTRAINT pk PRIMARY KEY (id))\n";
-        createTestTable(getUrl(), ddl);
-  
-        final String selectQuery = "SELECT name as a ,age AS b,UPPER(location) AS c FROM EMPLOYEE";
-        final ColumnInfo NAME_COLUMN = new ColumnInfo("A", Types.VARCHAR);
-        final ColumnInfo AGE_COLUMN = new ColumnInfo("B", Types.INTEGER);
-        final ColumnInfo LOCATION_COLUMN = new ColumnInfo("C", Types.VARCHAR);
-        final List<ColumnInfo> expectedColumnInfos = ImmutableList.of(NAME_COLUMN, AGE_COLUMN,LOCATION_COLUMN);
-        final List<ColumnInfo> actualColumnInfos = function.apply(selectQuery);
-        Assert.assertEquals(expectedColumnInfos, actualColumnInfos);
-        
-    }
-}
diff --git a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/TableSchemaParserFunctionTest.java b/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/TableSchemaParserFunctionTest.java
deleted file mode 100644
index ea36b63..0000000
--- a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/TableSchemaParserFunctionTest.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.pig.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.Test;
-
-public class TableSchemaParserFunctionTest {
-
-    final TableSchemaParserFunction function = new TableSchemaParserFunction();
-    
-    @Test
-    public void testTableSchema() {
-        final String loadTableSchema = "EMPLOYEE/col1,col2";
-        final Pair<String,String> pair = function.apply(loadTableSchema);
-        assertEquals("EMPLOYEE", pair.getFirst());
-        assertEquals(String.join(",", "col1", "col2"), pair.getSecond());
-    }
-    
-    @Test(expected=IllegalArgumentException.class)
-    public void testEmptyTableSchema() {
-        final String loadTableSchema = "";
-        function.apply(loadTableSchema);
-    }
-    
-    @Test
-    public void testTableOnlySchema() {
-        final String loadTableSchema = "EMPLOYEE";
-        final Pair<String,String> pair = function.apply(loadTableSchema);
-        assertEquals("EMPLOYEE", pair.getFirst());
-        assertNull(pair.getSecond());
-    }
-}
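The removed TableSchemaParserFunction split "TABLE/col1,col2" strings for the loader's table-based LOAD variant. A hedged sketch of that form, reusing the EMPLOYEE names from the deleted test (the quorum address is an assumption):

    -- sketch only: loads two named columns from a Phoenix table
    rows = LOAD 'hbase://table/EMPLOYEE/COL1,COL2'
           USING org.apache.phoenix.pig.PhoenixHBaseLoader('localhost:2181');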
diff --git a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java b/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
deleted file mode 100644
index 5eb1b8a..0000000
--- a/phoenix-pig-base/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.pig.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.Map;
-
-import org.apache.phoenix.mapreduce.PhoenixRecordWritable;
-import org.apache.phoenix.schema.types.PArrayDataType;
-import org.apache.phoenix.schema.types.PDouble;
-import org.apache.phoenix.schema.types.PhoenixArray;
-import org.apache.pig.ResourceSchema.ResourceFieldSchema;
-import org.apache.pig.data.DataType;
-import org.apache.pig.data.Tuple;
-import org.junit.Test;
-
-import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
-
-public class TypeUtilTest {
-
-    @Test
-    public void testTransformToTuple() throws Exception {
-        PhoenixRecordWritable record = mock(PhoenixRecordWritable.class);
-        Double[] doubleArr =  new Double[2];
-        doubleArr[0] = 64.87;
-        doubleArr[1] = 89.96;
-        PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PDouble.INSTANCE, doubleArr);
-        Map<String,Object> values = Maps.newLinkedHashMap();
-        values.put("first", "213123");
-        values.put("second", 1231123);
-        values.put("third", 31231231232131L);
-        values.put("four", "bytearray".getBytes());
-        values.put("five", arr);
-        when(record.getResultMap()).thenReturn(values);
-
-        ResourceFieldSchema field = new ResourceFieldSchema().setType(DataType.CHARARRAY);
-        ResourceFieldSchema field1 = new ResourceFieldSchema().setType(DataType.INTEGER);
-        ResourceFieldSchema field2 = new ResourceFieldSchema().setType(DataType.LONG);
-        ResourceFieldSchema field3 = new ResourceFieldSchema().setType(DataType.BYTEARRAY);
-        ResourceFieldSchema field4 = new ResourceFieldSchema().setType(DataType.TUPLE);
-        ResourceFieldSchema[] projectedColumns = { field, field1, field2, field3, field4 };
-
-        Tuple t = TypeUtil.transformToTuple(record, projectedColumns);
-
-        assertEquals(DataType.LONG, DataType.findType(t.get(2)));
-        assertEquals(DataType.TUPLE, DataType.findType(t.get(4)));
-        Tuple doubleArrayTuple = (Tuple)t.get(4);
-        assertEquals(2, doubleArrayTuple.size());
-
-        field = new ResourceFieldSchema().setType(DataType.BIGDECIMAL);
-        field1 = new ResourceFieldSchema().setType(DataType.BIGINTEGER);
-        values.clear();
-        values.put("first", new BigDecimal(123123123.123213));
-        values.put("second", new BigInteger("1312313231312"));
-        ResourceFieldSchema[] columns = { field, field1 };
-        
-        t = TypeUtil.transformToTuple(record, columns);
-
-        assertEquals(DataType.BIGDECIMAL, DataType.findType(t.get(0)));
-        assertEquals(DataType.BIGINTEGER, DataType.findType(t.get(1)));
-    }
-}
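The removed TypeUtil performed the Pig-to-Phoenix type conversion used by both the loader and PhoenixHBaseStorage; as the test above asserts, Phoenix arrays surfaced on the Pig side as tuples. For reference, a hedged sketch of the store path that relied on this mapping (the file name, schema, quorum, and batch size are all assumptions):

    -- sketch only: upserts Pig tuples into a Phoenix table
    A = LOAD 'employee.csv' USING PigStorage(',') AS (id:int, name:chararray, age:int);
    STORE A INTO 'hbase://EMPLOYEE'
        USING org.apache.phoenix.pig.PhoenixHBaseStorage('localhost:2181', '-batchSize 1000');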
diff --git a/phoenix4-connectors-assembly/pom.xml b/phoenix4-connectors-assembly/pom.xml
index c7ca9cf..2d8b143 100644
--- a/phoenix4-connectors-assembly/pom.xml
+++ b/phoenix4-connectors-assembly/pom.xml
@@ -49,14 +49,6 @@
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix4-kafka</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix4-pig</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix4-pig-shaded</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix4-spark</artifactId>
@@ -113,26 +105,6 @@
               </arguments>
             </configuration>
           </execution>
-          <execution>
-            <id>pig without version</id>
-            <phase>package</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <executable>ln</executable>
-              <workingDirectory>${project.basedir}/../phoenix-pig-base/phoenix4-pig-shaded/target</workingDirectory>
-              <arguments>
-                <argument>-fnsv</argument>
-                <argument>
-                  phoenix4-pig-shaded-${project.version}.jar
-                </argument>
-                <argument>
-                  phoenix4-pig-shaded.jar
-                </argument>
-              </arguments>
-            </configuration>
-          </execution>
           <execution>
             <id>spark without version</id>
             <phase>package</phase>
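The execution removed above only created a stable, unversioned symlink (ln -fnsv) to the shaded jar, so scripts did not have to track release versions when registering it. A hedged sketch of how such a jar would typically be picked up from a Pig script (the path is an assumption):

    -- sketch only: register the shaded client jar by its unversioned symlink name
    REGISTER /opt/phoenix/phoenix4-pig-shaded.jar;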
diff --git a/phoenix4-connectors-assembly/src/build/components/phoenix4-jars.xml b/phoenix4-connectors-assembly/src/build/components/phoenix4-jars.xml
index 7a8eb81..bcfe386 100644
--- a/phoenix4-connectors-assembly/src/build/components/phoenix4-jars.xml
+++ b/phoenix4-connectors-assembly/src/build/components/phoenix4-jars.xml
@@ -39,14 +39,6 @@
         <include>phoenix4-kafka-shaded-minimal.jar</include>
       </includes>
     </fileSet>
-    <fileSet>
-      <directory>${project.basedir}/../phoenix-pig-base/phoenix4-pig-shaded/target</directory>
-      <outputDirectory>/</outputDirectory>
-      <includes>
-        <include>phoenix4-pig-shaded-${project.version}.jar</include>
-        <include>phoenix4-pig-shaded.jar</include>
-      </includes>
-    </fileSet>
     <fileSet>
       <directory>${project.basedir}/../phoenix-spark-base/phoenix4-spark-shaded/target</directory>
       <outputDirectory>/</outputDirectory>
diff --git a/phoenix5-connectors-assembly/pom.xml b/phoenix5-connectors-assembly/pom.xml
index 216e35b..a2454e0 100644
--- a/phoenix5-connectors-assembly/pom.xml
+++ b/phoenix5-connectors-assembly/pom.xml
@@ -56,14 +56,6 @@
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix5-kafka</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix5-pig</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix5-pig-shaded</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix5-spark</artifactId>
@@ -124,26 +116,6 @@
               </arguments>
             </configuration>
           </execution>
-          <execution>
-            <id>pig without version</id>
-            <phase>package</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <executable>ln</executable>
-              <workingDirectory>${project.basedir}/../phoenix-pig-base/phoenix5-pig-shaded/target</workingDirectory>
-              <arguments>
-                <argument>-fnsv</argument>
-                <argument>
-                  phoenix5-pig-shaded-${project.version}.jar
-                </argument>
-                <argument>
-                  phoenix5-pig-shaded.jar
-                </argument>
-              </arguments>
-            </configuration>
-          </execution>
           <execution>
             <id>spark without version</id>
             <phase>package</phase>
diff --git a/phoenix5-connectors-assembly/src/build/components/phoenix5-jars.xml b/phoenix5-connectors-assembly/src/build/components/phoenix5-jars.xml
index 4ea9e10..52e38f6 100644
--- a/phoenix5-connectors-assembly/src/build/components/phoenix5-jars.xml
+++ b/phoenix5-connectors-assembly/src/build/components/phoenix5-jars.xml
@@ -39,14 +39,6 @@
         <include>phoenix5-kafka-shaded-minimal.jar</include>
       </includes>
     </fileSet>
-    <fileSet>
-      <directory>${project.basedir}/../phoenix-pig-base/phoenix5-pig-shaded/target</directory>
-      <outputDirectory>/</outputDirectory>
-      <includes>
-        <include>phoenix5-pig-shaded-${project.version}.jar</include>
-        <include>phoenix5-pig-shaded.jar</include>
-      </includes>
-    </fileSet>
     <fileSet>
       <directory>${project.basedir}/../phoenix-spark-base/phoenix5-spark-shaded/target</directory>
       <outputDirectory>/</outputDirectory>
diff --git a/pom.xml b/pom.xml
index 5da7f74..e76f9a9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -47,7 +47,6 @@
       <!-- Changing the module order here may cause maven to get stuck in an infinite loop -->
       <module>phoenix-hive-base</module>
       <module>phoenix-flume-base</module>
-      <module>phoenix-pig-base</module>
       <module>phoenix-kafka-base</module>
       <module>phoenix-spark-base</module>
       <module>phoenix5-spark3</module>
@@ -93,7 +92,6 @@
     <hive2-storage.version>2.4.0</hive2-storage.version>
     <hive3-storage.version>2.7.0</hive3-storage.version>
     <hive-storage.version>${hive3-storage.version}</hive-storage.version>
-    <pig.version>0.13.0</pig.version>
     <flume.version>1.4.0</flume.version>
     <kafka.version>0.9.0.0</kafka.version>
     <spark.version>2.4.0</spark.version>
@@ -458,8 +456,6 @@
             <exclude>dev/release_files/NOTICE</exclude>
             <!-- Exclude data files for examples -->
             <exclude>docs/*.csv</exclude>
-            <!-- Data files -->
-            <exclude>examples/pig/testdata</exclude>
             <!-- precommit? -->
             <exclude>**/patchprocess/**</exclude>
             <exclude>**/derby.log</exclude>
@@ -525,16 +521,6 @@
         <artifactId>phoenix4-kafka</artifactId>
         <version>${project.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.phoenix</groupId>
-        <artifactId>phoenix4-pig</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.phoenix</groupId>
-        <artifactId>phoenix4-pig-shaded</artifactId>
-        <version>${project.version}</version>
-      </dependency>
       <dependency>
         <groupId>org.apache.phoenix</groupId>
         <artifactId>phoenix4-spark</artifactId>
@@ -565,16 +551,6 @@
         <artifactId>phoenix5-kafka</artifactId>
         <version>${project.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.phoenix</groupId>
-        <artifactId>phoenix5-pig</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.phoenix</groupId>
-        <artifactId>phoenix5-pig-shaded</artifactId>
-        <version>${project.version}</version>
-      </dependency>
       <dependency>
         <groupId>org.apache.phoenix</groupId>
         <artifactId>phoenix5-spark</artifactId>
@@ -888,18 +864,6 @@
         <artifactId>log4j</artifactId>
         <version>${log4j.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.pig</groupId>
-        <artifactId>pig</artifactId>
-        <version>${pig.version}</version>
-        <classifier>h2</classifier>
-        <exclusions>
-          <exclusion>
-            <groupId>org.xerial.snappy</groupId>
-            <artifactId>snappy-java</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
       <dependency>
         <groupId>org.apache.flume</groupId>
         <artifactId>flume-ng-core</artifactId>