Posted to commits@phoenix.apache.org by el...@apache.org on 2018/04/19 03:49:57 UTC

[1/3] phoenix git commit: PHOENIX-4423 Hive 2.3.0 support

Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 0a2ff12d2 -> 83825dec5


http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java
new file mode 100644
index 0000000..45fabf5
--- /dev/null
+++ b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.security;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+public class DummyAuthenticator implements HiveAuthenticationProvider {
+
+  private final List<String> groupNames;
+  private final String userName;
+  private Configuration conf;
+
+  public DummyAuthenticator() {
+    this.groupNames = new ArrayList<String>();
+    groupNames.add("hive_test_group1");
+    groupNames.add("hive_test_group2");
+    userName = "hive_test_user";
+  }
+
+  @Override
+  public void destroy() throws HiveException{
+    return;
+  }
+
+  @Override
+  public List<String> getGroupNames() {
+    return groupNames;
+  }
+
+  @Override
+  public String getUserName() {
+    return userName;
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return this.conf;
+  }
+
+  @Override
+  public void setSessionState(SessionState ss) {
+    //no op
+  }
+
+}
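For reference, an implementation of HiveAuthenticationProvider such as this stub only takes effect when Hive is configured to load it. The sketch below is illustrative only and not part of this commit; it assumes the stub is on the test classpath and uses the same HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER variable that appears later in this patch.

    // Illustrative sketch, not part of this commit: route Hive authentication
    // to the dummy provider so test sessions run as the fixed "hive_test_user".
    HiveConf conf = new HiveConf();
    conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
            "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
    SessionState.start(conf);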

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
index bc25909..aede9ac 100644
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
+++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
@@ -17,6 +17,17 @@
  */
 package org.apache.phoenix.hive;
 
+import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.Properties;
+
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -24,7 +35,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.query.QueryServices;
@@ -35,14 +47,7 @@ import org.junit.AfterClass;
 import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 
-import java.io.File;
-import java.io.IOException;
-import java.sql.*;
-import java.util.Properties;
-
-import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import com.google.common.base.Throwables;
 
 /**
  * Base class for all Hive Phoenix integration tests that may be run with Tez or MR mini cluster
@@ -72,6 +77,7 @@ public class BaseHivePhoenixStoreIT {
         conf = hbaseTestUtil.getConfiguration();
         setUpConfigForMiniCluster(conf);
         conf.set(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
+        conf.set("hive.metastore.schema.verification","false");
         hiveOutputDir = new Path(hbaseTestUtil.getDataTestDir(), "hive_output").toString();
         File outputDir = new File(hiveOutputDir);
         outputDir.mkdirs();
@@ -82,17 +88,20 @@ public class BaseHivePhoenixStoreIT {
         Path testRoot = hbaseTestUtil.getDataTestDir();
         System.setProperty("test.tmp.dir", testRoot.toString());
         System.setProperty("test.warehouse.dir", (new Path(testRoot, "warehouse")).toString());
-
+        System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "false");
+        //System.setProperty(HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL.toString(),"true");
         try {
-            qt = new HiveTestUtil(hiveOutputDir, hiveLogDir, clusterType, null);
+            qt = new HiveTestUtil(hiveOutputDir, hiveLogDir, clusterType, "", "0.20",null, null, false);
+            // do a one time initialization
+            qt.createSources();
         } catch (Exception e) {
-            LOG.error("Unexpected exception in setup", e);
-            fail("Unexpected exception in setup");
+            LOG.error("Unexpected exception in setup: " + e.getMessage(), e);
+            fail("Unexpected exception in setup"+Throwables.getStackTraceAsString(e));
         }
 
         //Start HBase cluster
         hbaseCluster = hbaseTestUtil.startMiniCluster(1);
-        MiniDFSCluster x = hbaseTestUtil.getDFSCluster();
+        //MiniDFSCluster x = hbaseTestUtil.getDFSCluster();
         Class.forName(PhoenixDriver.class.getName());
         zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
         Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
@@ -124,14 +133,14 @@ public class BaseHivePhoenixStoreIT {
                 return;
             }
 
-            ecode = qt.checkCliDriverResults(fname);
-            if (ecode != 0) {
-                qt.failedDiff(ecode, fname, null);
+            QTestProcessExecResult result = qt.checkCliDriverResults(fname);
+            if (result.getReturnCode() != 0) {
+              qt.failedDiff(result.getReturnCode(), fname, result.getCapturedOutput());
             }
             qt.clearPostTestEffects();
 
         } catch (Throwable e) {
-            qt.failed(e, fname, null);
+            qt.failed(new Exception(e), fname, null);
         }
 
         long elapsedTime = System.currentTimeMillis() - startTime;
@@ -145,14 +154,6 @@ public class BaseHivePhoenixStoreIT {
 
     @AfterClass
     public static void tearDownAfterClass() throws Exception {
-        if (qt != null) {
-            try {
-                qt.shutdown();
-            } catch (Exception e) {
-                LOG.error("Unexpected exception in setup", e);
-                fail("Unexpected exception in tearDown");
-            }
-        }
         try {
             conn.close();
         } finally {
@@ -166,5 +167,14 @@ public class BaseHivePhoenixStoreIT {
                 }
             }
         }
+        // Shuts down the filesystem -- do this after stopping HBase.
+        if (qt != null) {
+          try {
+              qt.shutdown();
+          } catch (Exception e) {
+              LOG.error("Unexpected exception in setup", e);
+              //fail("Unexpected exception in tearDown");
+          }
+      }
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
index 5150324..6e880ef 100644
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
+++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
@@ -20,15 +20,35 @@ package org.apache.phoenix.hive;
 
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
 import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 @Category(NeedsOwnMiniClusterTest.class)
-@Ignore
 public class HiveMapReduceIT extends HivePhoenixStoreIT {
 
     @BeforeClass
     public static void setUpBeforeClass() throws Exception {
         setup(HiveTestUtil.MiniClusterType.mr);
     }
+    
+    @Override
+    @Test
+    @Ignore 
+    /**
+     * Ignoring because predicate pushdown is skipped for MR (ref:HIVE-18873) when there are multiple aliases
+     */
+    public void testJoinNoColumnMaps() throws Exception {
+        
+    }
+    
+    @Override
+    @Test
+    @Ignore 
+    /**
+     * Ignoring because projection pushdown is incorrect for MR when there are multiple aliases (ref:HIVE-18872)
+     */
+    public void testJoinColumnMaps() throws Exception {
+        
+    }
 }
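The two empty overrides above follow the standard JUnit 4 pattern for disabling an inherited test in a single subclass: re-declare the @Test method and mark the override with @Ignore so the runner reports it as skipped for this engine only. A minimal sketch under that assumption (class and method names below are illustrative, not from this patch):

    // Illustrative only: an engine-specific subclass opts out of one inherited case.
    public class MrVariantIT extends SharedHiveTestsIT {
        @Override
        @Test
        @Ignore("skipped on the MR engine")
        public void testSharedBehavior() throws Exception {
            // intentionally empty; JUnit marks this test as skipped
        }
    }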

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java
index 1828818..d4e7005 100644
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java
+++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HivePhoenixStoreIT.java
@@ -17,6 +17,11 @@
  */
 package org.apache.phoenix.hive;
 
+import static org.junit.Assert.assertTrue;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
 import org.apache.hadoop.fs.Path;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.util.StringUtil;
@@ -24,11 +29,6 @@ import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-
-import static org.junit.Assert.assertTrue;
-
 /**
  * Test methods only. All supporting methods should be placed to BaseHivePhoenixStoreIT
  */
@@ -178,7 +178,7 @@ public class HivePhoenixStoreIT  extends BaseHivePhoenixStoreIT {
                 "   'phoenix.zookeeper.client.port'='" +
                 hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
                 "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE phoenix_MultiKey VALUES (10, \"part2\",\"foodesc\",200,2.0,-1);" +
+        sb.append("INSERT INTO TABLE phoenix_MultiKey" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" +
                 HiveTestUtil.CRLF);
         String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
         createFile(sb.toString(), fullPath);
@@ -208,7 +208,7 @@ public class HivePhoenixStoreIT  extends BaseHivePhoenixStoreIT {
         String testName = "testJoin";
         hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
         createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
-        createFile("10\tpart2\tfoodesc\t200.0\t2.0\t-1\t10\tpart2\tfoodesc\t200.0\t2.0\t-1\n",
+        createFile("#### A masked pattern was here ####\n10\tpart2\tfoodesc\t200.0\t2.0\t-1\t10\tpart2\tfoodesc\t200.0\t2.0\t-1\n",
                 new Path(hiveOutputDir, testName + ".out").toString());
         StringBuilder sb = new StringBuilder();
         sb.append("CREATE TABLE joinTable1(ID int, ID2 String,description STRING," +
@@ -234,13 +234,13 @@ public class HivePhoenixStoreIT  extends BaseHivePhoenixStoreIT {
                 hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
                 "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
 
-        sb.append("INSERT INTO TABLE joinTable1 VALUES (5, \"part2\",\"foodesc\",200,2.0,-1);" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE joinTable1 VALUES (10, \"part2\",\"foodesc\",200,2.0,-1);" + HiveTestUtil.CRLF);
-
-        sb.append("INSERT INTO TABLE joinTable2 VALUES (5, \"part2\",\"foodesc\",200,2.0,-1);" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE joinTable2 VALUES (10, \"part2\",\"foodesc\",200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable1" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable1" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
 
-        sb.append("SELECT  * from joinTable1 A join joinTable2 B on A.ID = B.ID WHERE A.ID=10;" +
+        sb.append("INSERT INTO TABLE joinTable2" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable2" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        
+        sb.append("SELECT  * from joinTable1 A join joinTable2 B on A.id = B.id WHERE A.ID=10;" +
                 HiveTestUtil.CRLF);
 
         String fullPath = new Path(hbaseTestUtil.getDataTestDir(), testName).toString();
@@ -257,7 +257,7 @@ public class HivePhoenixStoreIT  extends BaseHivePhoenixStoreIT {
     public void testJoinColumnMaps() throws Exception {
         String testName = "testJoin";
         hbaseTestUtil.getTestFileSystem().createNewFile(new Path(hiveLogDir, testName + ".out"));
-        createFile("10\t200.0\tpart2\n", new Path(hiveOutputDir, testName + ".out").toString());
+        createFile("#### A masked pattern was here ####\n10\t200.0\tpart2\n", new Path(hiveOutputDir, testName + ".out").toString());
         createFile(StringUtil.EMPTY_STRING, new Path(hiveLogDir, testName + ".out").toString());
 
         StringBuilder sb = new StringBuilder();
@@ -271,7 +271,7 @@ public class HivePhoenixStoreIT  extends BaseHivePhoenixStoreIT {
                 "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
                 "   'phoenix.zookeeper.client.port'='" +
                 hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.column.mapping' = 'id:i1, id2:I2'," + HiveTestUtil.CRLF +
+                "   'phoenix.column.mapping' = 'id:i1, id2:I2, db:db'," + HiveTestUtil.CRLF +
                 "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
         sb.append("CREATE TABLE joinTable4(ID int, ID2 String,description STRING," +
                 "db DOUBLE,fl FLOAT, us INT)" + HiveTestUtil.CRLF +
@@ -283,15 +283,15 @@ public class HivePhoenixStoreIT  extends BaseHivePhoenixStoreIT {
                 "   'phoenix.zookeeper.quorum'='localhost'," + HiveTestUtil.CRLF +
                 "   'phoenix.zookeeper.client.port'='" +
                 hbaseTestUtil.getZkCluster().getClientPort() + "'," + HiveTestUtil.CRLF +
-                "   'phoenix.column.mapping' = 'id:i1, id2:I2'," + HiveTestUtil.CRLF +
+                "   'phoenix.column.mapping' = 'id:i1, id2:I2, db:db'," + HiveTestUtil.CRLF +
                 "   'phoenix.rowkeys'='id,id2');" + HiveTestUtil.CRLF);
 
-        sb.append("INSERT INTO TABLE joinTable3 VALUES (5, \"part1\",\"foodesc\",200,2.0,-1);" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE joinTable3 VALUES (10, \"part1\",\"foodesc\",200,2.0,-1);" + HiveTestUtil.CRLF);
-
-        sb.append("INSERT INTO TABLE joinTable4 VALUES (5, \"part2\",\"foodesc\",200,2.0,-1);" + HiveTestUtil.CRLF);
-        sb.append("INSERT INTO TABLE joinTable4 VALUES (10, \"part2\",\"foodesc\",200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable3" + HiveTestUtil.CRLF +"VALUES (5, \'part1\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable3" + HiveTestUtil.CRLF +"VALUES (10, \'part1\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
 
+        sb.append("INSERT INTO TABLE joinTable4" + HiveTestUtil.CRLF +"VALUES (5, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        sb.append("INSERT INTO TABLE joinTable4" + HiveTestUtil.CRLF +"VALUES (10, \'part2\',\'foodesc\',200,2.0,-1);" + HiveTestUtil.CRLF);
+        
         sb.append("SELECT A.ID, a.db, B.ID2 from joinTable3 A join joinTable4 B on A.ID = B.ID WHERE A.ID=10;" +
                 HiveTestUtil.CRLF);
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
index b4c4e46..3d2657b 100644
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
+++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
@@ -17,1264 +17,24 @@
  */
 package org.apache.phoenix.hive;
 
-import com.google.common.collect.ImmutableList;
-import junit.framework.Assert;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-import org.apache.hadoop.hive.cli.CliDriver;
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.common.io.CachingPrintStream;
-import org.apache.hadoop.hive.common.io.DigestPrintStream;
-import org.apache.hadoop.hive.common.io.SortAndDigestPrintStream;
-import org.apache.hadoop.hive.common.io.SortPrintStream;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.ParseDriver;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.HadoopShims;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.util.Shell;
-import org.apache.hive.common.util.StreamPrinter;
-import org.apache.tools.ant.BuildException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.PrintStream;
-import java.io.StringWriter;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Deque;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import org.apache.hadoop.hive.ql.QTestUtil;
 
 /**
  * HiveTestUtil cloned from Hive QTestUtil. Can be outdated and may require update once a problem
  * found.
  */
-public class HiveTestUtil {
-
-    public static final String UTF_8 = "UTF-8";
-    private static final Log LOG = LogFactory.getLog("HiveTestUtil");
-    private static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
-    public static final String DEFAULT_DATABASE_NAME = "default";
-
-    private String testWarehouse;
-    private final String testFiles;
-    protected final String outDir;
-    protected final String logDir;
-    private final TreeMap<String, String> qMap;
-    private final Set<String> qSkipSet;
-    private final Set<String> qSortSet;
-    private final Set<String> qSortQuerySet;
-    private final Set<String> qHashQuerySet;
-    private final Set<String> qSortNHashQuerySet;
-    private final Set<String> qJavaVersionSpecificOutput;
-    private static final String SORT_SUFFIX = ".sorted";
-    private static MiniClusterType clusterType = MiniClusterType.none;
-    private ParseDriver pd;
-    protected Hive db;
-    protected HiveConf conf;
-    private BaseSemanticAnalyzer sem;
-    protected final boolean overWrite;
-    private CliDriver cliDriver;
-    private HadoopShims.MiniMrShim mr = null;
-    private HadoopShims.MiniDFSShim dfs = null;
-    private String hadoopVer = null;
-    private HiveTestSetup setup = null;
-    private boolean isSessionStateStarted = false;
-    private static final String javaVersion = getJavaVersion();
-
-    private String initScript = "";
-    private String cleanupScript = "";
-
-    public HiveConf getConf() {
-        return conf;
-    }
-
-    public boolean deleteDirectory(File path) {
-        if (path.exists()) {
-            File[] files = path.listFiles();
-            for (File file : files) {
-                if (file.isDirectory()) {
-                    deleteDirectory(file);
-                } else {
-                    file.delete();
-                }
-            }
-        }
-        return (path.delete());
-    }
-
-    public void copyDirectoryToLocal(Path src, Path dest) throws Exception {
-
-        FileSystem srcFs = src.getFileSystem(conf);
-        FileSystem destFs = dest.getFileSystem(conf);
-        if (srcFs.exists(src)) {
-            FileStatus[] files = srcFs.listStatus(src);
-            for (FileStatus file : files) {
-                String name = file.getPath().getName();
-                Path dfs_path = file.getPath();
-                Path local_path = new Path(dest, name);
-
-                if (file.isDir()) {
-                    if (!destFs.exists(local_path)) {
-                        destFs.mkdirs(local_path);
-                    }
-                    copyDirectoryToLocal(dfs_path, local_path);
-                } else {
-                    srcFs.copyToLocalFile(dfs_path, local_path);
-                }
-            }
-        }
-    }
-
-    static Pattern mapTok = Pattern.compile("(\\.?)(.*)_map_(.*)");
-    static Pattern reduceTok = Pattern.compile("(.*)(reduce_[^\\.]*)((\\..*)?)");
-
-    public void normalizeNames(File path) throws Exception {
-        if (path.isDirectory()) {
-            File[] files = path.listFiles();
-            for (File file : files) {
-                normalizeNames(file);
-            }
-        } else {
-            Matcher m = reduceTok.matcher(path.getName());
-            if (m.matches()) {
-                String name = m.group(1) + "reduce" + m.group(3);
-                path.renameTo(new File(path.getParent(), name));
-            } else {
-                m = mapTok.matcher(path.getName());
-                if (m.matches()) {
-                    String name = m.group(1) + "map_" + m.group(3);
-                    path.renameTo(new File(path.getParent(), name));
-                }
-            }
-        }
-    }
-
-    public String getOutputDirectory() {
-        return outDir;
-    }
-
-    public String getLogDirectory() {
-        return logDir;
-    }
-
-    private String getHadoopMainVersion(String input) {
-        if (input == null) {
-            return null;
-        }
-        Pattern p = Pattern.compile("^(\\d+\\.\\d+).*");
-        Matcher m = p.matcher(input);
-        if (m.matches()) {
-            return m.group(1);
-        }
-        return null;
-    }
-
-    public void initConf() throws Exception {
-        // Plug verifying metastore in for testing.
-        conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
-                "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
-
-        if (mr != null) {
-            assert dfs != null;
-
-            mr.setupConfiguration(conf);
-
-            // set fs.default.name to the uri of mini-dfs
-            String dfsUriString = WindowsPathUtil.getHdfsUriString(dfs.getFileSystem().getUri()
-                    .toString());
-            conf.setVar(HiveConf.ConfVars.HADOOPFS, dfsUriString);
-            // hive.metastore.warehouse.dir needs to be set relative to the mini-dfs
-            conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE,
-                    (new Path(dfsUriString,
-                            "/build/ql/test/data/warehouse/")).toString());
-        }
-
-        // Windows paths should be converted after MiniMrShim.setupConfiguration()
-        // since setupConfiguration may overwrite configuration values.
-        if (Shell.WINDOWS) {
-            WindowsPathUtil.convertPathsFromWindowsToHdfs(conf);
-        }
-    }
-
-    public enum MiniClusterType {
-        mr,
-        tez,
-        none;
-
-        public static MiniClusterType valueForString(String type) {
-            if (type.equals("miniMR")) {
-                return mr;
-            } else if (type.equals("tez")) {
-                return tez;
-            } else {
-                return none;
-            }
-        }
-    }
-
-    public HiveTestUtil(String outDir, String logDir, MiniClusterType clusterType, String hadoopVer)
-            throws Exception {
-        this(outDir, logDir, clusterType, null, hadoopVer);
-    }
-
-    public HiveTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir,
-                        String hadoopVer)
-            throws Exception {
-        this.outDir = outDir;
-        this.logDir = logDir;
-        if (confDir != null && !confDir.isEmpty()) {
-            HiveConf.setHiveSiteLocation(new URL("file://" + new File(confDir).toURI().getPath()
-                    + "/hive-site.xml"));
-            LOG.info("Setting hive-site: " + HiveConf.getHiveSiteLocation());
-        }
-        conf = new HiveConf();
-        String tmpBaseDir = System.getProperty("test.tmp.dir");
-        if (tmpBaseDir == null || tmpBaseDir == "") {
-            tmpBaseDir = System.getProperty("java.io.tmpdir");
-        }
-        String metaStoreURL = "jdbc:derby:" + tmpBaseDir + File.separator + "metastore_dbtest;" +
-                "create=true";
-        conf.set(ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
-        System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
-
-        //set where derby logs
-        File derbyLogFile = new File(tmpBaseDir + "/derby.log");
-        derbyLogFile.createNewFile();
-        System.setProperty("derby.stream.error.file", derbyLogFile.getPath());
-
-        this.hadoopVer = getHadoopMainVersion(hadoopVer);
-        qMap = new TreeMap<String, String>();
-        qSkipSet = new HashSet<String>();
-        qSortSet = new HashSet<String>();
-        qSortQuerySet = new HashSet<String>();
-        qHashQuerySet = new HashSet<String>();
-        qSortNHashQuerySet = new HashSet<String>();
-        qJavaVersionSpecificOutput = new HashSet<String>();
-        this.clusterType = clusterType;
-
-        // Using randomUUID for dfs cluster
-        System.setProperty("test.build.data", "target/test-data/hive-" + UUID.randomUUID().toString
-                ());
-
-        HadoopShims shims = ShimLoader.getHadoopShims();
-        int numberOfDataNodes = 1;
-
-        if (clusterType != MiniClusterType.none) {
-            dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
-            FileSystem fs = dfs.getFileSystem();
-            String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
-            if (clusterType == MiniClusterType.tez) {
-                conf.set("hive.execution.engine", "tez");
-                mr = shims.getMiniTezCluster(conf, 1, uriString, 1);
-            } else {
-                conf.set("hive.execution.engine", "mr");
-                mr = shims.getMiniMrCluster(conf, 1, uriString, 1);
-
-            }
-        }
-
-        initConf();
-
-        // Use the current directory if it is not specified
-        String dataDir = conf.get("test.data.files");
-        if (dataDir == null) {
-            dataDir = new File(".").getAbsolutePath() + "/data/files";
-        }
-
-        testFiles = dataDir;
-
-        // Use the current directory if it is not specified
-        String scriptsDir = conf.get("test.data.scripts");
-        if (scriptsDir == null) {
-            scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
-        }
-        if (!initScript.isEmpty()) {
-            this.initScript = scriptsDir + "/" + initScript;
-        }
-        if (!cleanupScript.isEmpty()) {
-            this.cleanupScript = scriptsDir + "/" + cleanupScript;
-        }
-
-        overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));
-
-        setup = new HiveTestSetup();
-        setup.preTest(conf);
-        init();
-    }
-
-    public void shutdown() throws Exception {
-        cleanUp();
-        setup.tearDown();
-        if (mr != null) {
-            mr.shutdown();
-            mr = null;
-        }
-        FileSystem.closeAll();
-        if (dfs != null) {
-            dfs.shutdown();
-            dfs = null;
-        }
-    }
-
-    public String readEntireFileIntoString(File queryFile) throws IOException {
-        InputStreamReader isr = new InputStreamReader(
-                new BufferedInputStream(new FileInputStream(queryFile)), HiveTestUtil.UTF_8);
-        StringWriter sw = new StringWriter();
-        try {
-            IOUtils.copy(isr, sw);
-        } finally {
-            if (isr != null) {
-                isr.close();
-            }
-        }
-        return sw.toString();
-    }
-
-    public void addFile(String queryFile) throws IOException {
-        addFile(queryFile, false);
-    }
-
-    public void addFile(String queryFile, boolean partial) throws IOException {
-        addFile(new File(queryFile));
-    }
-
-    public void addFile(File qf) throws IOException {
-        addFile(qf, false);
-    }
-
-    public void addFile(File qf, boolean partial) throws IOException {
-        String query = readEntireFileIntoString(qf);
-        qMap.put(qf.getName(), query);
-        if (partial) return;
-
-        if (matches(SORT_BEFORE_DIFF, query)) {
-            qSortSet.add(qf.getName());
-        } else if (matches(SORT_QUERY_RESULTS, query)) {
-            qSortQuerySet.add(qf.getName());
-        } else if (matches(HASH_QUERY_RESULTS, query)) {
-            qHashQuerySet.add(qf.getName());
-        } else if (matches(SORT_AND_HASH_QUERY_RESULTS, query)) {
-            qSortNHashQuerySet.add(qf.getName());
-        }
-    }
-
-    private static final Pattern SORT_BEFORE_DIFF = Pattern.compile("-- SORT_BEFORE_DIFF");
-    private static final Pattern SORT_QUERY_RESULTS = Pattern.compile("-- SORT_QUERY_RESULTS");
-    private static final Pattern HASH_QUERY_RESULTS = Pattern.compile("-- HASH_QUERY_RESULTS");
-    private static final Pattern SORT_AND_HASH_QUERY_RESULTS = Pattern.compile("-- " +
-            "SORT_AND_HASH_QUERY_RESULTS");
-
-    private boolean matches(Pattern pattern, String query) {
-        Matcher matcher = pattern.matcher(query);
-        if (matcher.find()) {
-            return true;
-        }
-        return false;
-    }
-
-    /**
-     * Get formatted Java version to include minor version, but
-     * exclude patch level.
-     *
-     * @return Java version formatted as major_version.minor_version
-     */
-    private static String getJavaVersion() {
-        String version = System.getProperty("java.version");
-        if (version == null) {
-            throw new NullPointerException("No java version could be determined " +
-                    "from system properties");
-        }
-
-        // "java version" system property is formatted
-        // major_version.minor_version.patch_level.
-        // Find second dot, instead of last dot, to be safe
-        int pos = version.indexOf('.');
-        pos = version.indexOf('.', pos + 1);
-        return version.substring(0, pos);
-    }
-
-    /**
-     * Clear out any side effects of running tests
-     */
-    public void clearPostTestEffects() throws Exception {
-        setup.postTest(conf);
-    }
-
-    /**
-     * Clear out any side effects of running tests
-     */
-    public void clearTablesCreatedDuringTests() throws Exception {
-        if (System.getenv(QTEST_LEAVE_FILES) != null) {
-            return;
-        }
-
-        // Delete any tables other than the source tables
-        // and any databases other than the default database.
-        for (String dbName : db.getAllDatabases()) {
-            SessionState.get().setCurrentDatabase(dbName);
-            for (String tblName : db.getAllTables()) {
-                if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
-                    Table tblObj = db.getTable(tblName);
-                    // dropping index table can not be dropped directly. Dropping the base
-                    // table will automatically drop all its index table
-                    if (tblObj.isIndexTable()) {
-                        continue;
-                    }
-                    db.dropTable(dbName, tblName);
-                } else {
-                    // this table is defined in srcTables, drop all indexes on it
-                    List<Index> indexes = db.getIndexes(dbName, tblName, (short) -1);
-                    if (indexes != null && indexes.size() > 0) {
-                        for (Index index : indexes) {
-                            db.dropIndex(dbName, tblName, index.getIndexName(), true, true);
-                        }
-                    }
-                }
-            }
-            if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
-                // Drop cascade, may need to drop functions
-                db.dropDatabase(dbName, true, true, true);
-            }
-        }
-
-        // delete remaining directories for external tables (can affect stats for following tests)
-        try {
-            Path p = new Path(testWarehouse);
-            FileSystem fileSystem = p.getFileSystem(conf);
-            if (fileSystem.exists(p)) {
-                for (FileStatus status : fileSystem.listStatus(p)) {
-                    if (status.isDir()) {
-                        fileSystem.delete(status.getPath(), true);
-                    }
-                }
-            }
-        } catch (IllegalArgumentException e) {
-            // ignore.. provides invalid url sometimes intentionally
-        }
-        SessionState.get().setCurrentDatabase(DEFAULT_DATABASE_NAME);
-
-        List<String> roleNames = db.getAllRoleNames();
-        for (String roleName : roleNames) {
-            if (!"PUBLIC".equalsIgnoreCase(roleName) && !"ADMIN".equalsIgnoreCase(roleName)) {
-                db.dropRole(roleName);
-            }
-        }
-    }
-
-    /**
-     * Clear out any side effects of running tests
-     */
-    public void clearTestSideEffects() throws Exception {
-        if (System.getenv(QTEST_LEAVE_FILES) != null) {
-            return;
-        }
-
-        clearTablesCreatedDuringTests();
-    }
-
-    public void cleanUp() throws Exception {
-        if (!isSessionStateStarted) {
-            startSessionState();
-        }
-        if (System.getenv(QTEST_LEAVE_FILES) != null) {
-            return;
-        }
-
-        clearTablesCreatedDuringTests();
-
-        SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
-
-        if (cleanupScript != "") {
-            String cleanupCommands = readEntireFileIntoString(new File(cleanupScript));
-            LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
-            if (cliDriver == null) {
-                cliDriver = new CliDriver();
-            }
-            cliDriver.processLine(cleanupCommands);
-        }
-
-        SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", false);
-
-        // delete any contents in the warehouse dir
-        Path p = new Path(testWarehouse);
-        FileSystem fs = p.getFileSystem(conf);
-
-        try {
-            FileStatus[] ls = fs.listStatus(p);
-            for (int i = 0; (ls != null) && (i < ls.length); i++) {
-                fs.delete(ls[i].getPath(), true);
-            }
-        } catch (FileNotFoundException e) {
-            // Best effort
-        }
-
-        FunctionRegistry.unregisterTemporaryUDF("test_udaf");
-        FunctionRegistry.unregisterTemporaryUDF("test_error");
-    }
-
-    public void createSources() throws Exception {
-        if (!isSessionStateStarted) {
-            startSessionState();
-        }
-        conf.setBoolean("hive.test.init.phase", true);
-
-        if (cliDriver == null) {
-            cliDriver = new CliDriver();
-        }
-        cliDriver.processLine("set test.data.dir=" + testFiles + ";");
-
-        conf.setBoolean("hive.test.init.phase", false);
-    }
-
-    public void init() throws Exception {
-        testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
-        conf.setBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD, false);
-        String execEngine = conf.get("hive.execution.engine");
-        conf.set("hive.execution.engine", "mr");
-        SessionState.start(conf);
-        conf.set("hive.execution.engine", execEngine);
-        db = Hive.get(conf);
-        pd = new ParseDriver();
-        sem = new SemanticAnalyzer(conf);
-    }
-
-    public void init(String tname) throws Exception {
-        cleanUp();
-        createSources();
-        cliDriver.processCmd("set hive.cli.print.header=true;");
-    }
-
-    public void cliInit(String tname) throws Exception {
-        cliInit(tname, true);
-    }
-
-    public String cliInit(String tname, boolean recreate) throws Exception {
-        if (recreate) {
-            cleanUp();
-            createSources();
-        }
-
-        HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
-                "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator");
-        Utilities.clearWorkMap();
-        CliSessionState ss = new CliSessionState(conf);
-        assert ss != null;
-        ss.in = System.in;
-
-        String outFileExtension = getOutFileExtension(tname);
-        String stdoutName = null;
-        if (outDir != null) {
-            File qf = new File(outDir, tname);
-            stdoutName = qf.getName().concat(outFileExtension);
-        } else {
-            stdoutName = tname + outFileExtension;
-        }
-
-        File outf = new File(logDir, stdoutName);
-        OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
-        if (qSortQuerySet.contains(tname)) {
-            ss.out = new SortPrintStream(fo, "UTF-8");
-        } else if (qHashQuerySet.contains(tname)) {
-            ss.out = new DigestPrintStream(fo, "UTF-8");
-        } else if (qSortNHashQuerySet.contains(tname)) {
-            ss.out = new SortAndDigestPrintStream(fo, "UTF-8");
-        } else {
-            ss.out = new PrintStream(fo, true, "UTF-8");
-        }
-        ss.err = new CachingPrintStream(fo, true, "UTF-8");
-        ss.setIsSilent(true);
-        SessionState oldSs = SessionState.get();
-
-        if (oldSs != null && clusterType == MiniClusterType.tez) {
-            oldSs.close();
-        }
-
-        if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
-            oldSs.out.close();
-        }
-        SessionState.start(ss);
-
-        cliDriver = new CliDriver();
-        cliDriver.processInitFiles(ss);
-
-        return outf.getAbsolutePath();
-    }
-
-    private CliSessionState startSessionState()
-            throws IOException {
-
-        HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
-                "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator");
-
-        String execEngine = conf.get("hive.execution.engine");
-        conf.set("hive.execution.engine", "mr");
-        CliSessionState ss = new CliSessionState(conf);
-        assert ss != null;
-        ss.in = System.in;
-        ss.out = System.out;
-        ss.err = System.out;
-
-        SessionState oldSs = SessionState.get();
-        if (oldSs != null && clusterType == MiniClusterType.tez) {
-            oldSs.close();
-        }
-        if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
-            oldSs.out.close();
-        }
-        SessionState.start(ss);
-
-        isSessionStateStarted = true;
-
-        conf.set("hive.execution.engine", execEngine);
-        return ss;
-    }
-
-    public int executeOne(String tname) {
-        String q = qMap.get(tname);
-
-        if (q.indexOf(";") == -1) {
-            return -1;
-        }
-
-        String q1 = q.substring(0, q.indexOf(";") + 1);
-        String qrest = q.substring(q.indexOf(";") + 1);
-        qMap.put(tname, qrest);
-
-        LOG.info("Executing " + q1);
-        return cliDriver.processLine(q1);
-    }
-
+public class HiveTestUtil extends QTestUtil {
     public static final String CRLF = System.getProperty("line.separator");
 
-    public int executeClient(String tname1, String tname2) {
-        String commands = getCommands(tname1) + CRLF + getCommands(tname2);
-        return cliDriver.processLine(commands);
+    public HiveTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer,
+            String initScript, String cleanupScript, boolean withLlapIo) throws Exception {
+        super(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, withLlapIo);
     }
 
+    @Override
     public int executeClient(String tname) {
         conf.set("mapreduce.job.name", "test");
-        return cliDriver.processLine(getCommands(tname), false);
-    }
-
-    private String getCommands(String tname) {
-        String commands = qMap.get(tname);
-        StringBuilder newCommands = new StringBuilder(commands.length());
-        int lastMatchEnd = 0;
-        Matcher commentMatcher = Pattern.compile("^--.*$", Pattern.MULTILINE).matcher(commands);
-        while (commentMatcher.find()) {
-            newCommands.append(commands.substring(lastMatchEnd, commentMatcher.start()));
-            newCommands.append(commentMatcher.group().replaceAll("(?<!\\\\);", "\\\\;"));
-            lastMatchEnd = commentMatcher.end();
-        }
-        newCommands.append(commands.substring(lastMatchEnd, commands.length()));
-        commands = newCommands.toString();
-        return commands;
-    }
-
-    public boolean shouldBeSkipped(String tname) {
-        return qSkipSet.contains(tname);
-    }
-
-    private String getOutFileExtension(String fname) {
-        String outFileExtension = ".out";
-        if (qJavaVersionSpecificOutput.contains(fname)) {
-            outFileExtension = ".java" + javaVersion + ".out";
-        }
-
-        return outFileExtension;
-    }
-
-    /**
-     * Given the current configurations (e.g., hadoop version and execution mode), return
-     * the correct file name to compare with the current test run output.
-     *
-     * @param outDir   The directory where the reference log files are stored.
-     * @param testName The test file name (terminated by ".out").
-     * @return The file name appended with the configuration values if it exists.
-     */
-    public String outPath(String outDir, String testName) {
-        String ret = (new File(outDir, testName)).getPath();
-        // List of configurations. Currently the list consists of hadoop version and execution
-        // mode only
-        List<String> configs = new ArrayList<String>();
-        configs.add(this.hadoopVer);
-
-        Deque<String> stack = new LinkedList<String>();
-        StringBuilder sb = new StringBuilder();
-        sb.append(testName);
-        stack.push(sb.toString());
-
-        // example file names are input1.q.out_0.20.0_minimr or input2.q.out_0.17
-        for (String s : configs) {
-            sb.append('_');
-            sb.append(s);
-            stack.push(sb.toString());
-        }
-        while (stack.size() > 0) {
-            String fileName = stack.pop();
-            File f = new File(outDir, fileName);
-            if (f.exists()) {
-                ret = f.getPath();
-                break;
-            }
-        }
-        return ret;
-    }
-
-    private Pattern[] toPattern(String[] patternStrs) {
-        Pattern[] patterns = new Pattern[patternStrs.length];
-        for (int i = 0; i < patternStrs.length; i++) {
-            patterns[i] = Pattern.compile(patternStrs[i]);
-        }
-        return patterns;
-    }
-
-    private void maskPatterns(Pattern[] patterns, String fname) throws Exception {
-        String maskPattern = "#### A masked pattern was here ####";
-
-        String line;
-        BufferedReader in;
-        BufferedWriter out;
-
-        File file = new File(fname);
-        File fileOrig = new File(fname + ".orig");
-        FileUtils.copyFile(file, fileOrig);
-
-        in = new BufferedReader(new InputStreamReader(new FileInputStream(fileOrig), "UTF-8"));
-        out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
-
-        boolean lastWasMasked = false;
-        while (null != (line = in.readLine())) {
-            for (Pattern pattern : patterns) {
-                line = pattern.matcher(line).replaceAll(maskPattern);
-            }
-
-            if (line.equals(maskPattern)) {
-                // We're folding multiple masked lines into one.
-                if (!lastWasMasked) {
-                    out.write(line);
-                    out.write("\n");
-                    lastWasMasked = true;
-                }
-            } else {
-                out.write(line);
-                out.write("\n");
-                lastWasMasked = false;
-            }
-        }
-
-        in.close();
-        out.close();
-    }
-
-    private final Pattern[] planMask = toPattern(new String[]{
-            ".*file:.*",
-            ".*pfile:.*",
-            ".*hdfs:.*",
-            ".*/tmp/.*",
-            ".*invalidscheme:.*",
-            ".*lastUpdateTime.*",
-            ".*lastAccessTime.*",
-            ".*lastModifiedTime.*",
-            ".*[Oo]wner.*",
-            ".*CreateTime.*",
-            ".*LastAccessTime.*",
-            ".*Location.*",
-            ".*LOCATION '.*",
-            ".*transient_lastDdlTime.*",
-            ".*last_modified_.*",
-            ".*at org.*",
-            ".*at sun.*",
-            ".*at java.*",
-            ".*at junit.*",
-            ".*Caused by:.*",
-            ".*LOCK_QUERYID:.*",
-            ".*LOCK_TIME:.*",
-            ".*grantTime.*",
-            ".*[.][.][.] [0-9]* more.*",
-            ".*job_[0-9_]*.*",
-            ".*job_local[0-9_]*.*",
-            ".*USING 'java -cp.*",
-            "^Deleted.*",
-            ".*DagName:.*",
-            ".*Input:.*/data/files/.*",
-            ".*Output:.*/data/files/.*",
-            ".*total number of created files now is.*"
-    });
-
-    public int checkCliDriverResults(String tname) throws Exception {
-        assert (qMap.containsKey(tname));
-
-        String outFileExtension = getOutFileExtension(tname);
-        String outFileName = outPath(outDir, tname + outFileExtension);
-
-        File f = new File(logDir, tname + outFileExtension);
-
-        maskPatterns(planMask, f.getPath());
-        int exitVal = executeDiffCommand(f.getPath(),
-                outFileName, false,
-                qSortSet.contains(tname));
-
-        if (exitVal != 0 && overWrite) {
-            exitVal = overwriteResults(f.getPath(), outFileName);
-        }
-
-        return exitVal;
-    }
-
-
-    public int checkCompareCliDriverResults(String tname, List<String> outputs) throws Exception {
-        assert outputs.size() > 1;
-        maskPatterns(planMask, outputs.get(0));
-        for (int i = 1; i < outputs.size(); ++i) {
-            maskPatterns(planMask, outputs.get(i));
-            int ecode = executeDiffCommand(
-                    outputs.get(i - 1), outputs.get(i), false, qSortSet.contains(tname));
-            if (ecode != 0) {
-                LOG.info("Files don't match: " + outputs.get(i - 1) + " and " + outputs.get(i));
-                return ecode;
-            }
-        }
-        return 0;
-    }
-
-    private static int overwriteResults(String inFileName, String outFileName) throws Exception {
-        // This method can be replaced with Files.copy(source, target, REPLACE_EXISTING)
-        // once Hive uses JAVA 7.
-        LOG.info("Overwriting results " + inFileName + " to " + outFileName);
-        return executeCmd(new String[]{
-                "cp",
-                getQuotedString(inFileName),
-                getQuotedString(outFileName)
-        });
-    }
-
-    private static int executeDiffCommand(String inFileName,
-                                          String outFileName,
-                                          boolean ignoreWhiteSpace,
-                                          boolean sortResults
-    ) throws Exception {
-
-        int result = 0;
-
-        if (sortResults) {
-            // sort will try to open the output file in write mode on windows. We need to
-            // close it first.
-            SessionState ss = SessionState.get();
-            if (ss != null && ss.out != null && ss.out != System.out) {
-                ss.out.close();
-            }
-
-            String inSorted = inFileName + SORT_SUFFIX;
-            String outSorted = outFileName + SORT_SUFFIX;
-
-            result = sortFiles(inFileName, inSorted);
-            result |= sortFiles(outFileName, outSorted);
-            if (result != 0) {
-                LOG.error("ERROR: Could not sort files before comparing");
-                return result;
-            }
-            inFileName = inSorted;
-            outFileName = outSorted;
-        }
-
-        ArrayList<String> diffCommandArgs = new ArrayList<String>();
-        diffCommandArgs.add("diff");
-
-        // Text file comparison
-        diffCommandArgs.add("-a");
-
-        // Ignore changes in the amount of white space
-        if (ignoreWhiteSpace || Shell.WINDOWS) {
-            diffCommandArgs.add("-b");
-        }
-
-        // Files created on Windows machines have different line endings
-        // than files created on Unix/Linux. Windows uses carriage return and line feed
-        // ("\r\n") as a line ending, whereas Unix uses just line feed ("\n").
-        // Also StringBuilder.toString(), Stream to String conversions adds extra
-        // spaces at the end of the line.
-        if (Shell.WINDOWS) {
-            diffCommandArgs.add("--strip-trailing-cr"); // Strip trailing carriage return on input
-            diffCommandArgs.add("-B"); // Ignore changes whose lines are all blank
-        }
-        // Add files to compare to the arguments list
-        diffCommandArgs.add(getQuotedString(inFileName));
-        diffCommandArgs.add(getQuotedString(outFileName));
-
-        result = executeCmd(diffCommandArgs);
-
-        if (sortResults) {
-            new File(inFileName).delete();
-            new File(outFileName).delete();
-        }
-
-        return result;
-    }
-
-    private static int sortFiles(String in, String out) throws Exception {
-        return executeCmd(new String[]{
-                "sort",
-                getQuotedString(in),
-        }, out, null);
-    }
-
-    private static int executeCmd(Collection<String> args) throws Exception {
-        return executeCmd(args, null, null);
-    }
-
-    private static int executeCmd(String[] args) throws Exception {
-        return executeCmd(args, null, null);
-    }
-
-    private static int executeCmd(Collection<String> args, String outFile, String errFile) throws
-            Exception {
-        String[] cmdArray = args.toArray(new String[args.size()]);
-        return executeCmd(cmdArray, outFile, errFile);
-    }
-
-    private static int executeCmd(String[] args, String outFile, String errFile) throws Exception {
-        LOG.info("Running: " + org.apache.commons.lang.StringUtils.join(args, ' '));
-
-        PrintStream out = outFile == null ?
-                SessionState.getConsole().getChildOutStream() :
-                new PrintStream(new FileOutputStream(outFile), true);
-        PrintStream err = errFile == null ?
-                SessionState.getConsole().getChildErrStream() :
-                new PrintStream(new FileOutputStream(errFile), true);
-
-        Process executor = Runtime.getRuntime().exec(args);
-
-        StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, err);
-        StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, out);
-
-        outPrinter.start();
-        errPrinter.start();
-
-        int result = executor.waitFor();
-
-        outPrinter.join();
-        errPrinter.join();
-
-        if (outFile != null) {
-            out.close();
-        }
-
-        if (errFile != null) {
-            err.close();
-        }
-
-        return result;
-    }
-
-    private static String getQuotedString(String str) {
-        return Shell.WINDOWS ? String.format("\"%s\"", str) : str;
-    }
-
-    public ASTNode parseQuery(String tname) throws Exception {
-        return pd.parse(qMap.get(tname));
-    }
-
-    public void resetParser() throws SemanticException {
-        pd = new ParseDriver();
-        sem = new SemanticAnalyzer(conf);
-    }
-
-    public TreeMap<String, String> getQMap() {
-        return qMap;
-    }
-
-    /**
-     * HiveTestSetup defines test fixtures which are reused across testcases,
-     * and are needed before any test can be run
-     */
-    public static class HiveTestSetup {
-        private MiniZooKeeperCluster zooKeeperCluster = null;
-        private int zkPort;
-        private ZooKeeper zooKeeper;
-
-        public HiveTestSetup() {
-        }
-
-        public void preTest(HiveConf conf) throws Exception {
-
-            if (zooKeeperCluster == null) {
-                //create temp dir
-                String tmpBaseDir = System.getProperty("test.tmp.dir");
-                File tmpDir = Utilities.createTempDir(tmpBaseDir);
-
-                zooKeeperCluster = new MiniZooKeeperCluster();
-                zkPort = zooKeeperCluster.startup(tmpDir);
-            }
-
-            if (zooKeeper != null) {
-                zooKeeper.close();
-            }
-
-            int sessionTimeout = (int) conf.getTimeVar(HiveConf.ConfVars
-                    .HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
-            zooKeeper = new ZooKeeper("localhost:" + zkPort, sessionTimeout, new Watcher() {
-                @Override
-                public void process(WatchedEvent arg0) {
-                }
-            });
-
-            String zkServer = "localhost";
-            conf.set("hive.zookeeper.quorum", zkServer);
-            conf.set("hive.zookeeper.client.port", "" + zkPort);
-        }
-
-        public void postTest(HiveConf conf) throws Exception {
-            if (zooKeeperCluster == null) {
-                return;
-            }
-
-            if (zooKeeper != null) {
-                zooKeeper.close();
-            }
-
-            ZooKeeperHiveLockManager.releaseAllLocks(conf);
-        }
-
-        public void tearDown() throws Exception {
-            if (zooKeeperCluster != null) {
-                zooKeeperCluster.shutdown();
-                zooKeeperCluster = null;
-            }
-        }
-    }
-
-    /**
-     * QTRunner: Runnable class for running a a single query file.
-     **/
-    public static class HiveTestRunner implements Runnable {
-        private final HiveTestUtil qt;
-        private final String fname;
-
-        public HiveTestRunner(HiveTestUtil qt, String fname) {
-            this.qt = qt;
-            this.fname = fname;
-        }
-
-        @Override
-        public void run() {
-            try {
-                // assumption is that environment has already been cleaned once globally
-                // hence each thread does not call cleanUp() and createSources() again
-                qt.cliInit(fname, false);
-                qt.executeClient(fname);
-            } catch (Throwable e) {
-                LOG.error("Query file " + fname + " failed with exception ", e);
-                e.printStackTrace();
-                outputTestFailureHelpMessage();
-            }
-        }
-    }
-
-    /**
-     * Executes a set of query files in sequence.
-     *
-     * @param qfiles array of input query files containing arbitrary number of hive
-     *               queries
-     * @param qt     array of HiveTestUtils, one per qfile
-     * @return true if all queries passed, false otw
-     */
-    public static boolean queryListRunnerSingleThreaded(File[] qfiles, HiveTestUtil[] qt)
-            throws Exception {
-        boolean failed = false;
-        qt[0].cleanUp();
-        qt[0].createSources();
-        for (int i = 0; i < qfiles.length && !failed; i++) {
-            qt[i].clearTestSideEffects();
-            qt[i].cliInit(qfiles[i].getName(), false);
-            qt[i].executeClient(qfiles[i].getName());
-            int ecode = qt[i].checkCliDriverResults(qfiles[i].getName());
-            if (ecode != 0) {
-                failed = true;
-                LOG.error("Test " + qfiles[i].getName()
-                        + " results check failed with error code " + ecode);
-                outputTestFailureHelpMessage();
-            }
-            qt[i].clearPostTestEffects();
-        }
-        return (!failed);
-    }
-
-    public static void outputTestFailureHelpMessage() {
-        LOG.error("See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
-                + "or check ./ql/target/surefire-reports or " +
-                "./itests/qtest/target/surefire-reports/ for specific test cases logs.");
-    }
-
-    public static String ensurePathEndsInSlash(String path) {
-        if (path == null) {
-            throw new NullPointerException("Path cannot be null");
-        }
-        if (path.endsWith(File.separator)) {
-            return path;
-        } else {
-            return path + File.separator;
-        }
-    }
-
-    private static String[] cachedQvFileList = null;
-    private static ImmutableList<String> cachedDefaultQvFileList = null;
-    private static Pattern qvSuffix = Pattern.compile("_[0-9]+.qv$", Pattern.CASE_INSENSITIVE);
-
-    public static List<String> getVersionFiles(String queryDir, String tname) {
-        ensureQvFileList(queryDir);
-        List<String> result = getVersionFilesInternal(tname);
-        if (result == null) {
-            result = cachedDefaultQvFileList;
-        }
-        return result;
-    }
-
-    private static void ensureQvFileList(String queryDir) {
-        if (cachedQvFileList != null) return;
-        // Not thread-safe.
-        LOG.info("Getting versions from " + queryDir);
-        cachedQvFileList = (new File(queryDir)).list(new FilenameFilter() {
-            @Override
-            public boolean accept(File dir, String name) {
-                return name.toLowerCase().endsWith(".qv");
-            }
-        });
-        if (cachedQvFileList == null) return; // no files at all
-        Arrays.sort(cachedQvFileList, String.CASE_INSENSITIVE_ORDER);
-        List<String> defaults = getVersionFilesInternal("default");
-        cachedDefaultQvFileList = (defaults != null)
-                ? ImmutableList.copyOf(defaults) : ImmutableList.<String>of();
-    }
-
-    private static List<String> getVersionFilesInternal(String tname) {
-        if (cachedQvFileList == null) {
-            return new ArrayList<String>();
-        }
-        int pos = Arrays.binarySearch(cachedQvFileList, tname, String.CASE_INSENSITIVE_ORDER);
-        if (pos >= 0) {
-            throw new BuildException("Unexpected file list element: " + cachedQvFileList[pos]);
-        }
-        List<String> result = null;
-        for (pos = (-pos - 1); pos < cachedQvFileList.length; ++pos) {
-            String candidate = cachedQvFileList[pos];
-            if (candidate.length() <= tname.length()
-                    || !tname.equalsIgnoreCase(candidate.substring(0, tname.length()))
-                    || !qvSuffix.matcher(candidate.substring(tname.length())).matches()) {
-                break;
-            }
-            if (result == null) {
-                result = new ArrayList<String>();
-            }
-            result.add(candidate);
-        }
-        return result;
-    }
-
-    public void failed(int ecode, String fname, String debugHint) {
-        String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
-        Assert.fail("Client Execution failed with error code = " + ecode +
-                (command != null ? " running " + command : "") + (debugHint != null ? debugHint :
-                ""));
-    }
-
-    // for negative tests, which is succeeded.. no need to print the query string
-    public void failed(String fname, String debugHint) {
-        Assert.fail("Client Execution was expected to fail, but succeeded with error code 0 " +
-                (debugHint != null ? debugHint : ""));
-    }
-
-    public void failedDiff(int ecode, String fname, String debugHint) {
-        Assert.fail("Client Execution results failed with error code = " + ecode +
-                (debugHint != null ? debugHint : ""));
-    }
-
-    public void failed(Throwable e, String fname, String debugHint) {
-        String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
-        LOG.error("Exception: ", e);
-        e.printStackTrace();
-        LOG.error("Failed query: " + fname);
-        Assert.fail("Unexpected exception " +
-                org.apache.hadoop.util.StringUtils.stringifyException(e) + "\n" +
-                (command != null ? " running " + command : "") +
-                (debugHint != null ? debugHint : ""));
-    }
-
-    public static class WindowsPathUtil {
-
-        public static void convertPathsFromWindowsToHdfs(HiveConf conf) {
-            // Following local paths are used as HDFS paths in unit tests.
-            // It works well in Unix as the path notation in Unix and HDFS is more or less same.
-            // But when it comes to Windows, drive letter separator ':' & backslash '\" are invalid
-            // characters in HDFS so we need to converts these local paths to HDFS paths before
-            // using them
-            // in unit tests.
-
-            String orgWarehouseDir = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
-            conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, getHdfsUriString(orgWarehouseDir));
-
-            String orgTestTempDir = System.getProperty("test.tmp.dir");
-            System.setProperty("test.tmp.dir", getHdfsUriString(orgTestTempDir));
-
-            String orgTestWarehouseDir = System.getProperty("test.warehouse.dir");
-            System.setProperty("test.warehouse.dir", getHdfsUriString(orgTestWarehouseDir));
-
-            String orgScratchDir = conf.getVar(HiveConf.ConfVars.SCRATCHDIR);
-            conf.setVar(HiveConf.ConfVars.SCRATCHDIR, getHdfsUriString(orgScratchDir));
-        }
-
-        public static String getHdfsUriString(String uriStr) {
-            assert uriStr != null;
-            if (Shell.WINDOWS) {
-                // If the URI conversion is from Windows to HDFS then replace the '\' with '/'
-                // and remove the windows single drive letter & colon from absolute path.
-                return uriStr.replace('\\', '/')
-                        .replaceFirst("/[c-zC-Z]:", "/")
-                        .replaceFirst("^[c-zC-Z]:", "");
-            }
-            return uriStr;
-        }
+        return super.executeClient(tname);
     }
+    
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTezIT.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTezIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTezIT.java
index f6efe18..a675a0e 100644
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTezIT.java
+++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTezIT.java
@@ -21,10 +21,8 @@ package org.apache.phoenix.hive;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
-import org.junit.Ignore;
 
 @Category(NeedsOwnMiniClusterTest.class)
-@Ignore
 public class HiveTezIT extends HivePhoenixStoreIT {
 
     @BeforeClass

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
index 4e9f465..0f8ee93 100644
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
+++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hive.ql.metadata.InputEstimator;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.Deserializer;
-import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobConf;
@@ -242,7 +242,7 @@ public class PhoenixStorageHandler extends DefaultStorageHandler implements
     }
 
     @Override
-    public Class<? extends SerDe> getSerDeClass() {
+    public Class<? extends AbstractSerDe> getSerDeClass() {
         return PhoenixSerDe.class;
     }
 

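The swap above tracks Hive's serde2 API: the legacy SerDe interface gives way to AbstractSerDe in the Hive releases this patch targets, so getSerDeClass() has to narrow its return type. A minimal, non-authoritative sketch of a handler written against the new signature (only AbstractSerDe, DefaultStorageHandler and PhoenixSerDe come from the patch; the class name is illustrative):

    import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
    import org.apache.hadoop.hive.serde2.AbstractSerDe;
    import org.apache.phoenix.hive.PhoenixSerDe;

    // Sketch only: pin the handler's (de)serializer to an AbstractSerDe subclass.
    public class ExampleStorageHandler extends DefaultStorageHandler {
        @Override
        public Class<? extends AbstractSerDe> getSerDeClass() {
            return PhoenixSerDe.class;
        }
    }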
http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
index b4f96ee..02c62d1 100644
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -104,7 +104,7 @@ public class PhoenixInputFormat<T extends DBWritable> implements InputFormat<Wri
             String filterExprSerialized = jobConf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
             if (filterExprSerialized != null) {
                 ExprNodeGenericFuncDesc filterExpr =
-                        Utilities.deserializeExpression(filterExprSerialized);
+                        SerializationUtilities.deserializeExpression(filterExprSerialized);
                 PhoenixPredicateDecomposer predicateDecomposer =
                         PhoenixPredicateDecomposer.create(Arrays.asList(jobConf.get(serdeConstants.LIST_COLUMNS).split(",")));
                 predicateDecomposer.decomposePredicate(filterExpr);
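Hive moved expression (de)serialization from Utilities into SerializationUtilities, which is all the import swap above reflects. For context, a hedged sketch of the producer side that puts the filter into the job configuration in the first place (serializeExpression is the SerializationUtilities counterpart of the call above; the wrapper class and method are illustrative):

    import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
    import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
    import org.apache.hadoop.hive.ql.plan.TableScanDesc;
    import org.apache.hadoop.mapred.JobConf;

    public class FilterPushdownSketch {
        // Sketch only: serialize a pushed-down filter under the same key that
        // PhoenixInputFormat reads back and hands to the predicate decomposer.
        static void pushFilter(JobConf jobConf, ExprNodeGenericFuncDesc filterExpr) {
            jobConf.set(TableScanDesc.FILTER_EXPR_CONF_STR,
                    SerializationUtilities.serializeExpression(filterExpr));
        }
    }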

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
index 659983a..207b46a 100644
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
+++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
@@ -17,7 +17,16 @@
  */
 package org.apache.phoenix.hive.ql.index;
 
-import com.google.common.collect.Lists;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
@@ -53,15 +62,7 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUtcTimestamp;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
+import com.google.common.collect.Lists;
 
 /**
  * Clone of org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer with modifying
@@ -345,17 +346,14 @@ public class IndexPredicateAnalyzer {
 
         if (FunctionRegistry.isOpAnd(expr)) {
             assert (nodeOutputs.length == 2);
-            ExprNodeDesc residual1 = (ExprNodeDesc) nodeOutputs[0];
-            ExprNodeDesc residual2 = (ExprNodeDesc) nodeOutputs[1];
-            if (residual1 == null) {
-                return residual2;
-            }
-            if (residual2 == null) {
-                return residual1;
-            }
+            ExprNodeDesc residual1 = (ExprNodeDesc)nodeOutputs[0];
+            ExprNodeDesc residual2 = (ExprNodeDesc)nodeOutputs[1];
+            if (residual1 == null) { return residual2; }
+            if (residual2 == null) { return residual1; }
             List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
             residuals.add(residual1);
             residuals.add(residual2);
+
             return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, FunctionRegistry
                     .getGenericUDFForAnd(), residuals);
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
index 8d76ac0..d5eb86f 100644
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
+++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixConnectionUtil.java
@@ -17,10 +17,15 @@
  */
 package org.apache.phoenix.hive.util;
 
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.mapred.JobConf;
@@ -28,12 +33,6 @@ import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.Map;
-import java.util.Properties;
-
 /**
  * Set of methods to obtain Connection depending on configuration
  */
@@ -80,7 +79,11 @@ public class PhoenixConnectionUtil {
         String zNodeParent = tableParameterMap.get(PhoenixStorageHandlerConstants.ZOOKEEPER_PARENT);
         zNodeParent = zNodeParent == null ? PhoenixStorageHandlerConstants
                 .DEFAULT_ZOOKEEPER_PARENT : zNodeParent;
-
+        try {
+            Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
+        } catch (ClassNotFoundException e) {
+            LOG.warn("The Phoenix JDBC driver was not found on the classpath", e);
+        }
         return DriverManager.getConnection(QueryUtil.getUrl(zookeeperQuorum, clientPort,
                 zNodeParent));
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
index 19c26e5..4b23103 100644
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
+++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
@@ -17,11 +17,11 @@
  */
 package org.apache.phoenix.hive.util;
 
-import com.google.common.base.Joiner;
-import com.google.common.collect.Maps;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.lang.reflect.Array;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
 import java.math.BigDecimal;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -35,8 +35,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
+import java.util.concurrent.atomic.AtomicReference;
+
 import javax.naming.NamingException;
+
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.util.Strings;
@@ -55,11 +59,17 @@ import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
 import org.apache.phoenix.hive.ql.index.IndexSearchCondition;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+
 /**
  * Misc utils for PhoenixStorageHandler
  */
 
 public class PhoenixStorageHandlerUtil {
+    private static final Log LOG = LogFactory.getLog(PhoenixStorageHandlerUtil.class);
+    private static final AtomicReference<Method> GET_BUCKET_METHOD_REF = new AtomicReference<>();
+    private static final AtomicReference<Method> GET_BUCKET_ID_METHOD_REF = new AtomicReference<>();
 
     public static String getTargetTableName(Table table) {
         Map<String, String> tableParameterMap = table.getParameters();
@@ -268,11 +278,11 @@ public class PhoenixStorageHandlerUtil {
     public static String getOptionsValue(Options options) {
         StringBuilder content = new StringBuilder();
 
-        int bucket = options.getBucket();
+        int bucket = getBucket(options);
         String inspectorInfo = options.getInspector().getCategory() + ":" + options.getInspector()
                 .getTypeName();
-        long maxTxnId = options.getMaximumTransactionId();
-        long minTxnId = options.getMinimumTransactionId();
+        long maxTxnId = options.getMaximumWriteId();
+        long minTxnId = options.getMinimumWriteId();
         int recordIdColumn = options.getRecordIdColumn();
         boolean isCompresses = options.isCompressed();
         boolean isWritingBase = options.isWritingBase();
@@ -285,4 +295,27 @@ public class PhoenixStorageHandlerUtil {
 
         return content.toString();
     }
+
+    private static int getBucket(Options options) {
+        Method getBucketMethod = GET_BUCKET_METHOD_REF.get();
+        try {
+            if (getBucketMethod == null) {
+                getBucketMethod = Options.class.getMethod("getBucket");
+                GET_BUCKET_METHOD_REF.set(getBucketMethod);
+            }
+            return (int) getBucketMethod.invoke(options);
+        } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) {
+            LOG.trace("Failed to invoke Options.getBucket()", e);
+        }
+        Method getBucketIdMethod = GET_BUCKET_ID_METHOD_REF.get();
+        try {
+            if (getBucketIdMethod == null) {
+                getBucketIdMethod = Options.class.getMethod("getBucketId");
+                GET_BUCKET_ID_METHOD_REF.set(getBucketIdMethod);
+            }
+            return (int) getBucketIdMethod.invoke(options);
+        } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) {
+            throw new RuntimeException("Failed to invoke Options.getBucketId()", e);
+        }
+    }
 }
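getBucket() goes through reflection because the Options accessor differs across the Hive versions this module is built and run against: one line of Hive exposes getBucket(), another getBucketId(), and resolving the method at runtime keeps a single phoenix-hive binary compatible with both. The same pattern in a generic, self-contained form, with the resolved Method cached once (only the two accessor names come from the patch; the helper itself is illustrative):

    import java.lang.reflect.Method;
    import java.util.concurrent.atomic.AtomicReference;

    final class BucketAccessorSketch {
        private static final AtomicReference<Method> CACHED = new AtomicReference<>();

        // Sketch only: look up whichever accessor exists, cache it, and reuse it.
        static int bucketOf(Object options) throws Exception {
            Method m = CACHED.get();
            if (m == null) {
                Class<?> c = options.getClass();
                try {
                    m = c.getMethod("getBucket");
                } catch (NoSuchMethodException e) {
                    m = c.getMethod("getBucketId");
                }
                CACHED.set(m);
            }
            return (int) m.invoke(options);
        }
    }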

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/test/resources/hive-site.xml
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/test/resources/hive-site.xml b/phoenix-hive/src/test/resources/hive-site.xml
new file mode 100644
index 0000000..143a829
--- /dev/null
+++ b/phoenix-hive/src/test/resources/hive-site.xml
@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+<property>
+  <name>hive.in.test</name>
+  <value>true</value>
+  <description>Internal marker for test. Used for masking env-dependent values</description>
+</property>
+
+<property>
+  <name>hive.tez.container.size</name>
+  <value>128</value>
+  <description></description>
+</property>
+
+<property>
+  <name>phoenix.log.buffer.size</name>
+  <value>1024</value>
+  <description></description>
+</property>
+
+
+<property>
+  <name>datanucleus.schema.autoCreateAll</name>
+  <value>true</value>
+</property>
+
+
+<property>
+  <name>hive.metastore.schema.verification</name>
+  <value>false</value>
+</property>
+
+<property>
+  <name>hive.query.results.cache.enabled</name>
+  <value>false</value>
+</property>
+
+<property>
+  <name>hive.fetch.task.conversion</name>
+  <value>minimal</value>
+</property>
+
+<property>
+  <name>hive.auto.convert.join</name>
+  <value>false</value>
+  <description>Whether Hive enable the optimization about converting common join into mapjoin based on the input file size</description>
+</property>
+
+<property>
+  <name>hive.ignore.mapjoin.hint</name>
+  <value>false</value>
+  <description>Whether Hive ignores the mapjoin hint</description>
+</property>
+
+
+<property>
+  <name>hive.exec.mode.local.auto</name>
+  <value>false</value>
+  <description>
+    Let hive determine whether to run in local mode automatically
+    Disabling this for tests so that minimr is not affected
+  </description>
+</property>
+
+
+<!-- MetaStore settings -->
+
+
+<property>
+  <name>javax.jdo.option.ConnectionURL</name>
+  <value>jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db;create=true</value>
+</property>
+
+<property>
+  <name>javax.jdo.option.ConnectionDriverName</name>
+  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+</property>
+
+<property>
+  <name>javax.jdo.option.ConnectionUserName</name>
+  <value>APP</value>
+</property>
+
+<property>
+  <name>javax.jdo.option.ConnectionPassword</name>
+  <value>mine</value>
+</property>
+
+<property>
+  <!--  this should eventually be deprecated since the metastore should supply this -->
+  <name>hive.metastore.warehouse.dir</name>
+  <value>${test.warehouse.dir}</value>
+  <description></description>
+</property>
+
+<property>
+  <name>hive.metastore.metadb.dir</name>
+  <value>file://${test.tmp.dir}/metadb/</value>
+  <description>
+  Required by metastore server or if the uris argument below is not supplied
+  </description>
+</property>
+
+</configuration>
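Nothing reads this file explicitly; HiveConf picks up hive-site.xml from the test classpath, and the ${test.tmp.dir} and ${test.warehouse.dir} placeholders are expanded from system properties (the phoenix-hive pom below defines test.tmp.dir, for example). A small hedged sketch of that wiring, using only property names defined above:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class HiveSiteSketch {
        public static void main(String[] args) {
            // Sketch only: the system properties stand in for what the build sets.
            System.setProperty("test.tmp.dir", "/tmp/phoenix-hive-sketch");
            System.setProperty("test.warehouse.dir", "/tmp/phoenix-hive-sketch/warehouse");
            HiveConf conf = new HiveConf();
            System.out.println(conf.get("hive.in.test"));                        // true
            System.out.println(conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE));
        }
    }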

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/test/resources/tez-site.xml
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/test/resources/tez-site.xml b/phoenix-hive/src/test/resources/tez-site.xml
new file mode 100644
index 0000000..97ae8c5
--- /dev/null
+++ b/phoenix-hive/src/test/resources/tez-site.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+<property>
+    <name>tez.am.resource.memory.mb</name>
+    <value>500</value>
+  </property>
+
+<property>
+    <name>tez.am.task.memory.mb</name>
+    <value>500</value>
+  </property>
+
+<property>
+    <name>hive.tez.container.size</name>
+    <value>500</value>
+</property>
+
+
+<property>
+    <name>hive.in.tez.test</name>
+    <value>true</value>
+</property>
+
+<property>
+    <name>tez.ignore.lib.uris</name>
+    <value>true</value>
+</property>
+
+
+<property>
+  <name>hive.tez.input.format</name>
+  <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+  <description>The default input format for tez. Tez groups splits in the AM.</description>
+</property>
+
+
+<property>
+  <name>hive.input.format</name>
+  <value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
+  <description>The default input format, if it is not specified, the system assigns it. It is set to HiveInputFormat for hadoop versions 17, 18 and 19, whereas it is set to CombineHiveInputFormat for hadoop 20. The user can always overwrite it - if there is a bug in CombineHiveInputFormat, it can always be manually set to HiveInputFormat. </description>
+</property>
+
+<property>
+  <name>hive.auto.convert.join</name>
+  <value>false</value>
+  <description>Whether Hive enable the optimization about converting common join into mapjoin based on the input file size</description>
+</property>
+
+<property>
+  <name>hive.ignore.mapjoin.hint</name>
+  <value>true</value>
+  <description>Whether Hive ignores the mapjoin hint</description>
+</property>
+
+  
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ad061e9..69be062 100644
--- a/pom.xml
+++ b/pom.xml
@@ -71,7 +71,7 @@
 
     <!-- Dependency versions -->
     <commons-cli.version>1.4</commons-cli.version>
-    <hive.version>1.2.1</hive.version>
+    <hive.version>3.0.0-SNAPSHOT</hive.version>
     <pig.version>0.13.0</pig.version>
     <jackson.version>1.9.2</jackson.version>
     <antlr.version>3.5.2</antlr.version>
@@ -97,7 +97,7 @@
     <!-- Do not change jodatime.version until HBASE-15199 is fixed -->
     <jodatime.version>1.6</jodatime.version>
     <joni.version>2.1.2</joni.version>
-    <avatica.version>1.10.0</avatica.version>
+    <avatica.version>1.11.0</avatica.version>
     <jetty.version>9.3.19.v20170502</jetty.version>
     <tephra.version>0.13.0-incubating</tephra.version>
     <spark.version>2.0.2</spark.version>
@@ -666,12 +666,6 @@
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-common</artifactId>
         <version>${hadoop.version}</version>
-        <exclusions>
-          <exclusion>
-            <groupId>org.xerial.snappy</groupId>
-            <artifactId>snappy-java</artifactId>
-          </exclusion>
-        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>


[3/3] phoenix git commit: PHOENIX-4423 Hive 2.3.0 support

Posted by el...@apache.org.
PHOENIX-4423 Hive 2.3.0 support

Signed-off-by: Josh Elser <el...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/83825dec
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/83825dec
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/83825dec

Branch: refs/heads/5.x-HBase-2.0
Commit: 83825dec5229458bb5aa585908661fb9c087e61f
Parents: 0a2ff12
Author: Ankit Singhal <an...@gmail.com>
Authored: Wed Apr 18 16:14:14 2018 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Wed Apr 18 23:41:41 2018 -0400

----------------------------------------------------------------------
 phoenix-hive/pom.xml                            |   79 +-
 .../hadoop/hive/ql/QTestProcessExecResult.java  |   66 +
 .../org/apache/hadoop/hive/ql/QTestUtil.java    | 2489 ++++++++++++++++++
 .../hive/ql/security/DummyAuthenticator.java    |   70 +
 .../phoenix/hive/BaseHivePhoenixStoreIT.java    |   62 +-
 .../apache/phoenix/hive/HiveMapReduceIT.java    |   24 +-
 .../apache/phoenix/hive/HivePhoenixStoreIT.java |   42 +-
 .../org/apache/phoenix/hive/HiveTestUtil.java   | 1256 +--------
 .../java/org/apache/phoenix/hive/HiveTezIT.java |    2 -
 .../phoenix/hive/PhoenixStorageHandler.java     |    4 +-
 .../hive/mapreduce/PhoenixInputFormat.java      |    4 +-
 .../hive/ql/index/IndexPredicateAnalyzer.java   |   34 +-
 .../hive/util/PhoenixConnectionUtil.java        |   19 +-
 .../hive/util/PhoenixStorageHandlerUtil.java    |   43 +-
 phoenix-hive/src/test/resources/hive-site.xml   |  123 +
 phoenix-hive/src/test/resources/tez-site.xml    |   69 +
 pom.xml                                         |   10 +-
 17 files changed, 3051 insertions(+), 1345 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 2e3fdac..8b9b4c1 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -31,11 +31,19 @@
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>
-
+  <properties>
+    <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
+  </properties>
   <dependencies>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
+      <exclusions>
+	  <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
@@ -45,6 +53,19 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
+      <artifactId>hive-metastore</artifactId>
+      <version>${hive.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-standalone-metastore</artifactId>
+      <type>test-jar</type>
+      <version>${hive.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
       <artifactId>hive-exec</artifactId>
       <version>${hive.version}</version>
       <scope>provided</scope>
@@ -114,7 +135,7 @@
       <groupId>org.apache.tez</groupId>
       <artifactId>tez-tests</artifactId>
       <scope>test</scope>
-      <version>0.8.4</version>
+      <version>0.9.1</version>
       <type>test-jar</type>
       <exclusions>
         <exclusion>
@@ -127,7 +148,7 @@
       <groupId>org.apache.tez</groupId>
       <artifactId>tez-dag</artifactId>
       <scope>test</scope>
-      <version>0.8.4</version>
+      <version>0.9.1</version>
       <exclusions>
         <exclusion>
           <groupId>org.apache.hadoop</groupId>
@@ -141,6 +162,38 @@
       <version>${mockito-all.version}</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>19.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.calcite.avatica</groupId>
+      <artifactId>avatica</artifactId>
+      <!-- Overriding the version of Avatica that PQS uses so that Hive will work -->
+      <version>${avatica.version}</version>
+      <scope>test</scope>
+      <!-- And removing a bunch of dependencies that haven't been shaded in this older
+           Avatica version which conflict with HDFS -->
+      <exclusions>
+        <exclusion>
+          <groupId>org.hsqldb</groupId>
+          <artifactId>hsqldb</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-databind</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-core</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
   </dependencies>
 
   <build>
@@ -152,6 +205,26 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-failsafe-plugin</artifactId>
+        <executions>
+	    <execution>
+              <id>NeedTheirOwnClusterTests</id>
+              <configuration>
+                 <encoding>UTF-8</encoding>
+                 <forkCount>1</forkCount>
+                 <runOrder>alphabetical</runOrder>
+                 <reuseForks>false</reuseForks>
+                 <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}" -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ -Dorg.apache.hadoop.hbase.shaded.io.netty.packagePrefix=org.apache.hadoop.hbase.shaded.</argLine>
+                 <redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
+                 <testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
+                 <groups>org.apache.phoenix.end2end.NeedsOwnMiniClusterTest</groups>
+                 <shutdown>kill</shutdown>
+              </configuration>
+              <goals>
+                 <goal>integration-test</goal>
+                 <goal>verify</goal>
+              </goals>
+            </execution>
+          </executions>
       </plugin>
       <plugin>
         <artifactId>maven-dependency-plugin</artifactId>
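The failsafe execution added above runs only classes tagged with the NeedsOwnMiniClusterTest category (the <groups> element), the same category carried by the Hive ITs in this patch, including the re-enabled HiveTezIT. A minimal hedged sketch of a test class that this execution would pick up (class and method names are illustrative):

    import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    // Sketch only: the category annotation is what matches the failsafe
    // <groups> filter configured above.
    @Category(NeedsOwnMiniClusterTest.class)
    public class ExampleOwnClusterIT {
        @Test
        public void startsItsOwnMiniCluster() throws Exception {
            // bring up the dedicated mini cluster here, as the Hive ITs do
        }
    }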

http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
new file mode 100644
index 0000000..f9f7057
--- /dev/null
+++ b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestProcessExecResult.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql;
+
+/**
+ * Standard output and return code of a process executed during the qtests.
+ */
+public class QTestProcessExecResult {
+
+  private static final String TRUNCATED_OUTPUT = "Output was too long and had to be truncated...";
+  private static final short MAX_OUTPUT_CHAR_LENGTH = 2000;
+
+  private final int returnCode;
+  private final String standardOut;
+
+  QTestProcessExecResult(int code, String output) {
+    this.returnCode = code;
+    this.standardOut = truncateIfNeeded(output);
+  }
+
+  /**
+   * @return executed process return code
+   */
+  public int getReturnCode() {
+    return this.returnCode;
+  }
+
+  /**
+   * @return output captured from stdout while process was executing
+   */
+  public String getCapturedOutput() {
+    return this.standardOut;
+  }
+
+  public static QTestProcessExecResult create(int code, String output) {
+    return new QTestProcessExecResult(code, output);
+  }
+
+  public static  QTestProcessExecResult createWithoutOutput(int code) {
+    return new QTestProcessExecResult(code, "");
+  }
+
+  private String truncateIfNeeded(String orig) {
+    if (orig.length() > MAX_OUTPUT_CHAR_LENGTH) {
+      return orig.substring(0, MAX_OUTPUT_CHAR_LENGTH) + "\r\n" + TRUNCATED_OUTPUT;
+    } else {
+      return orig;
+    }
+  }
+}
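In the qtest flow this holder is what lets a failing script or diff surface both its exit code and its (already truncated) output in one assertion. A brief hedged usage sketch (exitCode and capturedStdout are assumed to come from whatever ran the child process):

    import org.apache.hadoop.hive.ql.QTestProcessExecResult;

    public class QTestResultSketch {
        // Sketch only: bundle the exit code with the captured stdout and fail loudly.
        static void checkResult(int exitCode, String capturedStdout) {
            QTestProcessExecResult result =
                    QTestProcessExecResult.create(exitCode, capturedStdout);
            if (result.getReturnCode() != 0) {
                throw new AssertionError(
                        "qtest subprocess failed: " + result.getCapturedOutput());
            }
        }
    }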


[2/3] phoenix git commit: PHOENIX-4423 Hive 2.3.0 support

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/83825dec/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java
new file mode 100644
index 0000000..2341804
--- /dev/null
+++ b/phoenix-hive/src/it/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -0,0 +1,2489 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql;
+
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintStream;
+import java.io.Serializable;
+import java.io.StringWriter;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Deque;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Stream;
+
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.output.ByteArrayOutputStream;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.apache.hadoop.hive.cli.CliDriver;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.common.io.CachingPrintStream;
+import org.apache.hadoop.hive.common.io.DigestPrintStream;
+import org.apache.hadoop.hive.common.io.SortAndDigestPrintStream;
+import org.apache.hadoop.hive.common.io.SortPrintStream;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.llap.io.api.LlapProxy;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession;
+import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
+import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
+import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
+import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.ParseDriver;
+import org.apache.hadoop.hive.ql.parse.ParseException;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.HiveCommand;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hive.common.util.StreamPrinter;
+import org.apache.logging.log4j.util.Strings;
+import org.apache.tools.ant.BuildException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+
+import junit.framework.TestSuite;
+
+/**
+ * QTestUtil. Cloned from Hive 3.0.0, as Hive does not release a hive-it-util artifact.
+ *
+ */
+public class QTestUtil {
+  public static final String UTF_8 = "UTF-8";
+  public static final String HIVE_ROOT = getHiveRoot();
+  // security property names
+  private static final String SECURITY_KEY_PROVIDER_URI_NAME = "dfs.encryption.key.provider.uri";
+  private static final String CRLF = System.getProperty("line.separator");
+
+  public static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
+  private static final Logger LOG = LoggerFactory.getLogger("QTestUtil");
+  private final static String defaultInitScript = "q_test_init.sql";
+  private final static String defaultCleanupScript = "q_test_cleanup.sql";
+  private final String[] testOnlyCommands = new String[]{"crypto"};
+
+  private static final String TEST_TMP_DIR_PROPERTY = "test.tmp.dir"; // typically target/tmp
+  private static final String BUILD_DIR_PROPERTY = "build.dir"; // typically target
+
+  public static final String PATH_HDFS_REGEX = "(hdfs://)([a-zA-Z0-9:/_\\-\\.=])+";
+  public static final String PATH_HDFS_WITH_DATE_USER_GROUP_REGEX = "([a-z]+) ([a-z]+)([ ]+)([0-9]+) ([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}) " + PATH_HDFS_REGEX;
+
+  private String testWarehouse;
+  private final String testFiles;
+  protected final String outDir;
+  protected String overrideResultsDir;
+  protected final String logDir;
+  private final TreeMap<String, String> qMap;
+  private final Set<String> qSkipSet;
+  private final Set<String> qSortSet;
+  private final Set<String> qSortQuerySet;
+  private final Set<String> qHashQuerySet;
+  private final Set<String> qSortNHashQuerySet;
+  private final Set<String> qNoSessionReuseQuerySet;
+  private final Set<String> qJavaVersionSpecificOutput;
+  private static final String SORT_SUFFIX = ".sorted";
+  private final Set<String> srcTables;
+  private final Set<String> srcUDFs;
+  private final MiniClusterType clusterType;
+  private final FsType fsType;
+  private ParseDriver pd;
+  protected Hive db;
+  protected QueryState queryState;
+  protected HiveConf conf;
+  private IDriver drv;
+  private BaseSemanticAnalyzer sem;
+  protected final boolean overWrite;
+  private CliDriver cliDriver;
+  private HadoopShims.MiniMrShim mr = null;
+  private HadoopShims.MiniDFSShim dfs = null;
+  private FileSystem fs;
+  private HadoopShims.HdfsEncryptionShim hes = null;
+  private String hadoopVer = null;
+  private QTestSetup setup = null;
+  private SparkSession sparkSession = null;
+  private boolean isSessionStateStarted = false;
+  private static final String javaVersion = getJavaVersion();
+
+  private final String initScript;
+  private final String cleanupScript;
+
+
+  public interface SuiteAddTestFunctor {
+    public void addTestToSuite(TestSuite suite, Object setup, String tName);
+  }
+
+  public static Set<String> getSrcTables() {
+    HashSet<String> srcTables = new HashSet<String>();
+    // FIXME: moved default value to here...for now
+    // I think this feature is never really used from the command line
+    String defaultTestSrcTables = "src,src1,srcbucket,srcbucket2,src_json,src_thrift," +
+        "src_sequencefile,srcpart,alltypesorc,src_hbase,cbo_t1,cbo_t2,cbo_t3,src_cbo,part," +
+        "lineitem,alltypesparquet";
+    for (String srcTable : System.getProperty("test.src.tables", defaultTestSrcTables).trim().split(",")) {
+      srcTable = srcTable.trim();
+      if (!srcTable.isEmpty()) {
+        srcTables.add(srcTable);
+      }
+    }
+    if (srcTables.isEmpty()) {
+      throw new RuntimeException("Source tables cannot be empty");
+    }
+    return srcTables;
+  }
+
+  /**
+   * Returns the default UDF names which should not be removed when resetting the test database
+   * @return The list of the UDF names not to remove
+   */
+  private Set<String> getSrcUDFs() {
+    HashSet<String> srcUDFs = new HashSet<String>();
+    // FIXME: moved default value to here...for now
+    // I think this feature is never really used from the command line
+    String defaultTestSrcUDFs = "qtest_get_java_boolean";
+    for (String srcUDF : System.getProperty("test.src.udfs", defaultTestSrcUDFs).trim().split(","))
+    {
+      srcUDF = srcUDF.trim();
+      if (!srcUDF.isEmpty()) {
+        srcUDFs.add(srcUDF);
+      }
+    }
+    if (srcUDFs.isEmpty()) {
+      throw new RuntimeException("Source UDFs cannot be empty");
+    }
+    return srcUDFs;
+  }
+
+
+
+  public HiveConf getConf() {
+    return conf;
+  }
+
+  public boolean deleteDirectory(File path) {
+    if (path.exists()) {
+      File[] files = path.listFiles();
+      for (File file : files) {
+        if (file.isDirectory()) {
+          deleteDirectory(file);
+        } else {
+          file.delete();
+        }
+      }
+    }
+    return (path.delete());
+  }
+
+  public void copyDirectoryToLocal(Path src, Path dest) throws Exception {
+
+    FileSystem srcFs = src.getFileSystem(conf);
+    FileSystem destFs = dest.getFileSystem(conf);
+    if (srcFs.exists(src)) {
+      FileStatus[] files = srcFs.listStatus(src);
+      for (FileStatus file : files) {
+        String name = file.getPath().getName();
+        Path dfs_path = file.getPath();
+        Path local_path = new Path(dest, name);
+
+        // If this is a source table we do not copy it out
+        if (srcTables.contains(name)) {
+          continue;
+        }
+
+        if (file.isDirectory()) {
+          if (!destFs.exists(local_path)) {
+            destFs.mkdirs(local_path);
+          }
+          copyDirectoryToLocal(dfs_path, local_path);
+        } else {
+          srcFs.copyToLocalFile(dfs_path, local_path);
+        }
+      }
+    }
+  }
+
+  static Pattern mapTok = Pattern.compile("(\\.?)(.*)_map_(.*)");
+  static Pattern reduceTok = Pattern.compile("(.*)(reduce_[^\\.]*)((\\..*)?)");
+
+  public void normalizeNames(File path) throws Exception {
+    if (path.isDirectory()) {
+      File[] files = path.listFiles();
+      for (File file : files) {
+        normalizeNames(file);
+      }
+    } else {
+      Matcher m = reduceTok.matcher(path.getName());
+      if (m.matches()) {
+        String name = m.group(1) + "reduce" + m.group(3);
+        path.renameTo(new File(path.getParent(), name));
+      } else {
+        m = mapTok.matcher(path.getName());
+        if (m.matches()) {
+          String name = m.group(1) + "map_" + m.group(3);
+          path.renameTo(new File(path.getParent(), name));
+        }
+      }
+    }
+  }
+
+  public String getOutputDirectory() {
+    return outDir;
+  }
+
+  public String getLogDirectory() {
+    return logDir;
+  }
+
+  private String getHadoopMainVersion(String input) {
+    if (input == null) {
+      return null;
+    }
+    Pattern p = Pattern.compile("^(\\d+\\.\\d+).*");
+    Matcher m = p.matcher(input);
+    if (m.matches()) {
+      return m.group(1);
+    }
+    return null;
+  }
+
+  public void initConf() throws Exception {
+
+    String vectorizationEnabled = System.getProperty("test.vectorization.enabled");
+    if(vectorizationEnabled != null && vectorizationEnabled.equalsIgnoreCase("true")) {
+      conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, true);
+    }
+
+    // Plug verifying metastore in for testing DirectSQL.
+    conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL,
+        "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
+
+    if (mr != null) {
+      mr.setupConfiguration(conf);
+
+      // TODO Ideally this should be done independent of whether mr is setup or not.
+      setFsRelatedProperties(conf, fs.getScheme().equals("file"),fs);
+    }
+    conf.set(ConfVars.HIVE_EXECUTION_ENGINE.varname, clusterType.name());
+  }
+
+  private void setFsRelatedProperties(HiveConf conf, boolean isLocalFs, FileSystem fs) {
+    String fsUriString = fs.getUri().toString();
+
+    // Different paths if running locally vs a remote fileSystem. Ideally this difference should not exist.
+    Path warehousePath;
+    Path jarPath;
+    Path userInstallPath;
+    if (isLocalFs) {
+      String buildDir = System.getProperty(BUILD_DIR_PROPERTY);
+      Preconditions.checkState(Strings.isNotBlank(buildDir));
+      Path path = new Path(fsUriString, buildDir);
+
+      // Create a fake fs root for local fs
+      Path localFsRoot  = new Path(path, "localfs");
+      warehousePath = new Path(localFsRoot, "warehouse");
+      jarPath = new Path(localFsRoot, "jar");
+      userInstallPath = new Path(localFsRoot, "user_install");
+    } else {
+      // TODO Why is this changed from the default in hive-conf?
+      warehousePath = new Path(fsUriString, "/build/ql/test/data/warehouse/");
+      jarPath = new Path(new Path(fsUriString, "/user"), "hive");
+      userInstallPath = new Path(fsUriString, "/user");
+    }
+
+    warehousePath = fs.makeQualified(warehousePath);
+    jarPath = fs.makeQualified(jarPath);
+    userInstallPath = fs.makeQualified(userInstallPath);
+
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUriString);
+
+    // Remote dirs
+    conf.setVar(ConfVars.METASTOREWAREHOUSE, warehousePath.toString());
+    conf.setVar(ConfVars.HIVE_JAR_DIRECTORY, jarPath.toString());
+    conf.setVar(ConfVars.HIVE_USER_INSTALL_DIR, userInstallPath.toString());
+    // ConfVars.SCRATCHDIR - {test.tmp.dir}/scratchdir
+
+    // Local dirs
+    // ConfVars.LOCALSCRATCHDIR - {test.tmp.dir}/localscratchdir
+
+    // TODO Make sure to cleanup created dirs.
+  }
+
+  private void createRemoteDirs() {
+    assert fs != null;
+    Path warehousePath = fs.makeQualified(new Path(conf.getVar(ConfVars.METASTOREWAREHOUSE)));
+    assert warehousePath != null;
+    Path hiveJarPath = fs.makeQualified(new Path(conf.getVar(ConfVars.HIVE_JAR_DIRECTORY)));
+    assert hiveJarPath != null;
+    Path userInstallPath = fs.makeQualified(new Path(conf.getVar(ConfVars.HIVE_USER_INSTALL_DIR)));
+    assert userInstallPath != null;
+    try {
+      fs.mkdirs(warehousePath);
+    } catch (IOException e) {
+      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
+          e.getMessage());
+    }
+    try {
+      fs.mkdirs(hiveJarPath);
+    } catch (IOException e) {
+      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
+          e.getMessage());
+    }
+    try {
+      fs.mkdirs(userInstallPath);
+    } catch (IOException e) {
+      LOG.error("Failed to create path={}. Continuing. Exception message={}", warehousePath,
+          e.getMessage());
+    }
+  }
+
+  private enum CoreClusterType {
+    MR,
+    TEZ,
+    SPARK,
+    DRUID
+  }
+
+  public enum FsType {
+    local,
+    hdfs,
+    encrypted_hdfs,
+  }
+
+  public enum MiniClusterType {
+
+    mr(CoreClusterType.MR, FsType.hdfs),
+    tez(CoreClusterType.TEZ, FsType.hdfs),
+    tez_local(CoreClusterType.TEZ, FsType.local),
+    spark(CoreClusterType.SPARK, FsType.local),
+    miniSparkOnYarn(CoreClusterType.SPARK, FsType.hdfs),
+    llap(CoreClusterType.TEZ, FsType.hdfs),
+    llap_local(CoreClusterType.TEZ, FsType.local),
+    none(CoreClusterType.MR, FsType.local),
+    druid(CoreClusterType.DRUID, FsType.hdfs);
+
+
+    private final CoreClusterType coreClusterType;
+    private final FsType defaultFsType;
+
+    MiniClusterType(CoreClusterType coreClusterType, FsType defaultFsType) {
+      this.coreClusterType = coreClusterType;
+      this.defaultFsType = defaultFsType;
+    }
+
+    public CoreClusterType getCoreClusterType() {
+      return coreClusterType;
+    }
+
+    public FsType getDefaultFsType() {
+      return defaultFsType;
+    }
+
+    public static MiniClusterType valueForString(String type) {
+      // Replace this with valueOf.
+      if (type.equals("miniMR")) {
+        return mr;
+      } else if (type.equals("tez")) {
+        return tez;
+      } else if (type.equals("tez_local")) {
+        return tez_local;
+      } else if (type.equals("spark")) {
+        return spark;
+      } else if (type.equals("miniSparkOnYarn")) {
+        return miniSparkOnYarn;
+      } else if (type.equals("llap")) {
+        return llap;
+      } else if (type.equals("llap_local")) {
+        return llap_local;
+      } else if (type.equals("druid")) {
+      return druid;
+      } else {
+        return none;
+      }
+    }
+  }
+
+
+  private String getKeyProviderURI() {
+    // Use the target directory if it is not specified
+    String keyDir = HIVE_ROOT + "ql/target/";
+
+    // put the jks file in the current test path only for test purpose
+    return "jceks://file" + new Path(keyDir, "test.jks").toUri();
+  }
+
+  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
+                   String confDir, String hadoopVer, String initScript, String cleanupScript,
+                   boolean withLlapIo) throws Exception {
+    this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript,
+        withLlapIo, null);
+  }
+
+  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
+      String confDir, String hadoopVer, String initScript, String cleanupScript,
+      boolean withLlapIo, FsType fsType)
+    throws Exception {
+    LOG.info("Setting up QTestUtil with outDir="+outDir+", logDir="+logDir+", clusterType="+clusterType+", confDir="+confDir+"," +
+        " hadoopVer="+hadoopVer+", initScript="+initScript+", cleanupScript="+cleanupScript+", withLlapIo="+withLlapIo+"," +
+            " fsType="+fsType+"");
+    Preconditions.checkNotNull(clusterType, "ClusterType cannot be null");
+    if (fsType != null) {
+      this.fsType = fsType;
+    } else {
+      this.fsType = clusterType.getDefaultFsType();
+    }
+    this.outDir = outDir;
+    this.logDir = logDir;
+    this.srcTables=getSrcTables();
+    this.srcUDFs = getSrcUDFs();
+
+    // HIVE-14443 move this fall-back logic to CliConfigs
+    if (confDir != null && !confDir.isEmpty()) {
+      HiveConf.setHiveSiteLocation(new URL("file://"+ new File(confDir).toURI().getPath() + "/hive-site.xml"));
+      MetastoreConf.setHiveSiteLocation(HiveConf.getHiveSiteLocation());
+      System.out.println("Setting hive-site: "+HiveConf.getHiveSiteLocation());
+    }
+
+    queryState = new QueryState.Builder().withHiveConf(new HiveConf(IDriver.class)).build();
+    conf = queryState.getConf();
+    this.hadoopVer = getHadoopMainVersion(hadoopVer);
+    qMap = new TreeMap<String, String>();
+    qSkipSet = new HashSet<String>();
+    qSortSet = new HashSet<String>();
+    qSortQuerySet = new HashSet<String>();
+    qHashQuerySet = new HashSet<String>();
+    qSortNHashQuerySet = new HashSet<String>();
+    qNoSessionReuseQuerySet = new HashSet<String>();
+    qJavaVersionSpecificOutput = new HashSet<String>();
+    this.clusterType = clusterType;
+
+    HadoopShims shims = ShimLoader.getHadoopShims();
+
+    setupFileSystem(shims);
+
+    setup = new QTestSetup();
+    setup.preTest(conf);
+
+    setupMiniCluster(shims, confDir);
+
+    initConf();
+
+    if (withLlapIo && (clusterType == MiniClusterType.none)) {
+      LOG.info("initializing llap IO");
+      LlapProxy.initializeLlapIo(conf);
+    }
+
+
+    // Use the current directory if it is not specified
+    String dataDir = conf.get("test.data.files");
+    if (dataDir == null) {
+      dataDir = new File(".").getAbsolutePath() + "/data/files";
+    }
+    testFiles = dataDir;
+
+    // Use the current directory if it is not specified
+    String scriptsDir = conf.get("test.data.scripts");
+    if (scriptsDir == null) {
+      scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
+    }
+
+    this.initScript = scriptsDir + File.separator + initScript;
+    this.cleanupScript = scriptsDir + File.separator + cleanupScript;
+
+    overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));
+
+    init();
+  }
+
+  private void setupFileSystem(HadoopShims shims) throws IOException {
+
+    if (fsType == FsType.local) {
+      fs = FileSystem.getLocal(conf);
+    } else if (fsType == FsType.hdfs || fsType == FsType.encrypted_hdfs) {
+      int numDataNodes = 4;
+
+      if (fsType == FsType.encrypted_hdfs) {
+        // Set the security key provider so that the MiniDFS cluster is initialized
+        // with encryption
+        conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI());
+        conf.setInt("fs.trash.interval", 50);
+
+        dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
+        fs = dfs.getFileSystem();
+
+        // set up the java key provider for encrypted hdfs cluster
+        hes = shims.createHdfsEncryptionShim(fs, conf);
+
+        LOG.info("key provider is initialized");
+      } else {
+        dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
+        fs = dfs.getFileSystem();
+      }
+    } else {
+      throw new IllegalArgumentException("Unknown or unhandled fsType [" + fsType + "]");
+    }
+  }
+
+  private void setupMiniCluster(HadoopShims shims, String confDir) throws
+      IOException {
+
+    String uriString = fs.getUri().toString();
+
+    if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
+      if (confDir != null && !confDir.isEmpty()) {
+        conf.addResource(new URL("file://" + new File(confDir).toURI().getPath()
+            + "/tez-site.xml"));
+      }
+      int numTrackers = 2;
+      if (EnumSet.of(MiniClusterType.llap_local, MiniClusterType.tez_local).contains(clusterType)) {
+        mr = shims.getLocalMiniTezCluster(conf, clusterType == MiniClusterType.llap_local);
+      } else {
+        mr = shims.getMiniTezCluster(conf, numTrackers, uriString,
+            EnumSet.of(MiniClusterType.llap, MiniClusterType.llap_local).contains(clusterType));
+      }
+    } else if (clusterType == MiniClusterType.miniSparkOnYarn) {
+      mr = shims.getMiniSparkCluster(conf, 2, uriString, 1);
+    } else if (clusterType == MiniClusterType.mr) {
+      mr = shims.getMiniMrCluster(conf, 2, uriString, 1);
+    }
+  }
+
+
+  public void shutdown() throws Exception {
+    if (System.getenv(QTEST_LEAVE_FILES) == null) {
+      cleanUp();
+    }
+
+    if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
+      SessionState.get().getTezSession().destroy();
+    }
+    
+    setup.tearDown();
+    if (sparkSession != null) {
+      try {
+        SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
+      } catch (Exception ex) {
+        LOG.error("Error closing spark session.", ex);
+      } finally {
+        sparkSession = null;
+      }
+    }
+    if (mr != null) {
+      mr.shutdown();
+      mr = null;
+    }
+    FileSystem.closeAll();
+    if (dfs != null) {
+      dfs.shutdown();
+      dfs = null;
+    }
+    Hive.closeCurrent();
+  }
+
+  public String readEntireFileIntoString(File queryFile) throws IOException {
+    InputStreamReader isr = new InputStreamReader(
+        new BufferedInputStream(new FileInputStream(queryFile)), QTestUtil.UTF_8);
+    StringWriter sw = new StringWriter();
+    try {
+      IOUtils.copy(isr, sw);
+    } finally {
+      if (isr != null) {
+        isr.close();
+      }
+    }
+    return sw.toString();
+  }
+
+  public void addFile(String queryFile) throws IOException {
+    addFile(queryFile, false);
+  }
+
+  public void addFile(String queryFile, boolean partial) throws IOException {
+    addFile(new File(queryFile), partial);
+  }
+
+  public void addFile(File qf) throws IOException {
+    addFile(qf, false);
+  }
+
+  public void addFile(File qf, boolean partial) throws IOException  {
+    String query = readEntireFileIntoString(qf);
+    qMap.put(qf.getName(), query);
+    if (partial) {
+      return;
+    }
+
+    if(checkHadoopVersionExclude(qf.getName(), query)) {
+      qSkipSet.add(qf.getName());
+    }
+
+    if (checkNeedJavaSpecificOutput(qf.getName(), query)) {
+      qJavaVersionSpecificOutput.add(qf.getName());
+    }
+
+    if (matches(SORT_BEFORE_DIFF, query)) {
+      qSortSet.add(qf.getName());
+    } else if (matches(SORT_QUERY_RESULTS, query)) {
+      qSortQuerySet.add(qf.getName());
+    } else if (matches(HASH_QUERY_RESULTS, query)) {
+      qHashQuerySet.add(qf.getName());
+    } else if (matches(SORT_AND_HASH_QUERY_RESULTS, query)) {
+      qSortNHashQuerySet.add(qf.getName());
+    }
+    if (matches(NO_SESSION_REUSE, query)) {
+      qNoSessionReuseQuerySet.add(qf.getName());
+    }
+  }
+
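+  // These directives are free-form "-- ..." comment lines matched anywhere in a qfile's text,
+  // e.g. a line reading "-- SORT_QUERY_RESULTS" near the top of the query file (illustrative
+  // placement; only the marker text itself matters).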
+  private static final Pattern SORT_BEFORE_DIFF = Pattern.compile("-- SORT_BEFORE_DIFF");
+  private static final Pattern SORT_QUERY_RESULTS = Pattern.compile("-- SORT_QUERY_RESULTS");
+  private static final Pattern HASH_QUERY_RESULTS = Pattern.compile("-- HASH_QUERY_RESULTS");
+  private static final Pattern SORT_AND_HASH_QUERY_RESULTS = Pattern.compile("-- SORT_AND_HASH_QUERY_RESULTS");
+  private static final Pattern NO_SESSION_REUSE = Pattern.compile("-- NO_SESSION_REUSE");
+
+  private boolean matches(Pattern pattern, String query) {
+    return pattern.matcher(query).find();
+  }
+
+  private boolean checkHadoopVersionExclude(String fileName, String query){
+
+    // Look for a hint to not run a test on some Hadoop versions
+    Pattern pattern = Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");
+
+    boolean excludeQuery = false;
+    boolean includeQuery = false;
+    Set<String> versionSet = new HashSet<String>();
+    String hadoopVer = ShimLoader.getMajorVersion();
+
+    Matcher matcher = pattern.matcher(query);
+
+    // Each qfile may include at most one INCLUDE or EXCLUDE directive.
+    //
+    // If a qfile contains an INCLUDE directive, and hadoopVer does
+    // not appear in the list of versions to include, then the qfile
+    // is skipped.
+    //
+    // If a qfile contains an EXCLUDE directive, and hadoopVer is
+    // listed in the list of versions to EXCLUDE, then the qfile is
+    // skipped.
+    //
+    // Otherwise, the qfile is included.
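+    //
+    // For example (illustrative values), a qfile beginning with
+    //   -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
+    // is skipped when ShimLoader.getMajorVersion() returns "0.20", while one beginning with
+    //   -- INCLUDE_HADOOP_MAJOR_VERSIONS(2.0, 3.0)
+    // only runs on those two major versions.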
+
+    if (matcher.find()) {
+
+      String prefix = matcher.group(1);
+      if ("EX".equals(prefix)) {
+        excludeQuery = true;
+      } else {
+        includeQuery = true;
+      }
+
+      String versions = matcher.group(2);
+      for (String s : versions.split("\\,")) {
+        s = s.trim();
+        versionSet.add(s);
+      }
+    }
+
+    if (matcher.find()) {
+      //2nd match is not supposed to be there
+      String message = "QTestUtil: qfile " + fileName
+        + " contains more than one reference to (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS";
+      throw new UnsupportedOperationException(message);
+    }
+
+    if (excludeQuery && versionSet.contains(hadoopVer)) {
+      System.out.println("QTestUtil: " + fileName
+        + " EXCLUDE list contains Hadoop Version " + hadoopVer + ". Skipping...");
+      return true;
+    } else if (includeQuery && !versionSet.contains(hadoopVer)) {
+      System.out.println("QTestUtil: " + fileName
+        + " INCLUDE list does not contain Hadoop Version " + hadoopVer + ". Skipping...");
+      return true;
+    }
+    return false;
+  }
+
+  private boolean checkNeedJavaSpecificOutput(String fileName, String query) {
+    Pattern pattern = Pattern.compile("-- JAVA_VERSION_SPECIFIC_OUTPUT");
+    Matcher matcher = pattern.matcher(query);
+    if (matcher.find()) {
+      System.out.println("Test is flagged to generate Java version specific " +
+          "output. Since we are using Java version " + javaVersion +
+          ", we will generated Java " + javaVersion + " specific " +
+          "output file for query file " + fileName);
+      return true;
+    }
+
+    return false;
+  }
+
+  /**
+   * Get formatted Java version to include minor version, but
+   * exclude patch level.
+   *
+   * @return Java version formatted as major_version.minor_version
+   */
+  private static String getJavaVersion() {
+    String version = System.getProperty("java.version");
+    if (version == null) {
+      throw new NullPointerException("No java version could be determined " +
+          "from system properties");
+    }
+
+    // "java version" system property is formatted
+    // major_version.minor_version.patch_level.
+    // Find second dot, instead of last dot, to be safe
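+    // e.g. a "java.version" of "1.8.0_161" yields "1.8" (illustrative value).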
+    int pos = version.indexOf('.');
+    pos = version.indexOf('.', pos + 1);
+    return version.substring(0, pos);
+  }
+
+  /**
+   * Clear out any side effects of running tests
+   */
+  public void clearPostTestEffects() throws Exception {
+    setup.postTest(conf);
+  }
+
+  public void clearKeysCreatedInTests() {
+    if (hes == null) {
+      return;
+    }
+    try {
+      for (String keyAlias : hes.getKeys()) {
+        hes.deleteKey(keyAlias);
+      }
+    } catch (IOException e) {
+      LOG.error("Fail to clean the keys created in test due to the error", e);
+    }
+  }
+
+  public void clearUDFsCreatedDuringTests() throws Exception {
+    if (System.getenv(QTEST_LEAVE_FILES) != null) {
+      return;
+    }
+    // Delete functions created by the tests
+    // It is enough to remove functions from the default database, other databases are dropped
+    for (String udfName : db.getFunctions(DEFAULT_DATABASE_NAME, ".*")) {
+      if (!srcUDFs.contains(udfName)) {
+        db.dropFunction(DEFAULT_DATABASE_NAME, udfName);
+      }
+    }
+  }
+
+  /**
+   * Clear out any side effects of running tests
+   */
+  public void clearTablesCreatedDuringTests() throws Exception {
+    if (System.getenv(QTEST_LEAVE_FILES) != null) {
+      return;
+    }
+
+    conf.set("hive.metastore.filter.hook",
+        "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl");
+    db = Hive.get(conf);
+
+    // First delete any MVs to avoid race conditions
+    for (String dbName : db.getAllDatabases()) {
+      SessionState.get().setCurrentDatabase(dbName);
+      for (String tblName : db.getAllTables()) {
+        Table tblObj = null;
+        try {
+          tblObj = db.getTable(tblName);
+        } catch (InvalidTableException e) {
+          LOG.warn("Trying to drop table " + e.getTableName() + ". But it does not exist.");
+          continue;
+        }
+        // only remove MVs first
+        if (!tblObj.isMaterializedView()) {
+          continue;
+        }
+        db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs);
+      }
+    }
+
+    // Delete any tables other than the source tables
+    // and any databases other than the default database.
+    for (String dbName : db.getAllDatabases()) {
+      SessionState.get().setCurrentDatabase(dbName);
+      for (String tblName : db.getAllTables()) {
+        if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) {
+          Table tblObj = null;
+          try {
+            tblObj = db.getTable(tblName);
+          } catch (InvalidTableException e) {
+            LOG.warn("Trying to drop table " + e.getTableName() + ". But it does not exist.");
+            continue;
+          }
+          // Materialized views were already dropped in the first pass above.
+          if (tblObj.isMaterializedView()) {
+            continue;
+          }
+          db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs);
+        }
+      }
+      if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
+        // Drop cascade, functions dropped by cascade
+        db.dropDatabase(dbName, true, true, true);
+      }
+    }
+
+    // delete remaining directories for external tables (can affect stats for following tests)
+    try {
+      Path p = new Path(testWarehouse);
+      FileSystem fileSystem = p.getFileSystem(conf);
+      if (fileSystem.exists(p)) {
+        for (FileStatus status : fileSystem.listStatus(p)) {
+          if (status.isDirectory() && !srcTables.contains(status.getPath().getName())) {
+            fileSystem.delete(status.getPath(), true);
+          }
+        }
+      }
+    } catch (IllegalArgumentException e) {
+      // ignore.. provides invalid url sometimes intentionally
+    }
+    SessionState.get().setCurrentDatabase(DEFAULT_DATABASE_NAME);
+
+    List<String> roleNames = db.getAllRoleNames();
+    for (String roleName : roleNames) {
+      if (!"PUBLIC".equalsIgnoreCase(roleName) && !"ADMIN".equalsIgnoreCase(roleName)) {
+        db.dropRole(roleName);
+      }
+    }
+  }
+
+  /**
+   * Clear out any side effects of running tests
+   */
+  public void clearTestSideEffects() throws Exception {
+    if (System.getenv(QTEST_LEAVE_FILES) != null) {
+      return;
+    }
+
+    // Remove any cached results from the previous test.
+    QueryResultsCache.cleanupInstance();
+
+    // allocate and initialize a new conf since a test can
+    // modify conf by using 'set' commands
+    conf = new HiveConf(IDriver.class);
+    initConf();
+    initConfFromSetup();
+
+    // renew the metastore since the cluster type is unencrypted
+    db = Hive.get(conf);  // propagate new conf to meta store
+
+    clearTablesCreatedDuringTests();
+    clearUDFsCreatedDuringTests();
+    clearKeysCreatedInTests();
+  }
+
+  protected void initConfFromSetup() throws Exception {
+    setup.preTest(conf);
+  }
+
+  public void cleanUp() throws Exception {
+    cleanUp(null);
+  }
+
+  public void cleanUp(String tname) throws Exception {
+    boolean canReuseSession = (tname == null) || !qNoSessionReuseQuerySet.contains(tname);
+    if(!isSessionStateStarted) {
+      startSessionState(canReuseSession);
+    }
+    if (System.getenv(QTEST_LEAVE_FILES) != null) {
+      return;
+    }
+
+    clearTablesCreatedDuringTests();
+    clearUDFsCreatedDuringTests();
+    clearKeysCreatedInTests();
+
+    File cleanupFile = new File(cleanupScript);
+    if (cleanupFile.isFile()) {
+      String cleanupCommands = readEntireFileIntoString(cleanupFile);
+      LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
+      if(cliDriver == null) {
+        cliDriver = new CliDriver();
+      }
+      SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
+      int result = cliDriver.processLine(cleanupCommands);
+      if (result != 0) {
+        LOG.error("Failed during cleanup processLine with code={}. Ignoring", result);
+        // TODO Convert this to an Assert.fail once HIVE-14682 is fixed
+      }
+      SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", false);
+    } else {
+      LOG.info("No cleanup script detected. Skipping.");
+    }
+
+    // delete any contents in the warehouse dir
+    Path p = new Path(testWarehouse);
+    FileSystem fs = p.getFileSystem(conf);
+
+    try {
+      FileStatus [] ls = fs.listStatus(p);
+      for (int i=0; (ls != null) && (i<ls.length); i++) {
+        fs.delete(ls[i].getPath(), true);
+      }
+    } catch (FileNotFoundException e) {
+      // Best effort
+    }
+
+    // TODO: Clean up all the other paths that are created.
+
+    FunctionRegistry.unregisterTemporaryUDF("test_udaf");
+    FunctionRegistry.unregisterTemporaryUDF("test_error");
+  }
+
+  protected void runCreateTableCmd(String createTableCmd) throws Exception {
+    int ecode = drv.run(createTableCmd).getResponseCode();
+    if (ecode != 0) {
+      throw new Exception("create table command: " + createTableCmd
+          + " failed with exit code= " + ecode);
+    }
+  }
+
+  protected void runCmd(String cmd) throws Exception {
+    int ecode = drv.run(cmd).getResponseCode();
+    drv.close();
+    if (ecode != 0) {
+      throw new Exception("command: " + cmd
+          + " failed with exit code= " + ecode);
+    }
+  }
+
+  public void createSources() throws Exception {
+    createSources(null);
+  }
+
+  public void createSources(String tname) throws Exception {
+    boolean canReuseSession = (tname == null) || !qNoSessionReuseQuerySet.contains(tname);
+    if(!isSessionStateStarted) {
+      startSessionState(canReuseSession);
+    }
+
+    if(cliDriver == null) {
+      cliDriver = new CliDriver();
+    }
+    cliDriver.processLine("set test.data.dir=" + testFiles + ";");
+    File scriptFile = new File(this.initScript);
+    if (!scriptFile.isFile()) {
+      LOG.info("No init script detected. Skipping");
+      return;
+    }
+    conf.setBoolean("hive.test.init.phase", true);
+
+    String initCommands = readEntireFileIntoString(scriptFile);
+    LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
+
+    int result = cliDriver.processLine(initCommands);
+    LOG.info("Result from cliDrriver.processLine in createSources=" + result);
+    if (result != 0) {
+      Assert.fail("Failed during createSources processLine with code=" + result);
+    }
+
+    conf.setBoolean("hive.test.init.phase", false);
+  }
+
+  public void init() throws Exception {
+
+    // Create remote dirs once.
+    if (mr != null) {
+      createRemoteDirs();
+    }
+
+    // Create views registry
+    HiveMaterializedViewsRegistry.get().init();
+
+    testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
+    String execEngine = conf.get("hive.execution.engine");
+    conf.set("hive.execution.engine", "mr");
+    SessionState.start(conf);
+    conf.set("hive.execution.engine", execEngine);
+    db = Hive.get(conf);
+    drv = DriverFactory.newDriver(conf);
+    pd = new ParseDriver();
+    sem = new SemanticAnalyzer(queryState);
+  }
+
+  public void init(String tname) throws Exception {
+    cleanUp(tname);
+    createSources(tname);
+    cliDriver.processCmd("set hive.cli.print.header=true;");
+  }
+
+  public void cliInit(String tname) throws Exception {
+    cliInit(tname, true);
+  }
+
+  public String cliInit(String tname, boolean recreate) throws Exception {
+    if (recreate) {
+      cleanUp(tname);
+      createSources(tname);
+    }
+
+    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
+        "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
+    Utilities.clearWorkMap(conf);
+    CliSessionState ss = new CliSessionState(conf);
+    assert ss != null;
+    ss.in = System.in;
+
+    String outFileExtension = getOutFileExtension(tname);
+    String stdoutName = null;
+    if (outDir != null) {
+      // TODO: why is this needed?
+      File qf = new File(outDir, tname);
+      stdoutName = qf.getName().concat(outFileExtension);
+    } else {
+      stdoutName = tname + outFileExtension;
+    }
+
+    File outf = new File(logDir, stdoutName);
+    OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
+    if (qSortQuerySet.contains(tname)) {
+      ss.out = new SortPrintStream(fo, "UTF-8");
+    } else if (qHashQuerySet.contains(tname)) {
+      ss.out = new DigestPrintStream(fo, "UTF-8");
+    } else if (qSortNHashQuerySet.contains(tname)) {
+      ss.out = new SortAndDigestPrintStream(fo, "UTF-8");
+    } else {
+      ss.out = new PrintStream(fo, true, "UTF-8");
+    }
+    ss.err = new CachingPrintStream(fo, true, "UTF-8");
+    ss.setIsSilent(true);
+    SessionState oldSs = SessionState.get();
+
+    boolean canReuseSession = !qNoSessionReuseQuerySet.contains(tname);
+    if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
+      // Copy the tezSessionState from the old CliSessionState.
+      TezSessionState tezSessionState = oldSs.getTezSession();
+      oldSs.setTezSession(null);
+      ss.setTezSession(tezSessionState);
+      oldSs.close();
+    }
+
+    if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.SPARK) {
+      sparkSession = oldSs.getSparkSession();
+      ss.setSparkSession(sparkSession);
+      oldSs.setSparkSession(null);
+      oldSs.close();
+    }
+
+    if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
+      oldSs.out.close();
+    }
+    if (oldSs != null) {
+      oldSs.close();
+    }
+    SessionState.start(ss);
+
+    cliDriver = new CliDriver();
+
+    if (tname.equals("init_file.q")) {
+      ss.initFiles.add(HIVE_ROOT + "/data/scripts/test_init_file.sql");
+    }
+    cliDriver.processInitFiles(ss);
+
+    return outf.getAbsolutePath();
+  }
+
+  private CliSessionState startSessionState(boolean canReuseSession)
+      throws IOException {
+
+    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
+        "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
+
+    String execEngine = conf.get("hive.execution.engine");
+    conf.set("hive.execution.engine", "mr");
+    CliSessionState ss = new CliSessionState(conf);
+    assert ss != null;
+    ss.in = System.in;
+    ss.out = System.out;
+    ss.err = System.out;
+
+    SessionState oldSs = SessionState.get();
+    if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
+      // Copy the tezSessionState from the old CliSessionState.
+      TezSessionState tezSessionState = oldSs.getTezSession();
+      ss.setTezSession(tezSessionState);
+      oldSs.setTezSession(null);
+      oldSs.close();
+    }
+
+    if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.SPARK) {
+      sparkSession = oldSs.getSparkSession();
+      ss.setSparkSession(sparkSession);
+      oldSs.setSparkSession(null);
+      oldSs.close();
+    }
+    if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
+      oldSs.out.close();
+    }
+    if (oldSs != null) {
+      oldSs.close();
+    }
+    SessionState.start(ss);
+
+    isSessionStateStarted = true;
+
+    conf.set("hive.execution.engine", execEngine);
+    return ss;
+  }
+
+  public int executeAdhocCommand(String q) {
+    if (!q.contains(";")) {
+      return -1;
+    }
+
+    String q1 = q.split(";")[0] + ";";
+
+    LOG.debug("Executing " + q1);
+    return cliDriver.processLine(q1);
+  }
+
+  public int executeOne(String tname) {
+    String q = qMap.get(tname);
+
+    if (q.indexOf(";") == -1) {
+      return -1;
+    }
+
+    String q1 = q.substring(0, q.indexOf(";") + 1);
+    String qrest = q.substring(q.indexOf(";") + 1);
+    qMap.put(tname, qrest);
+
+    System.out.println("Executing " + q1);
+    return cliDriver.processLine(q1);
+  }
+
+  public int execute(String tname) {
+    return drv.run(qMap.get(tname)).getResponseCode();
+  }
+
+  public int executeClient(String tname1, String tname2) {
+    String commands = getCommand(tname1) + CRLF + getCommand(tname2);
+    return executeClientInternal(commands);
+  }
+
+  public int executeClient(String tname) {
+    return executeClientInternal(getCommand(tname));
+  }
+
+  private int executeClientInternal(String commands) {
+    List<String> cmds = CliDriver.splitSemiColon(commands);
+    int rc = 0;
+
+    String command = "";
+    for (String oneCmd : cmds) {
+      if (StringUtils.endsWith(oneCmd, "\\")) {
+        command += StringUtils.chop(oneCmd) + "\\;";
+        continue;
+      } else {
+        if (isHiveCommand(oneCmd)) {
+          command = oneCmd;
+        } else {
+          command += oneCmd;
+        }
+      }
+      if (StringUtils.isBlank(command)) {
+        continue;
+      }
+
+      if (isCommandUsedForTesting(command)) {
+        rc = executeTestCommand(command);
+      } else {
+        rc = cliDriver.processLine(command);
+      }
+
+      if (rc != 0 && !ignoreErrors()) {
+        break;
+      }
+      command = "";
+    }
+    if (rc == 0 && SessionState.get() != null) {
+      SessionState.get().setLastCommand(null);  // reset
+    }
+    return rc;
+  }
+
+  /**
+   * This allows a .q file to continue executing after a statement runs into an error, which is
+   * convenient if you want to run another Hive command after the failure to sanity-check the
+   * state of the system.
+   */
+  private boolean ignoreErrors() {
+    return conf.getBoolVar(HiveConf.ConfVars.CLIIGNOREERRORS);
+  }
+
+  private boolean isHiveCommand(String command) {
+    String[] cmd = command.trim().split("\\s+");
+    if (HiveCommand.find(cmd) != null) {
+      return true;
+    } else if (HiveCommand.find(cmd, HiveCommand.ONLY_FOR_TESTING) != null) {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  private int executeTestCommand(final String command) {
+    String commandName = command.trim().split("\\s+")[0];
+    String commandArgs = command.trim().substring(commandName.length());
+
+    if (commandArgs.endsWith(";")) {
+      commandArgs = StringUtils.chop(commandArgs);
+    }
+
+    // Replace ${hiveconf:hive.metastore.warehouse.dir} with the actual dir if it exists.
+    // We only want the absolute path, so strip the scheme and authority, such as hdfs://localhost:57145.
+    String wareHouseDir = SessionState.get().getConf().getVar(ConfVars.METASTOREWAREHOUSE)
+        .replaceAll("^[a-zA-Z]+://.*?:\\d+", "");
+    commandArgs = commandArgs.replaceAll("\\$\\{hiveconf:hive\\.metastore\\.warehouse\\.dir\\}",
+      wareHouseDir);
+
+    if (SessionState.get() != null) {
+      SessionState.get().setLastCommand(commandName + " " + commandArgs.trim());
+    }
+
+    enableTestOnlyCmd(SessionState.get().getConf());
+
+    try {
+      CommandProcessor proc = getTestCommand(commandName);
+      if (proc != null) {
+        CommandProcessorResponse response = proc.run(commandArgs.trim());
+
+        int rc = response.getResponseCode();
+        if (rc != 0) {
+          SessionState.getConsole().printError(response.toString(), response.getException() != null ?
+                  Throwables.getStackTraceAsString(response.getException()) : "");
+        }
+
+        return rc;
+      } else {
+        throw new RuntimeException("Could not get CommandProcessor for command: " + commandName);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException("Could not execute test command", e);
+    }
+  }
+
+  private CommandProcessor getTestCommand(final String commandName) throws SQLException {
+    HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING);
+
+    if (testCommand == null) {
+      return null;
+    }
+
+    return CommandProcessorFactory
+      .getForHiveCommandInternal(new String[]{commandName}, SessionState.get().getConf(),
+        testCommand.isOnlyForTesting());
+  }
+
+  private void enableTestOnlyCmd(HiveConf conf){
+    StringBuilder securityCMDs = new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST));
+    for(String c : testOnlyCommands){
+      securityCMDs.append(",");
+      securityCMDs.append(c);
+    }
+    conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString());
+  }
+
+  private boolean isCommandUsedForTesting(final String command) {
+    String commandName = command.trim().split("\\s+")[0];
+    HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING);
+    return testCommand != null;
+  }
+
+  private String getCommand(String tname) {
+    String commands = qMap.get(tname);
+    StringBuilder newCommands = new StringBuilder(commands.length());
+    int lastMatchEnd = 0;
+    Matcher commentMatcher = Pattern.compile("^--.*$", Pattern.MULTILINE).matcher(commands);
+    // remove the comments
+    while (commentMatcher.find()) {
+      newCommands.append(commands.substring(lastMatchEnd, commentMatcher.start()));
+      lastMatchEnd = commentMatcher.end();
+    }
+    newCommands.append(commands.substring(lastMatchEnd, commands.length()));
+    commands = newCommands.toString();
+    return commands;
+  }
+
+  public boolean shouldBeSkipped(String tname) {
+    return qSkipSet.contains(tname);
+  }
+
+  private String getOutFileExtension(String fname) {
+    String outFileExtension = ".out";
+    if (qJavaVersionSpecificOutput.contains(fname)) {
+      outFileExtension = ".java" + javaVersion + ".out";
+    }
+
+    return outFileExtension;
+  }
+
+  public void convertSequenceFileToTextFile() throws Exception {
+    // Create an instance of hive in order to create the tables
+    testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
+    db = Hive.get(conf);
+
+    // Move all data from dest4_sequencefile to dest4
+    drv
+      .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
+
+    // Drop dest4_sequencefile
+    db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, "dest4_sequencefile",
+        true, true);
+  }
+
+  public QTestProcessExecResult checkNegativeResults(String tname, Exception e) throws Exception {
+
+    String outFileExtension = getOutFileExtension(tname);
+
+    File qf = new File(outDir, tname);
+    String expf = outPath(outDir.toString(), tname.concat(outFileExtension));
+
+    File outf = null;
+    outf = new File(logDir);
+    outf = new File(outf, qf.getName().concat(outFileExtension));
+
+    FileWriter outfd = new FileWriter(outf);
+    if (e instanceof ParseException) {
+      outfd.write("Parse Error: ");
+    } else if (e instanceof SemanticException) {
+      outfd.write("Semantic Exception: \n");
+    } else {
+      throw e;
+    }
+
+    outfd.write(e.getMessage());
+    outfd.close();
+
+    QTestProcessExecResult result = executeDiffCommand(outf.getPath(), expf, false,
+                                     qSortSet.contains(qf.getName()));
+    if (overWrite) {
+      overwriteResults(outf.getPath(), expf);
+      return QTestProcessExecResult.createWithoutOutput(0);
+    }
+
+    return result;
+  }
+
+  public QTestProcessExecResult checkParseResults(String tname, ASTNode tree) throws Exception {
+
+    if (tree != null) {
+      String outFileExtension = getOutFileExtension(tname);
+
+      File parseDir = new File(outDir, "parse");
+      String expf = outPath(parseDir.toString(), tname.concat(outFileExtension));
+
+      File outf = null;
+      outf = new File(logDir);
+      outf = new File(outf, tname.concat(outFileExtension));
+
+      FileWriter outfd = new FileWriter(outf);
+      outfd.write(tree.toStringTree());
+      outfd.close();
+
+      QTestProcessExecResult exitVal = executeDiffCommand(outf.getPath(), expf, false, false);
+
+      if (overWrite) {
+        overwriteResults(outf.getPath(), expf);
+        return QTestProcessExecResult.createWithoutOutput(0);
+      }
+
+      return exitVal;
+    } else {
+      throw new Exception("Parse tree is null");
+    }
+  }
+
+  /**
+   * Given the current configurations (e.g., hadoop version and execution mode), return
+   * the correct file name to compare with the current test run output.
+   * @param outDir The directory where the reference log files are stored.
+   * @param testName The test file name (terminated by ".out").
+   * @return The file name appended with the configuration values if it exists.
+   */
+  public String outPath(String outDir, String testName) {
+    String ret = (new File(outDir, testName)).getPath();
+    // List of configurations. Currently the list consists of hadoop version and execution mode only
+    List<String> configs = new ArrayList<String>();
+    configs.add(this.clusterType.toString());
+    configs.add(this.hadoopVer);
+
+    Deque<String> stack = new LinkedList<String>();
+    StringBuilder sb = new StringBuilder();
+    sb.append(testName);
+    stack.push(sb.toString());
+
+    // example file names are input1.q.out_mr_0.17 or input2.q.out_0.17
+    for (String s: configs) {
+      sb.append('_');
+      sb.append(s);
+      stack.push(sb.toString());
+    }
+    while (stack.size() > 0) {
+      String fileName = stack.pop();
+      File f = new File(outDir, fileName);
+      if (f.exists()) {
+        ret = f.getPath();
+        break;
+      }
+    }
+    return ret;
+  }
+
+  private Pattern[] toPattern(String[] patternStrs) {
+    Pattern[] patterns = new Pattern[patternStrs.length];
+    for (int i = 0; i < patternStrs.length; i++) {
+      patterns[i] = Pattern.compile(patternStrs[i]);
+    }
+    return patterns;
+  }
+
+  private void maskPatterns(Pattern[] patterns, String fname) throws Exception {
+    String maskPattern = "#### A masked pattern was here ####";
+    String partialMaskPattern = "#### A PARTIAL masked pattern was here ####";
+
+    String line;
+    BufferedReader in;
+    BufferedWriter out;
+
+    File file = new File(fname);
+    File fileOrig = new File(fname + ".orig");
+    FileUtils.copyFile(file, fileOrig);
+
+    in = new BufferedReader(new InputStreamReader(new FileInputStream(fileOrig), "UTF-8"));
+    out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
+
+    boolean lastWasMasked = false;
+    boolean partialMaskWasMatched = false;
+    Matcher matcher;
+    while (null != (line = in.readLine())) {
+      if (fsType == FsType.encrypted_hdfs) {
+        for (Pattern pattern : partialReservedPlanMask) {
+          matcher = pattern.matcher(line);
+          if (matcher.find()) {
+            line = partialMaskPattern + " " + matcher.group(0);
+            partialMaskWasMatched = true;
+            break;
+          }
+        }
+      }
+      else {
+        for (PatternReplacementPair prp : partialPlanMask) {
+          matcher = prp.pattern.matcher(line);
+          if (matcher.find()) {
+            line = line.replaceAll(prp.pattern.pattern(), prp.replacement);
+            partialMaskWasMatched = true;
+          }
+        }
+      }
+
+      if (!partialMaskWasMatched) {
+        for (Pair<Pattern, String> pair : patternsWithMaskComments) {
+          Pattern pattern = pair.getLeft();
+          String maskComment = pair.getRight();
+
+          matcher = pattern.matcher(line);
+          if (matcher.find()) {
+            line = matcher.replaceAll(maskComment);
+            partialMaskWasMatched = true;
+            break;
+          }
+        }
+
+        for (Pattern pattern : patterns) {
+          line = pattern.matcher(line).replaceAll(maskPattern);
+        }
+      }
+
+      if (line.equals(maskPattern)) {
+        // We're folding multiple masked lines into one.
+        if (!lastWasMasked) {
+          out.write(line);
+          out.write("\n");
+          lastWasMasked = true;
+          partialMaskWasMatched = false;
+        }
+      } else {
+        out.write(line);
+        out.write("\n");
+        lastWasMasked = false;
+        partialMaskWasMatched = false;
+      }
+    }
+
+    in.close();
+    out.close();
+  }
+
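+  // Output lines matching any of these patterns have the matched text replaced with
+  // "#### A masked pattern was here ####" in maskPatterns() above; consecutive fully masked
+  // lines are folded into a single masked line before diffing.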
+  private final Pattern[] planMask = toPattern(new String[] {
+      ".*file:.*",
+      ".*pfile:.*",
+      ".*/tmp/.*",
+      ".*invalidscheme:.*",
+      ".*lastUpdateTime.*",
+      ".*lastAccessTime.*",
+      ".*lastModifiedTime.*",
+      ".*[Oo]wner.*",
+      ".*CreateTime.*",
+      ".*LastAccessTime.*",
+      ".*Location.*",
+      ".*LOCATION '.*",
+      ".*transient_lastDdlTime.*",
+      ".*last_modified_.*",
+      ".*at org.*",
+      ".*at sun.*",
+      ".*at java.*",
+      ".*at junit.*",
+      ".*Caused by:.*",
+      ".*LOCK_QUERYID:.*",
+      ".*LOCK_TIME:.*",
+      ".*grantTime.*",
+      ".*[.][.][.] [0-9]* more.*",
+      ".*job_[0-9_]*.*",
+      ".*job_local[0-9_]*.*",
+      ".*USING 'java -cp.*",
+      "^Deleted.*",
+      ".*DagName:.*",
+      ".*DagId:.*",
+      ".*Input:.*/data/files/.*",
+      ".*Output:.*/data/files/.*",
+      ".*total number of created files now is.*",
+      ".*.hive-staging.*",
+      ".*Warning.*",
+      "pk_-?[0-9]*_[0-9]*_[0-9]*",
+      "fk_-?[0-9]*_[0-9]*_[0-9]*",
+      "uk_-?[0-9]*_[0-9]*_[0-9]*",
+      "nn_-?[0-9]*_[0-9]*_[0-9]*",
+      ".*at com\\.sun\\.proxy.*",
+      ".*at com\\.jolbox.*",
+      ".*at com\\.zaxxer.*",
+      "org\\.apache\\.hadoop\\.hive\\.metastore\\.model\\.MConstraint@([0-9]|[a-z])*",
+      "^Repair: Added partition to metastore.*"
+  });
+
+  private final Pattern[] partialReservedPlanMask = toPattern(new String[] {
+      "data/warehouse/(.*?/)+\\.hive-staging"  // the directory might be db/table/partition
+      //TODO: add more expected test result here
+  });
+  /**
+   * Pattern to match and (partial) replacement text.
+   * For example, {"transaction":76,"bucketid":8249877}.  We just want to mask 76 but a regex that
+   * matches just 76 will match a lot of other things.
+   */
+  private final static class PatternReplacementPair {
+    private final Pattern pattern;
+    private final String replacement;
+    PatternReplacementPair(Pattern p, String r) {
+      pattern = p;
+      replacement = r;
+    }
+  }
+  private final PatternReplacementPair[] partialPlanMask;
+  {
+    ArrayList<PatternReplacementPair> ppm = new ArrayList<>();
+    ppm.add(new PatternReplacementPair(Pattern.compile("\\{\"transactionid\":[1-9][0-9]*,\"bucketid\":"),
+      "{\"transactionid\":### Masked txnid ###,\"bucketid\":"));
+
+    ppm.add(new PatternReplacementPair(Pattern.compile("attempt_[0-9]+"), "attempt_#ID#"));
+    ppm.add(new PatternReplacementPair(Pattern.compile("vertex_[0-9_]+"), "vertex_#ID#"));
+    ppm.add(new PatternReplacementPair(Pattern.compile("task_[0-9_]+"), "task_#ID#"));
+    partialPlanMask = ppm.toArray(new PatternReplacementPair[ppm.size()]);
+  }
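+  // Illustrative effect of the partial masks above: a line containing
+  // "vertex_1519370853253_0001_1_00" would be rewritten to "vertex_#ID#" before diffing.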
+  /* This list may be modified by specific cli drivers to mask strings that change on every test */
+  private final List<Pair<Pattern, String>> patternsWithMaskComments =
+      new ArrayList<Pair<Pattern, String>>() {
+        {
+          add(toPatternPair("(pblob|s3.?|swift|wasb.?).*hive-staging.*",
+              "### BLOBSTORE_STAGING_PATH ###"));
+          add(toPatternPair(PATH_HDFS_WITH_DATE_USER_GROUP_REGEX,
+              "### USER ### ### GROUP ###$3$4 ### HDFS DATE ### $6### HDFS PATH ###"));
+          add(toPatternPair(PATH_HDFS_REGEX, "$1### HDFS PATH ###"));
+        }
+      };
+
+  private Pair<Pattern, String> toPatternPair(String patternStr, String maskComment) {
+    return ImmutablePair.of(Pattern.compile(patternStr), maskComment);
+  }
+
+  public void addPatternWithMaskComment(String patternStr, String maskComment) {
+    patternsWithMaskComments.add(toPatternPair(patternStr, maskComment));
+  }
+
+  public QTestProcessExecResult checkCliDriverResults(String tname) throws Exception {
+    assert(qMap.containsKey(tname));
+
+    String outFileExtension = getOutFileExtension(tname);
+    String outFileName = outPath(outDir, tname + outFileExtension);
+
+    File f = new File(logDir, tname + outFileExtension);
+
+    maskPatterns(planMask, f.getPath());
+    QTestProcessExecResult exitVal = executeDiffCommand(f.getPath(),
+                                     outFileName, false,
+                                     qSortSet.contains(tname));
+
+    if (overWrite) {
+      overwriteResults(f.getPath(), outFileName);
+      return QTestProcessExecResult.createWithoutOutput(0);
+    }
+
+    return exitVal;
+  }
+
+
+  public QTestProcessExecResult checkCompareCliDriverResults(String tname, List<String> outputs)
+      throws Exception {
+    assert outputs.size() > 1;
+    maskPatterns(planMask, outputs.get(0));
+    for (int i = 1; i < outputs.size(); ++i) {
+      maskPatterns(planMask, outputs.get(i));
+      QTestProcessExecResult result = executeDiffCommand(
+          outputs.get(i - 1), outputs.get(i), false, qSortSet.contains(tname));
+      if (result.getReturnCode() != 0) {
+        System.out.println("Files don't match: " + outputs.get(i - 1) + " and " + outputs.get(i));
+        return result;
+      }
+    }
+    return QTestProcessExecResult.createWithoutOutput(0);
+  }
+
+  private static void overwriteResults(String inFileName, String outFileName) throws Exception {
+    // This method can be replaced with Files.copy(source, target, REPLACE_EXISTING)
+    // once Hive uses JAVA 7.
+    System.out.println("Overwriting results " + inFileName + " to " + outFileName);
+    int result = executeCmd(new String[]{
+        "cp",
+        getQuotedString(inFileName),
+        getQuotedString(outFileName)
+    }).getReturnCode();
+    if (result != 0) {
+      throw new IllegalStateException("Unexpected error while overwriting " +
+          inFileName + " with " + outFileName);
+    }
+  }
+
+  private static QTestProcessExecResult executeDiffCommand(String inFileName,
+      String outFileName,
+      boolean ignoreWhiteSpace,
+      boolean sortResults
+      ) throws Exception {
+
+    QTestProcessExecResult result;
+
+    if (sortResults) {
+      // sort will try to open the output file in write mode on windows. We need to
+      // close it first.
+      SessionState ss = SessionState.get();
+      if (ss != null && ss.out != null && ss.out != System.out) {
+        ss.out.close();
+      }
+
+      String inSorted = inFileName + SORT_SUFFIX;
+      String outSorted = outFileName + SORT_SUFFIX;
+
+      sortFiles(inFileName, inSorted);
+      sortFiles(outFileName, outSorted);
+
+      inFileName = inSorted;
+      outFileName = outSorted;
+    }
+
+    ArrayList<String> diffCommandArgs = new ArrayList<String>();
+    diffCommandArgs.add("diff");
+
+    // Text file comparison
+    diffCommandArgs.add("-a");
+
+    // Ignore changes in the amount of white space
+    if (ignoreWhiteSpace) {
+      diffCommandArgs.add("-b");
+    }
+
+    // Add files to compare to the arguments list
+    diffCommandArgs.add(getQuotedString(inFileName));
+    diffCommandArgs.add(getQuotedString(outFileName));
+
+    result = executeCmd(diffCommandArgs);
+
+    if (sortResults) {
+      new File(inFileName).delete();
+      new File(outFileName).delete();
+    }
+
+    return result;
+  }
+
+  private static void sortFiles(String in, String out) throws Exception {
+    int result = executeCmd(new String[]{
+        "sort",
+        getQuotedString(in),
+    }, out, null).getReturnCode();
+    if (result != 0) {
+      throw new IllegalStateException("Unexpected error while sorting " + in);
+    }
+  }
+
+  private static QTestProcessExecResult executeCmd(Collection<String> args) throws Exception {
+    return executeCmd(args, null, null);
+  }
+
+  private static QTestProcessExecResult executeCmd(String[] args) throws Exception {
+    return executeCmd(args, null, null);
+  }
+
+  private static QTestProcessExecResult executeCmd(Collection<String> args, String outFile,
+                                            String errFile) throws Exception {
+    String[] cmdArray = args.toArray(new String[args.size()]);
+    return executeCmd(cmdArray, outFile, errFile);
+  }
+
+  private static QTestProcessExecResult executeCmd(String[] args, String outFile,
+                                            String errFile) throws Exception {
+    System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(args, ' '));
+
+    PrintStream out = outFile == null ?
+      SessionState.getConsole().getChildOutStream() :
+      new PrintStream(new FileOutputStream(outFile), true, "UTF-8");
+    PrintStream err = errFile == null ?
+      SessionState.getConsole().getChildErrStream() :
+      new PrintStream(new FileOutputStream(errFile), true, "UTF-8");
+
+    Process executor = Runtime.getRuntime().exec(args);
+
+    ByteArrayOutputStream bos = new ByteArrayOutputStream();
+    PrintStream str = new PrintStream(bos, true, "UTF-8");
+
+    StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, err);
+    StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, out, str);
+
+    outPrinter.start();
+    errPrinter.start();
+
+    int result = executor.waitFor();
+
+    outPrinter.join();
+    errPrinter.join();
+
+    if (outFile != null) {
+      out.close();
+    }
+
+    if (errFile != null) {
+      err.close();
+    }
+
+    return QTestProcessExecResult.
+        create(result, new String(bos.toByteArray(), StandardCharsets.UTF_8));
+  }
+
+  private static String getQuotedString(String str){
+    return str;
+  }
+
+  public ASTNode parseQuery(String tname) throws Exception {
+    return pd.parse(qMap.get(tname));
+  }
+
+  public void resetParser() throws SemanticException {
+    pd = new ParseDriver();
+    queryState = new QueryState.Builder().withHiveConf(conf).build();
+    sem = new SemanticAnalyzer(queryState);
+  }
+
+
+  public List<Task<? extends Serializable>> analyzeAST(ASTNode ast) throws Exception {
+
+    // Do semantic analysis and plan generation
+    Context ctx = new Context(conf);
+    while ((ast.getToken() == null) && (ast.getChildCount() > 0)) {
+      ast = (ASTNode) ast.getChild(0);
+    }
+    sem.getOutputs().clear();
+    sem.getInputs().clear();
+    sem.analyze(ast, ctx);
+    ctx.clear();
+    return sem.getRootTasks();
+  }
+
+  public TreeMap<String, String> getQMap() {
+    return qMap;
+  }
+
+  /**
+   * QTestSetup defines test fixtures which are reused across testcases,
+   * and are needed before any test can be run
+   */
+  public static class QTestSetup
+  {
+    private MiniZooKeeperCluster zooKeeperCluster = null;
+    private int zkPort;
+    private ZooKeeper zooKeeper;
+
+    public QTestSetup() {
+    }
+
+    public void preTest(HiveConf conf) throws Exception {
+
+      if (zooKeeperCluster == null) {
+        //create temp dir
+        String tmpBaseDir =  System.getProperty(TEST_TMP_DIR_PROPERTY);
+        File tmpDir = Utilities.createTempDir(tmpBaseDir);
+
+        zooKeeperCluster = new MiniZooKeeperCluster();
+        zkPort = zooKeeperCluster.startup(tmpDir);
+      }
+
+      if (zooKeeper != null) {
+        zooKeeper.close();
+      }
+
+      int sessionTimeout =  (int) conf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
+      zooKeeper = new ZooKeeper("localhost:" + zkPort, sessionTimeout, new Watcher() {
+        @Override
+        public void process(WatchedEvent arg0) {
+        }
+      });
+
+      String zkServer = "localhost";
+      conf.set("hive.zookeeper.quorum", zkServer);
+      conf.set("hive.zookeeper.client.port", "" + zkPort);
+    }
+
+    public void postTest(HiveConf conf) throws Exception {
+      if (zooKeeperCluster == null) {
+        return;
+      }
+
+      if (zooKeeper != null) {
+        zooKeeper.close();
+      }
+
+      ZooKeeperHiveLockManager.releaseAllLocks(conf);
+    }
+
+    public void tearDown() throws Exception {
+      CuratorFrameworkSingleton.closeAndReleaseInstance();
+
+      if (zooKeeperCluster != null) {
+        zooKeeperCluster.shutdown();
+        zooKeeperCluster = null;
+      }
+    }
+  }
+
+  /**
+   * QTRunner: Runnable class for running a single query file.
+   *
+   **/
+  public static class QTRunner implements Runnable {
+    private final QTestUtil qt;
+    private final String fname;
+
+    public QTRunner(QTestUtil qt, String fname) {
+      this.qt = qt;
+      this.fname = fname;
+    }
+
+    @Override
+    public void run() {
+      try {
+        // assumption is that environment has already been cleaned once globally
+        // hence each thread does not call cleanUp() and createSources() again
+        qt.cliInit(fname, false);
+        qt.executeClient(fname);
+      } catch (Throwable e) {
+        System.err.println("Query file " + fname + " failed with exception "
+            + e.getMessage());
+        e.printStackTrace();
+        outputTestFailureHelpMessage();
+      }
+    }
+  }
+
+  /**
+   * Setup to execute a set of query files. Uses QTestUtil to do so.
+   *
+   * @param qfiles
+   *          array of input query files containing arbitrary number of hive
+   *          queries
+   * @param resDir
+   *          output directory
+   * @param logDir
+   *          log directory
+   * @return one QTestUtil for each query file
+   */
+  public static QTestUtil[] queryListRunnerSetup(File[] qfiles, String resDir,
+      String logDir, String initScript, String cleanupScript) throws Exception
+  {
+    QTestUtil[] qt = new QTestUtil[qfiles.length];
+    for (int i = 0; i < qfiles.length; i++) {
+      qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20",
+        initScript == null ? defaultInitScript : initScript,
+        cleanupScript == null ? defaultCleanupScript : cleanupScript, false);
+      qt[i].addFile(qfiles[i]);
+      qt[i].clearTestSideEffects();
+    }
+
+    return qt;
+  }
+
+  /**
+   * Executes a set of query files in sequence.
+   *
+   * @param qfiles
+   *          array of input query files containing arbitrary number of hive
+   *          queries
+   * @param qt
+   *          array of QTestUtils, one per qfile
+   * @return true if all queries passed, false otherwise
+   */
+  public static boolean queryListRunnerSingleThreaded(File[] qfiles, QTestUtil[] qt)
+    throws Exception
+  {
+    boolean failed = false;
+    qt[0].cleanUp();
+    qt[0].createSources();
+    for (int i = 0; i < qfiles.length && !failed; i++) {
+      qt[i].clearTestSideEffects();
+      qt[i].cliInit(qfiles[i].getName(), false);
+      qt[i].executeClient(qfiles[i].getName());
+      QTestProcessExecResult result = qt[i].checkCliDriverResults(qfiles[i].getName());
+      if (result.getReturnCode() != 0) {
+        failed = true;
+        StringBuilder builder = new StringBuilder();
+        builder.append("Test ")
+            .append(qfiles[i].getName())
+            .append(" results check failed with error code ")
+            .append(result.getReturnCode());
+        if (Strings.isNotEmpty(result.getCapturedOutput())) {
+          builder.append(" and diff value ").append(result.getCapturedOutput());
+        }
+        System.err.println(builder.toString());
+        outputTestFailureHelpMessage();
+      }
+      qt[i].clearPostTestEffects();
+    }
+    return (!failed);
+  }
+
+  /**
+   * Executes a set of query files in parallel.
+   *
+   * Each query file is run in a separate thread. The caller has to arrange
+   * that different query files do not collide (in terms of destination tables)
+   *
+   * @param qfiles
+   *          array of input query files containing arbitrary number of hive
+   *          queries
+   * @param qt
+   *          array of QTestUtils, one per qfile
+   * @return true if all queries passed, false otherwise
+   *
+   */
+  public static boolean queryListRunnerMultiThreaded(File[] qfiles, QTestUtil[] qt)
+    throws Exception
+  {
+    boolean failed = false;
+
+    // in multithreaded mode - do cleanup/initialization just once
+
+    qt[0].cleanUp();
+    qt[0].createSources();
+    qt[0].clearTestSideEffects();
+
+    QTRunner[] qtRunners = new QTRunner[qfiles.length];
+    Thread[] qtThread = new Thread[qfiles.length];
+
+    for (int i = 0; i < qfiles.length; i++) {
+      qtRunners[i] = new QTRunner(qt[i], qfiles[i].getName());
+      qtThread[i] = new Thread(qtRunners[i]);
+    }
+
+    for (int i = 0; i < qfiles.length; i++) {
+      qtThread[i].start();
+    }
+
+    for (int i = 0; i < qfiles.length; i++) {
+      qtThread[i].join();
+      QTestProcessExecResult result = qt[i].checkCliDriverResults(qfiles[i].getName());
+      if (result.getReturnCode() != 0) {
+        failed = true;
+        StringBuilder builder = new StringBuilder();
+        builder.append("Test ")
+            .append(qfiles[i].getName())
+            .append(" results check failed with error code ")
+            .append(result.getReturnCode());
+        if (Strings.isNotEmpty(result.getCapturedOutput())) {
+          builder.append(" and diff value ").append(result.getCapturedOutput());
+        }
+        System.err.println(builder.toString());
+        outputTestFailureHelpMessage();
+      }
+    }
+    return (!failed);
+  }
+
+  public static void outputTestFailureHelpMessage() {
+    System.err.println(
+      "See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, or check " +
+        "./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific " +
+        "test cases logs.");
+    System.err.flush();
+  }
+
+  private static String[] cachedQvFileList = null;
+  private static ImmutableList<String> cachedDefaultQvFileList = null;
+  private static Pattern qvSuffix = Pattern.compile("_[0-9]+\\.qv$", Pattern.CASE_INSENSITIVE);
+
+  public static List<String> getVersionFiles(String queryDir, String tname) {
+    ensureQvFileList(queryDir);
+    List<String> result = getVersionFilesInternal(tname);
+    if (result == null) {
+      result = cachedDefaultQvFileList;
+    }
+    return result;
+  }
+
+  private static void ensureQvFileList(String queryDir) {
+    if (cachedQvFileList != null) {
+      return;
+    }
+    // Not thread-safe.
+    System.out.println("Getting versions from " + queryDir);
+    cachedQvFileList = (new File(queryDir)).list(new FilenameFilter() {
+      @Override
+      public boolean accept(File dir, String name) {
+        return name.toLowerCase().endsWith(".qv");
+      }
+    });
+    if (cachedQvFileList == null) {
+      return; // no files at all
+    }
+    Arrays.sort(cachedQvFileList, String.CASE_INSENSITIVE_ORDER);
+    List<String> defaults = getVersionFilesInternal("default");
+    cachedDefaultQvFileList = (defaults != null)
+        ? ImmutableList.copyOf(defaults) : ImmutableList.<String>of();
+  }
+
+  private static List<String> getVersionFilesInternal(String tname) {
+    if (cachedQvFileList == null) {
+      return new ArrayList<String>();
+    }
+    int pos = Arrays.binarySearch(cachedQvFileList, tname, String.CASE_INSENSITIVE_ORDER);
+    if (pos >= 0) {
+      throw new BuildException("Unexpected file list element: " + cachedQvFileList[pos]);
+    }
+    List<String> result = null;
+    for (pos = (-pos - 1); pos < cachedQvFileList.length; ++pos) {
+      String candidate = cachedQvFileList[pos];
+      if (candidate.length() <= tname.length()
+          || !tname.equalsIgnoreCase(candidate.substring(0, tname.length()))
+          || !qvSuffix.matcher(candidate.substring(tname.length())).matches()) {
+        break;
+      }
+      if (result == null) {
+        result = new ArrayList<String>();
+      }
+      result.add(candidate);
+    }
+    return result;
+  }
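+
+  /*
+   * Worked example (hypothetical file names) of the lookup above: with a sorted
+   * cachedQvFileList of { "foo_1.qv", "foo_2.qv", "other_1.qv" }, binarySearch("foo")
+   * returns a negative insertion point (-1), so the scan starts at index 0, collects
+   * "foo_1.qv" and "foo_2.qv", and stops at "other_1.qv" because it does not start
+   * with "foo" followed by the "_<n>.qv" suffix.
+   */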
+
+  public void failed(int ecode, String fname, String debugHint) {
+    String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
+    String message = "Client execution failed with error code = " + ecode +
+        (command != null ? " running \"" + command : "") + "\" fname=" + fname + " " +
+        (debugHint != null ? debugHint : "");
+    LOG.error(message);
+    Assert.fail(message);
+  }
+
+  // for negative tests that unexpectedly succeeded; no need to print the query string
+  public void failed(String fname, String debugHint) {
+    Assert.fail(
+        "Client Execution was expected to fail, but succeeded with error code 0 for fname=" +
+            fname + (debugHint != null ? (" " + debugHint) : ""));
+  }
+
+  public void failedDiff(int ecode, String fname, String debugHint) {
+    String message =
+        "Client Execution succeeded but contained differences " +
+            "(error code = " + ecode + ") after executing " +
+            fname + (debugHint != null ? (" " + debugHint) : "");
+    LOG.error(message);
+    Assert.fail(message);
+  }
+
+  public void failed(Exception e, String fname, String debugHint) {
+    String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
+    System.err.println("Failed query: " + fname);
+    System.err.flush();
+    Assert.fail("Unexpected exception " +
+        org.apache.hadoop.util.StringUtils.stringifyException(e) + "\n" +
+        (command != null ? " running " + command : "") +
+        (debugHint != null ? debugHint : ""));
+  }
+
+  public static void addTestsToSuiteFromQfileNames(
+    String qFileNamesFile,
+    Set<String> qFilesToExecute,
+    TestSuite suite,
+    Object setup,
+    SuiteAddTestFunctor suiteAddTestCallback) {
+    try {
+      File qFileNames = new File(qFileNamesFile);
+      FileReader fr = new FileReader(qFileNames.getCanonicalFile());
+      BufferedReader br = new BufferedReader(fr);
+      String fName = null;
+
+      while ((fName = br.readLine()) != null) {
+        if (fName.trim().isEmpty()) {
+          continue;
+        }
+
+        int eIdx = fName.indexOf('.');
+
+        if (eIdx == -1) {
+          continue;
+        }
+
+        String tName = fName.substring(0, eIdx);
+
+        if (qFilesToExecute.isEmpty() || qFilesToExecute.contains(fName)) {
+          suiteAddTestCallback.addTestToSuite(suite, setup, tName);
+        }
+      }
+      br.close();
+    } catch (Exception e) {
+      Assert.fail("Unexpected exception " + org.apache.hadoop.util.StringUtils.stringifyException(e));
+    }
+  }
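+
+  /*
+   * Sketch of a callback, for illustration only; callers pass their own
+   * SuiteAddTestFunctor, and MyQFileDriver / the "testCliDriver_" prefix below are
+   * placeholders rather than names defined in this patch:
+   *
+   *   addTestsToSuiteFromQfileNames(qFileNamesFile, qFilesToExecute, suite, setup,
+   *       new SuiteAddTestFunctor() {
+   *         public void addTestToSuite(TestSuite suite, Object setup, String tName) {
+   *           suite.addTest(new MyQFileDriver("testCliDriver_" + tName));
+   *         }
+   *       });
+   */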
+
+  public static void setupMetaStoreTableColumnStatsFor30TBTPCDSWorkload(HiveConf conf) {
+    Connection conn = null;
+    ArrayList<Statement> statements = new ArrayList<Statement>(); // list of Statements, PreparedStatements
+
+    try {
+      Properties props = new Properties(); // connection properties
+      props.put("user", conf.get("javax.jdo.option.ConnectionUserName"));
+      props.put("password", conf.get("javax.jdo.option.ConnectionPassword"));
+      conn = DriverManager.getConnection(conf.get("javax.jdo.option.ConnectionURL"), props);
+      ResultSet rs = null;
+      Statement s = conn.createStatement();
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Connected to metastore database ");
+      }
+
+      String mdbPath = HIVE_ROOT + "/data/files/tpcds-perf/metastore_export/";
+
+      // Setup the table column stats
+      BufferedReader br = new BufferedReader(
+          new FileReader(
+              new File(HIVE_ROOT + "/metastore/scripts/upgrade/derby/022-HIVE-11107.derby.sql")));
+      String command;
+
+      s.execute("DROP TABLE APP.TABLE_PARAMS");
+      s.execute("DROP TABLE APP.TAB_COL_STATS");
+      // Create the column stats table
+      while ((command = br.readLine()) != null) {
+        if (!command.endsWith(";")) {
+          continue;
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Going to run command : " + command);
+        }
+        try {
+          PreparedStatement psCommand = conn.prepareStatement(command.substring(0, command.length()-1));
+          statements.add(psCommand);
+          psCommand.execute();
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("successfully completed " + command);
+          }
+        } catch (SQLException e) {
+          LOG.info("Got SQL Exception " + e.getMessage());
+        }
+      }
+      br.close();
+
+      java.nio.file.Path tabColStatsCsv = FileSystems.getDefault().getPath(mdbPath, "csv" ,"TAB_COL_STATS.txt.bz2");
+      java.nio.file.Path tabParamsCsv = FileSystems.getDefault().getPath(mdbPath, "csv", "TABLE_PARAMS.txt.bz2");
+
+      // Set up the foreign key constraints properly in the TAB_COL_STATS data
+      String tmpBaseDir =  System.getProperty(TEST_TMP_DIR_PROPERTY);
+      java.nio.file.Path tmpFileLoc1 = FileSystems.getDefault().getPath(tmpBaseDir, "TAB_COL_STATS.txt");
+      java.nio.file.Path tmpFileLoc2 = FileSystems.getDefault().getPath(tmpBaseDir, "TABLE_PARAMS.txt");
+
+      class MyComp implements Comparator<String> {
+        @Override
+        public int compare(String str1, String str2) {
+          if (str2.length() != str1.length()) {
+            return str2.length() - str1.length();
+          }
+          return str1.compareTo(str2);
+        }
+      }
+
+      final SortedMap<String, Integer> tableNameToID = new TreeMap<String, Integer>(new MyComp());
+
+      rs = s.executeQuery("SELECT * FROM APP.TBLS");
+      while(rs.next()) {
+        String tblName = rs.getString("TBL_NAME");
+        Integer tblId = rs.getInt("TBL_ID");
+        tableNameToID.put(tblName, tblId);
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Resultset : " +  tblName + " | " + tblId);
+        }
+      }
+
+      final Map<String, Map<String, String>> data = new HashMap<>();
+      rs = s.executeQuery("select TBLS.TBL_NAME, a.COLUMN_NAME, a.TYPE_NAME from  "
+          + "(select COLUMN_NAME, TYPE_NAME, SDS.SD_ID from APP.COLUMNS_V2 join APP.SDS on SDS.CD_ID = COLUMNS_V2.CD_ID) a"
+          + " join APP.TBLS on  TBLS.SD_ID = a.SD_ID");
+      while (rs.next()) {
+        String tblName = rs.getString(1);
+        String colName = rs.getString(2);
+        String typeName = rs.getString(3);
+        Map<String, String> cols = data.get(tblName);
+        if (null == cols) {
+          cols = new HashMap<>();
+        }
+        cols.put(colName, typeName);
+        data.put(tblName, cols);
+      }
+
+      BufferedReader reader = new BufferedReader(new InputStreamReader(
+        new BZip2CompressorInputStream(Files.newInputStream(tabColStatsCsv, StandardOpenOption.READ))));
+
+      Stream<String> replaced = reader.lines().parallel().map(str-> {
+        String[] splits = str.split(",");
+        String tblName = splits[0];
+        String colName = splits[1];
+        Integer tblID = tableNameToID.get(tblName);
+        StringBuilder sb = new StringBuilder("default@"+tblName + "@" + colName + "@" + data.get(tblName).get(colName)+"@");
+        for (int i = 2; i < splits.length; i++) {
+          sb.append(splits[i]+"@");
+        }
+        // Add tbl_id and empty bitvector
+        return sb.append(tblID).append("@").toString();
+      });
+
+      Files.write(tmpFileLoc1, (Iterable<String>)replaced::iterator);
+      replaced.close();
+      reader.close();
+
+      BufferedReader reader2 = new BufferedReader(new InputStreamReader(
+          new BZip2CompressorInputStream(Files.newInputStream(tabParamsCsv, StandardOpenOption.READ))));
+      final Map<String,String> colStats = new ConcurrentHashMap<>();
+      Stream<String> replacedStream = reader2.lines().parallel().map(str-> {
+        String[] splits = str.split("_@");
+        String tblName = splits[0];
+        Integer tblId = tableNameToID.get(tblName);
+        Map<String,String> cols = data.get(tblName);
+        StringBuilder sb = new StringBuilder();
+        sb.append("{\"COLUMN_STATS\":{");
+        for (String colName : cols.keySet()) {
+          sb.append("\"" + colName + "\":\"true\",");
+        }
+        if (!cols.isEmpty()) {
+          // drop the trailing comma so the generated COLUMN_STATS JSON stays valid
+          sb.setLength(sb.length() - 1);
+        }
+        sb.append("},\"BASIC_STATS\":\"true\"}");
+        colStats.put(tblId.toString(), sb.toString());
+
+        return  tblId.toString() + "@" + splits[1];
+      });
+
+      Files.write(tmpFileLoc2, (Iterable<String>)replacedStream::iterator);
+      Files.write(tmpFileLoc2, (Iterable<String>)colStats.entrySet().stream()
+        .map(map->map.getKey()+"@COLUMN_STATS_ACCURATE@"+map.getValue())::iterator, StandardOpenOption.APPEND);
+
+      replacedStream.close();
+      reader2.close();
+      // Load the column stats and table params with 30 TB scale
+      String importStatement1 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, '" + "TAB_COL_STATS" +
+        "', '" + tmpFileLoc1.toAbsolutePath().toString() +
+        "', '@', null, 'UTF-8', 1)";
+      String importStatement2 =  "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(null, '" + "TABLE_PARAMS" +
+        "', '" + tmpFileLoc2.toAbsolutePath().toString() +
+        "', '@', null, 'UTF-8', 1)";
+      try {
+        PreparedStatement psImport1 = conn.prepareStatement(importStatement1);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Going to execute : " + importStatement1);
+        }
+        statements.add(psImport1);
+        psImport1.execute();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("successfully completed " + importStatement1);
+        }
+        PreparedStatement psImport2 = conn.prepareStatement(importStatement2);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Going to execute : " + importStatement2);
+        }
+        statements.add(psImport2);
+        psImport2.execute();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("successfully completed " + importStatement2);
+        }
+      } catch (SQLException e) {
+        LOG.info("Got SQL Exception  " +  e.getMessage());
+      }
+    } catch (FileNotFoundException e1) {
+      LOG.info("Got FileNotFoundException " + e1.getMessage());
+    } catch (IOException e1) {
+      LOG.info("Got IOException " + e1.getMessage());
+    } catch (SQLException e1) {
+      LOG.info("Got SQLException " + e1.getMessage());
+    } finally {
+      // Statements and PreparedStatements
+      int i = 0;
+      while (!statements.isEmpty()) {
+        // PreparedStatement extends Statement
+        Statement st = statements.remove(i);
+        try {
+          if (st != null) {
+            st.close();
+            st = null;
+          }
+        } catch (SQLException sqle) {
+          // ignore close failures during cleanup
+        }
+      }
+
+      //Connection
+      try {
+        if (conn != null) {
+          conn.close();
+          conn = null;
+        }
+      } catch (SQLException sqle) {
+        // ignore close failures during cleanup
+      }
+    }
+  }
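+
+  /*
+   * For reference, Derby's SYSCS_UTIL.SYSCS_IMPORT_TABLE takes (schemaName, tableName,
+   * fileName, columnDelimiter, characterDelimiter, codeset, replace); the calls above
+   * load the '@'-delimited temp files with replace = 1, so any existing rows in
+   * TAB_COL_STATS and TABLE_PARAMS are replaced.
+   */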
+
+  private static String getHiveRoot() {
+    String path;
+    if (System.getProperty("hive.root") != null) {
+      try {
+        path = new File(System.getProperty("hive.root")).getCanonicalPath();
+      } catch (IOException e) {
+        throw new RuntimeException("error getting hive.root", e);
+      }
+    } else {
+      path = new File("target").getAbsolutePath();
+    }
+    return ensurePathEndsInSlash(new File(path).getAbsolutePath());
+  }
+
+  public static String ensurePathEndsInSlash(String path) {
+    if (path == null) {
+      throw new NullPointerException("Path cannot be null");
+    }
+    if (path.endsWith(File.separator)) {
+      return path;
+    } else {
+      return path + File.separator;
+    }
+  }
+
+}