Posted to commits@hive.apache.org by xu...@apache.org on 2015/05/15 06:31:28 UTC

[01/50] [abbrv] hive git commit: Merge branch 'ci8915'

Repository: hive
Updated Branches:
  refs/heads/beeline-cli 2ddd86de5 -> 753b2b308


Merge branch 'ci8915'


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f895b277
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f895b277
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f895b277

Branch: refs/heads/beeline-cli
Commit: f895b277399fc37cdcd349a321ddf12c49756a5c
Parents: 1988615 c40c6de
Author: Alan Gates <ga...@hortonworks.com>
Authored: Tue May 5 09:12:15 2015 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Tue May 5 09:12:15 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   | 18 ++++++++----
 .../hadoop/hive/ql/txn/compactor/Worker.java    | 30 ++++++++++++++------
 2 files changed, 33 insertions(+), 15 deletions(-)
----------------------------------------------------------------------



[03/50] [abbrv] hive git commit: HIVE-8890: HiveServer2 dynamic service discovery: use persistent ephemeral nodes curator recipe (Vaibhav Gumashta reviewed by Thejas Nair)

Posted by xu...@apache.org.
HIVE-8890: HiveServer2 dynamic service discovery: use persistent ephemeral nodes curator recipe (Vaibhav Gumashta reviewed by Thejas Nair)
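
For context, Curator's PersistentEphemeralNode recipe re-creates its znode whenever the ZooKeeper session is re-established, so a HiveServer2 instance stays discoverable across transient disconnects instead of silently dropping out of the namespace. A minimal, self-contained sketch of the registration flow the patch below adopts (the ensemble string, path, and payload are illustrative values, not Hive's actual ones):

    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.TimeUnit;

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.framework.recipes.nodes.PersistentEphemeralNode;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    public class RegistrationSketch {
      public static void main(String[] args) throws Exception {
        // Illustrative values; HiveServer2 reads all of these from HiveConf.
        String ensemble = "zk1:2181,zk2:2181,zk3:2181";
        String pathPrefix = "/hiveserver2/serverUri=host:10000;version=1.2.0;sequence=";
        byte[] payload = "host:10000".getBytes(StandardCharsets.UTF_8);

        CuratorFramework client = CuratorFrameworkFactory.builder()
            .connectString(ensemble)
            .sessionTimeoutMs(1200000)
            .retryPolicy(new ExponentialBackoffRetry(1000, 3))
            .build();
        client.start();

        // The recipe re-creates the ephemeral-sequential znode if the session is lost.
        PersistentEphemeralNode node = new PersistentEphemeralNode(client,
            PersistentEphemeralNode.Mode.EPHEMERAL_SEQUENTIAL, pathPrefix, payload);
        node.start();
        if (!node.waitForInitialCreate(120, TimeUnit.SECONDS)) {
          throw new IllegalStateException("znode not created within 120s");
        }
        System.out.println("Registered at " + node.getActualPath());

        node.close();   // deletes the znode and stops the re-creation machinery
        client.close();
      }
    }

The key difference from the plain EPHEMERAL_SEQUENTIAL create this patch replaces is that the recipe owns the node's lifecycle: a session expiry no longer permanently deregisters the server.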


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/652febcd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/652febcd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/652febcd

Branch: refs/heads/beeline-cli
Commit: 652febcdab727f39c05d6b5b3c0a6526d254ee0e
Parents: cccaa55
Author: Vaibhav Gumashta <vg...@apache.org>
Authored: Tue May 5 10:37:51 2015 -0700
Committer: Vaibhav Gumashta <vg...@apache.org>
Committed: Tue May 5 10:37:51 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +-
 pom.xml                                         |   5 +
 service/pom.xml                                 |   5 +
 .../cli/thrift/ThriftBinaryCLIService.java      |   1 -
 .../apache/hive/service/server/HiveServer2.java | 106 +++++++++++++++----
 5 files changed, 97 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/652febcd/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index f04ce82..5d4dbea 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1410,7 +1410,7 @@ public class HiveConf extends Configuration {
         "The port of ZooKeeper servers to talk to.\n" +
         "If the list of Zookeeper servers specified in hive.zookeeper.quorum\n" +
         "does not contain port numbers, this value is used."),
-    HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "600000ms",
+    HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "1200000ms",
         new TimeValidator(TimeUnit.MILLISECONDS),
         "ZooKeeper client's session timeout (in milliseconds). The client is disconnected, and as a result, all locks released, \n" +
         "if a heartbeat is not sent in the timeout."),

http://git-wip-us.apache.org/repos/asf/hive/blob/652febcd/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index acacf81..1921b06 100644
--- a/pom.xml
+++ b/pom.xml
@@ -512,6 +512,11 @@
         <version>${curator.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.curator</groupId>
+        <artifactId>curator-recipes</artifactId>
+        <version>${curator.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.codehaus.groovy</groupId>
         <artifactId>groovy-all</artifactId>
         <version>${groovy.version}</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/652febcd/service/pom.xml
----------------------------------------------------------------------
diff --git a/service/pom.xml b/service/pom.xml
index c5815af..d8e3126 100644
--- a/service/pom.xml
+++ b/service/pom.xml
@@ -91,6 +91,11 @@
       <artifactId>curator-framework</artifactId>
       <version>${curator.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-recipes</artifactId>
+      <version>${curator.version}</version>
+    </dependency>
     <!-- intra-project -->
     <dependency>
       <groupId>org.apache.hive</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/652febcd/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
index ca1eae6..6c9efba 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
@@ -93,7 +93,6 @@ public class ThriftBinaryCLIService extends ThriftCLIService {
       // TCP Server
       server = new TThreadPoolServer(sargs);
       server.setServerEventHandler(serverEventHandler);
-      server.serve();
       String msg = "Starting " + ThriftBinaryCLIService.class.getSimpleName() + " on port "
           + portNum + " with " + minWorkerThreads + "..." + maxWorkerThreads + " worker threads";
       LOG.info(msg);

http://git-wip-us.apache.org/repos/asf/hive/blob/652febcd/service/src/java/org/apache/hive/service/server/HiveServer2.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index dc2217f..58e8e49 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -23,6 +23,8 @@ import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -35,6 +37,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.framework.api.ACLProvider;
+import org.apache.curator.framework.api.BackgroundCallback;
+import org.apache.curator.framework.api.CuratorEvent;
+import org.apache.curator.framework.api.CuratorEventType;
+import org.apache.curator.framework.recipes.nodes.PersistentEphemeralNode;
 import org.apache.curator.retry.ExponentialBackoffRetry;
 import org.apache.hadoop.hive.common.LogUtils;
 import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
@@ -67,9 +73,11 @@ import org.apache.zookeeper.data.ACL;
  */
 public class HiveServer2 extends CompositeService {
   private static final Log LOG = LogFactory.getLog(HiveServer2.class);
+  private static CountDownLatch deleteSignal;
 
   private CLIService cliService;
   private ThriftCLIService thriftCLIService;
+  private PersistentEphemeralNode znode;
   private String znodePath;
   private CuratorFramework zooKeeperClient;
   private boolean registeredWithZooKeeper = false;
@@ -151,12 +159,19 @@ public class HiveServer2 extends CompositeService {
     String instanceURI = getServerInstanceURI(hiveConf);
     byte[] znodeDataUTF8 = instanceURI.getBytes(Charset.forName("UTF-8"));
     setUpZooKeeperAuth(hiveConf);
+    int sessionTimeout =
+        (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT,
+            TimeUnit.MILLISECONDS);
+    int baseSleepTime =
+        (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME,
+            TimeUnit.MILLISECONDS);
+    int maxRetries = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES);
     // Create a CuratorFramework instance to be used as the ZooKeeper client
     // Use the zooKeeperAclProvider to create appropriate ACLs
     zooKeeperClient =
         CuratorFrameworkFactory.builder().connectString(zooKeeperEnsemble)
-            .aclProvider(zooKeeperAclProvider).retryPolicy(new ExponentialBackoffRetry(1000, 3))
-            .build();
+            .sessionTimeoutMs(sessionTimeout).aclProvider(zooKeeperAclProvider)
+            .retryPolicy(new ExponentialBackoffRetry(baseSleepTime, maxRetries)).build();
     zooKeeperClient.start();
     // Create the parent znodes recursively; ignore if the parent already exists.
     try {
@@ -176,18 +191,28 @@ public class HiveServer2 extends CompositeService {
           ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace
               + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + "serverUri=" + instanceURI + ";"
               + "version=" + HiveVersionInfo.getVersion() + ";" + "sequence=";
-      znodePath =
-          zooKeeperClient.create().creatingParentsIfNeeded()
-              .withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(pathPrefix, znodeDataUTF8);
+      znode =
+          new PersistentEphemeralNode(zooKeeperClient,
+              PersistentEphemeralNode.Mode.EPHEMERAL_SEQUENTIAL, pathPrefix, znodeDataUTF8);
+      znode.start();
+      // We'll wait for 120s for node creation
+      long znodeCreationTimeout = 120;
+      if (!znode.waitForInitialCreate(znodeCreationTimeout, TimeUnit.SECONDS)) {
+        throw new Exception("Max znode creation wait time: " + znodeCreationTimeout + "s exhausted");
+      }
       setRegisteredWithZooKeeper(true);
+      znodePath = znode.getActualPath();
       // Set a watch on the znode
       if (zooKeeperClient.checkExists().usingWatcher(new DeRegisterWatcher()).forPath(znodePath) == null) {
         // No node exists, throw exception
         throw new Exception("Unable to create znode for this HiveServer2 instance on ZooKeeper.");
       }
       LOG.info("Created a znode on ZooKeeper for HiveServer2 uri: " + instanceURI);
-    } catch (KeeperException e) {
+    } catch (Exception e) {
       LOG.fatal("Unable to create a znode for this server instance", e);
+      if (znode != null) {
+        znode.close();
+      }
       throw (e);
     }
   }
@@ -223,22 +248,33 @@ public class HiveServer2 extends CompositeService {
     @Override
     public void process(WatchedEvent event) {
       if (event.getType().equals(Watcher.Event.EventType.NodeDeleted)) {
-        HiveServer2.this.setRegisteredWithZooKeeper(false);
-        // If there are no more active client sessions, stop the server
-        if (cliService.getSessionManager().getOpenSessionCount() == 0) {
-          LOG.warn("This instance of HiveServer2 has been removed from the list of server "
-              + "instances available for dynamic service discovery. "
-              + "The last client session has ended - will shutdown now.");
-          HiveServer2.this.stop();
+        if (znode != null) {
+          try {
+            znode.close();
+            LOG.warn("This HiveServer2 instance is now de-registered from ZooKeeper. "
+                + "The server will be shut down after the last client sesssion completes.");
+          } catch (IOException e) {
+            LOG.error("Failed to close the persistent ephemeral znode", e);
+          } finally {
+            HiveServer2.this.setRegisteredWithZooKeeper(false);
+            // If there are no more active client sessions, stop the server
+            if (cliService.getSessionManager().getOpenSessionCount() == 0) {
+              LOG.warn("This instance of HiveServer2 has been removed from the list of server "
+                  + "instances available for dynamic service discovery. "
+                  + "The last client session has ended - will shutdown now.");
+              HiveServer2.this.stop();
+            }
+          }
         }
-        LOG.warn("This HiveServer2 instance is now de-registered from ZooKeeper. "
-            + "The server will be shut down after the last client sesssion completes.");
       }
     }
   }
 
   private void removeServerInstanceFromZooKeeper() throws Exception {
     setRegisteredWithZooKeeper(false);
+    if (znode != null) {
+      znode.close();
+    }
     zooKeeperClient.close();
     LOG.info("Server instance removed from ZooKeeper.");
   }
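
A note on the watcher idiom above: ZooKeeper watches set via usingWatcher() are one-shot, firing once on the next matching event, which is why the NodeDeleted handler does all of its cleanup in a single pass. A stripped-down sketch of the checkExists-plus-watcher pattern, assuming an already-started client (the path and the reaction are illustrative):

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    public class WatcherSketch {
      static void watchForDelete(CuratorFramework client, String znodePath) throws Exception {
        Watcher onDelete = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
              // React to deregistration, e.g. stop accepting new sessions.
              System.out.println("znode deleted: " + event.getPath());
            }
          }
        };
        // checkExists() returns the node's Stat, or null if the path does not exist.
        if (client.checkExists().usingWatcher(onDelete).forPath(znodePath) == null) {
          throw new IllegalStateException("znode missing: " + znodePath);
        }
      }
    }
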
@@ -359,25 +395,53 @@ public class HiveServer2 extends CompositeService {
     HiveConf hiveConf = new HiveConf();
     String zooKeeperEnsemble = ZooKeeperHiveHelper.getQuorumServers(hiveConf);
     String rootNamespace = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE);
+    int baseSleepTime = (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME, TimeUnit.MILLISECONDS);
+    int maxRetries = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES);
     CuratorFramework zooKeeperClient =
         CuratorFrameworkFactory.builder().connectString(zooKeeperEnsemble)
-            .retryPolicy(new ExponentialBackoffRetry(1000, 3)).build();
+            .retryPolicy(new ExponentialBackoffRetry(baseSleepTime, maxRetries)).build();
     zooKeeperClient.start();
     List<String> znodePaths =
         zooKeeperClient.getChildren().forPath(
             ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
+    List<String> znodePathsUpdated;
     // Now for each path that is for the given versionNumber, delete the znode from ZooKeeper
-    for (String znodePath : znodePaths) {
+    for (int i = 0; i < znodePaths.size(); i++) {
+      String znodePath = znodePaths.get(i);
+      deleteSignal = new CountDownLatch(1);
       if (znodePath.contains("version=" + versionNumber + ";")) {
-        LOG.info("Removing the znode: " + znodePath + " from ZooKeeper");
-        zooKeeperClient.delete().forPath(
+        String fullZnodePath =
             ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace
-                + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + znodePath);
+                + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + znodePath;
+        LOG.warn("Will attempt to remove the znode: " + fullZnodePath + " from ZooKeeper");
+        System.out.println("Will attempt to remove the znode: " + fullZnodePath + " from ZooKeeper");
+        zooKeeperClient.delete().guaranteed().inBackground(new DeleteCallBack())
+            .forPath(fullZnodePath);
+        // Wait for the delete to complete
+        deleteSignal.await();
+        // Get the updated path list
+        znodePathsUpdated =
+            zooKeeperClient.getChildren().forPath(
+                ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
+        // Gives a list of any new paths that may have been created to maintain the persistent ephemeral node
+        znodePathsUpdated.removeAll(znodePaths);
+        // Add the new paths to the znodes list. We'll try for their removal as well.
+        znodePaths.addAll(znodePathsUpdated);
       }
     }
     zooKeeperClient.close();
   }
 
+  private static class DeleteCallBack implements BackgroundCallback {
+    @Override
+    public void processResult(CuratorFramework zooKeeperClient, CuratorEvent event)
+        throws Exception {
+      if (event.getType() == CuratorEventType.DELETE) {
+        deleteSignal.countDown();
+      }
+    }
+  }
+
   public static void main(String[] args) {
     HiveConf.setLoadHiveServer2Config(true);
     try {
@@ -547,6 +611,8 @@ public class HiveServer2 extends CompositeService {
       } catch (Exception e) {
         LOG.fatal("Error deregistering HiveServer2 instances for version: " + versionNumber
             + " from ZooKeeper", e);
+        System.out.println("Error deregistering HiveServer2 instances for version: " + versionNumber
+            + " from ZooKeeper." + e);
         System.exit(-1);
       }
       System.exit(0);
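
The deregistration helper above blocks on a CountDownLatch because guaranteed() deletes run in a background thread; without the latch, the method could return (and the client be closed) before the delete actually executed. A condensed sketch of that pattern, again assuming an already-started client (the path is illustrative):

    import java.util.concurrent.CountDownLatch;

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.api.BackgroundCallback;
    import org.apache.curator.framework.api.CuratorEvent;
    import org.apache.curator.framework.api.CuratorEventType;

    public class BackgroundDeleteSketch {
      static void deleteZnode(CuratorFramework client, String path) throws Exception {
        final CountDownLatch deleted = new CountDownLatch(1);
        client.delete().guaranteed()
            .inBackground(new BackgroundCallback() {
              @Override
              public void processResult(CuratorFramework c, CuratorEvent event) {
                if (event.getType() == CuratorEventType.DELETE) {
                  deleted.countDown();   // signal the waiting thread
                }
              }
            })
            .forPath(path);
        deleted.await();   // block until the background delete has run
      }
    }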


[16/50] [abbrv] hive git commit: HIVE-9456 : Make Hive support unicode with MSSQL as Metastore backend (Xiaobing Zhou via Sushanth Sowmyan, reviewed by Thejas Nair) (part2/2)

Posted by xu...@apache.org.
HIVE-9456 : Make Hive support unicode with MSSQL as Metastore backend (Xiaobing Zhou via Sushanth Sowmyan, reviewed by Thejas Nair) (part2/2)
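
Background on the change: MSSQL varchar columns store single-byte, collation-dependent text, while nvarchar stores UTF-16, so widening the metastore columns to nvarchar is what lets object names, comments, and parameter values survive a unicode round trip. A hedged JDBC illustration (the connection string, credentials, and hand-written INSERT are hypothetical; in practice DataNucleus issues the metastore SQL):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class UnicodeRoundTripSketch {
      public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection(
            "jdbc:sqlserver://mssql-host:1433;databaseName=hivemetastore", "hive", "secret");
        // With PARAM_VALUE as nvarchar(4000) the unicode value survives intact;
        // with varchar it would be mangled by the database collation.
        PreparedStatement ps = conn.prepareStatement(
            "INSERT INTO TABLE_PARAMS (TBL_ID, PARAM_KEY, PARAM_VALUE) VALUES (?, ?, ?)");
        ps.setLong(1, 1L);
        ps.setString(2, "comment");
        ps.setNString(3, "caf\u00e9 \u4e2d\u6587");   // setNString sends NVARCHAR data
        ps.executeUpdate();
        ps.close();
        conn.close();
      }
    }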


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d39c829f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d39c829f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d39c829f

Branch: refs/heads/beeline-cli
Commit: d39c829fa42e572cf9141d00d87797ffe53c251e
Parents: 8c93f2b
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 11:17:08 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 11:17:47 2015 -0700

----------------------------------------------------------------------
 .../upgrade/mssql/006-HIVE-9456.mssql.sql       | 323 +++++++++++++++++++
 1 file changed, 323 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d39c829f/metastore/scripts/upgrade/mssql/006-HIVE-9456.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/006-HIVE-9456.mssql.sql b/metastore/scripts/upgrade/mssql/006-HIVE-9456.mssql.sql
new file mode 100644
index 0000000..707843a
--- /dev/null
+++ b/metastore/scripts/upgrade/mssql/006-HIVE-9456.mssql.sql
@@ -0,0 +1,323 @@
+
+--ALTER TABLE MASTER_KEYS
+ALTER TABLE MASTER_KEYS ALTER COLUMN MASTER_KEY nvarchar(767) NULL;
+GO
+
+--ALTER TABLE IDXS
+ALTER TABLE IDXS ALTER COLUMN INDEX_HANDLER_CLASS nvarchar(4000) NULL;
+GO
+
+DROP INDEX IDXS.UNIQUEINDEX;
+ALTER TABLE IDXS ALTER COLUMN INDEX_NAME nvarchar(128) NULL;
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+GO
+
+--ALTER TABLE PART_COL_STATS
+DROP INDEX PART_COL_STATS.PCS_STATS_IDX;
+ALTER TABLE PART_COL_STATS ALTER COLUMN "COLUMN_NAME" nvarchar(128) NOT NULL;
+ALTER TABLE PART_COL_STATS ALTER COLUMN COLUMN_TYPE nvarchar(128) NOT NULL;
+ALTER TABLE PART_COL_STATS ALTER COLUMN DB_NAME nvarchar(128) NOT NULL;
+ALTER TABLE PART_COL_STATS ALTER COLUMN BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL;
+ALTER TABLE PART_COL_STATS ALTER COLUMN BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL;
+ALTER TABLE PART_COL_STATS ALTER COLUMN PARTITION_NAME nvarchar(767) NOT NULL;
+ALTER TABLE PART_COL_STATS ALTER COLUMN "TABLE_NAME" nvarchar(128) NOT NULL; 
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+GO
+
+--ALTER TABLE PART_PRIVS
+DROP INDEX PART_PRIVS.PARTPRIVILEGEINDEX;
+ALTER TABLE PART_PRIVS ALTER COLUMN GRANTOR nvarchar(128) NULL;
+ALTER TABLE PART_PRIVS ALTER COLUMN GRANTOR_TYPE nvarchar(128) NULL;
+ALTER TABLE PART_PRIVS ALTER COLUMN PRINCIPAL_NAME nvarchar(128) NULL;
+ALTER TABLE PART_PRIVS ALTER COLUMN PRINCIPAL_TYPE nvarchar(128) NULL;
+ALTER TABLE PART_PRIVS ALTER COLUMN PART_PRIV nvarchar(128) NULL;
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+GO
+
+
+--ALTER TABLE ROLES
+DROP INDEX ROLES.ROLEENTITYINDEX;
+ALTER TABLE ROLES ALTER COLUMN OWNER_NAME nvarchar(128) NULL;
+ALTER TABLE ROLES ALTER COLUMN ROLE_NAME nvarchar(128) NULL;
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+GO
+
+--ALTER TABLE VERSION
+ALTER TABLE VERSION ALTER COLUMN SCHEMA_VERSION nvarchar(127) NOT NULL;
+ALTER TABLE VERSION ALTER COLUMN VERSION_COMMENT nvarchar(255) NOT NULL;
+GO
+
+--ALTER TABLE GLOBAL_PRIVS
+DROP INDEX GLOBAL_PRIVS.GLOBALPRIVILEGEINDEX;
+ALTER TABLE GLOBAL_PRIVS ALTER COLUMN GRANTOR nvarchar(128) NULL;
+ALTER TABLE GLOBAL_PRIVS ALTER COLUMN GRANTOR_TYPE nvarchar(128) NULL;
+ALTER TABLE GLOBAL_PRIVS ALTER COLUMN PRINCIPAL_NAME nvarchar(128) NULL;
+ALTER TABLE GLOBAL_PRIVS ALTER COLUMN PRINCIPAL_TYPE nvarchar(128) NULL;
+ALTER TABLE GLOBAL_PRIVS ALTER COLUMN USER_PRIV nvarchar(128) NULL;
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+GO
+
+
+--ALTER TABLE PART_COL_PRIVS
+DROP INDEX PART_COL_PRIVS.PARTITIONCOLUMNPRIVILEGEINDEX;
+ALTER TABLE PART_COL_PRIVS ALTER COLUMN "COLUMN_NAME" nvarchar(128) NULL;
+ALTER TABLE PART_COL_PRIVS ALTER COLUMN GRANTOR nvarchar(128) NULL;
+ALTER TABLE PART_COL_PRIVS ALTER COLUMN GRANTOR_TYPE nvarchar(128) NULL;
+ALTER TABLE PART_COL_PRIVS ALTER COLUMN PRINCIPAL_NAME nvarchar(128) NULL;
+ALTER TABLE PART_COL_PRIVS ALTER COLUMN PRINCIPAL_TYPE nvarchar(128) NULL;
+ALTER TABLE PART_COL_PRIVS ALTER COLUMN PART_COL_PRIV nvarchar(128) NULL;
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+GO
+
+--ALTER TABLE DB_PRIVS
+DROP INDEX DB_PRIVS.DBPRIVILEGEINDEX;
+ALTER TABLE DB_PRIVS ALTER COLUMN GRANTOR nvarchar(128) NULL;
+ALTER TABLE DB_PRIVS ALTER COLUMN GRANTOR_TYPE nvarchar(128) NULL;
+ALTER TABLE DB_PRIVS ALTER COLUMN PRINCIPAL_NAME nvarchar(128) NULL;
+ALTER TABLE DB_PRIVS ALTER COLUMN PRINCIPAL_TYPE nvarchar(128) NULL;
+ALTER TABLE DB_PRIVS ALTER COLUMN DB_PRIV nvarchar(128) NULL;
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+GO
+
+
+--ALTER TABLE TAB_COL_STATS
+ALTER TABLE TAB_COL_STATS ALTER COLUMN "COLUMN_NAME" nvarchar(128) NOT NULL;
+ALTER TABLE TAB_COL_STATS ALTER COLUMN COLUMN_TYPE nvarchar(128) NOT NULL;
+ALTER TABLE TAB_COL_STATS ALTER COLUMN DB_NAME nvarchar(128) NOT NULL;
+ALTER TABLE TAB_COL_STATS ALTER COLUMN BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL;
+ALTER TABLE TAB_COL_STATS ALTER COLUMN BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL;
+ALTER TABLE TAB_COL_STATS ALTER COLUMN "TABLE_NAME" nvarchar(128) NOT NULL;
+GO
+
+
+--ALTER TABLE TYPES
+DROP INDEX TYPES.UNIQUETYPE;
+ALTER TABLE TYPES ALTER COLUMN TYPE_NAME nvarchar(128) NULL;
+ALTER TABLE TYPES ALTER COLUMN TYPE1 nvarchar(767) NULL;
+ALTER TABLE TYPES ALTER COLUMN TYPE2 nvarchar(767) NULL;
+CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
+GO
+
+
+--ALTER TABLE TBL_PRIVS
+DROP INDEX TBL_PRIVS.TABLEPRIVILEGEINDEX;
+ALTER TABLE TBL_PRIVS ALTER COLUMN GRANTOR nvarchar(128) NULL;
+ALTER TABLE TBL_PRIVS ALTER COLUMN GRANTOR_TYPE nvarchar(128) NULL;
+ALTER TABLE TBL_PRIVS ALTER COLUMN PRINCIPAL_NAME nvarchar(128) NULL;
+ALTER TABLE TBL_PRIVS ALTER COLUMN PRINCIPAL_TYPE nvarchar(128) NULL;
+ALTER TABLE TBL_PRIVS ALTER COLUMN TBL_PRIV nvarchar(128) NULL;
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+GO
+
+
+--ALTER TABLE DBS
+DROP INDEX DBS.UNIQUEDATABASE;
+ALTER TABLE DBS ALTER COLUMN "DESC" nvarchar(4000) NULL;
+ALTER TABLE DBS ALTER COLUMN DB_LOCATION_URI nvarchar(4000) NOT NULL;
+ALTER TABLE DBS ALTER COLUMN "NAME" nvarchar(128) NULL;
+ALTER TABLE DBS ALTER COLUMN OWNER_NAME nvarchar(128) NULL;
+ALTER TABLE DBS ALTER COLUMN OWNER_TYPE nvarchar(10) NULL;
+CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME");
+GO
+    
+    
+--ALTER TABLE TBL_COL_PRIVS
+DROP INDEX TBL_COL_PRIVS.TABLECOLUMNPRIVILEGEINDEX;
+ALTER TABLE TBL_COL_PRIVS ALTER COLUMN "COLUMN_NAME" nvarchar(128) NULL;
+ALTER TABLE TBL_COL_PRIVS ALTER COLUMN GRANTOR nvarchar(128) NULL;
+ALTER TABLE TBL_COL_PRIVS ALTER COLUMN GRANTOR_TYPE nvarchar(128) NULL;
+ALTER TABLE TBL_COL_PRIVS ALTER COLUMN PRINCIPAL_NAME nvarchar(128) NULL;
+ALTER TABLE TBL_COL_PRIVS ALTER COLUMN PRINCIPAL_TYPE nvarchar(128) NULL;
+ALTER TABLE TBL_COL_PRIVS ALTER COLUMN TBL_COL_PRIV nvarchar(128) NULL;
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+GO
+
+--ALTER TABLE DELEGATION_TOKENS
+ALTER TABLE DELEGATION_TOKENS DROP CONSTRAINT DELEGATION_TOKENS_PK;
+ALTER TABLE DELEGATION_TOKENS ALTER COLUMN TOKEN_IDENT nvarchar(767) NOT NULL;
+ALTER TABLE DELEGATION_TOKENS ALTER COLUMN TOKEN nvarchar(767) NULL;
+ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
+GO
+
+--ALTER TABLE SERDES    
+ALTER TABLE SERDES ALTER COLUMN "NAME" nvarchar(128) NULL;
+ALTER TABLE SERDES ALTER COLUMN SLIB nvarchar(4000) NULL;
+GO
+
+
+--ALTER TABLE FUNCS    
+DROP INDEX FUNCS.UNIQUEFUNCTION;
+ALTER TABLE FUNCS ALTER COLUMN CLASS_NAME nvarchar(4000) NULL;
+ALTER TABLE FUNCS ALTER COLUMN FUNC_NAME nvarchar(128) NULL;
+ALTER TABLE FUNCS ALTER COLUMN OWNER_NAME nvarchar(128) NULL;
+ALTER TABLE FUNCS ALTER COLUMN OWNER_TYPE nvarchar(10) NULL;
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
+GO
+
+
+--ALTER TABLE ROLE_MAP    
+DROP INDEX ROLE_MAP.USERROLEMAPINDEX;
+ALTER TABLE ROLE_MAP ALTER COLUMN GRANTOR nvarchar(128) NULL;
+ALTER TABLE ROLE_MAP ALTER COLUMN GRANTOR_TYPE nvarchar(128) NULL;
+ALTER TABLE ROLE_MAP ALTER COLUMN PRINCIPAL_NAME nvarchar(128) NULL;
+ALTER TABLE ROLE_MAP ALTER COLUMN PRINCIPAL_TYPE nvarchar(128) NULL;
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+GO
+
+
+--ALTER TABLE TBLS  
+DROP INDEX TBLS.UNIQUETABLE;
+ALTER TABLE TBLS ALTER COLUMN OWNER nvarchar(767) NULL;
+ALTER TABLE TBLS ALTER COLUMN TBL_NAME nvarchar(128) NULL;
+ALTER TABLE TBLS ALTER COLUMN TBL_TYPE nvarchar(128) NULL;
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+GO
+
+--ALTER TABLE SDS    
+ALTER TABLE SDS ALTER COLUMN INPUT_FORMAT nvarchar(4000) NULL;
+ALTER TABLE SDS ALTER COLUMN OUTPUT_FORMAT nvarchar(4000) NULL;
+GO
+
+--ALTER TABLE PARTITION_EVENTS    
+DROP INDEX PARTITION_EVENTS.PARTITIONEVENTINDEX;
+ALTER TABLE PARTITION_EVENTS ALTER COLUMN DB_NAME nvarchar(128) NULL;
+ALTER TABLE PARTITION_EVENTS ALTER COLUMN PARTITION_NAME nvarchar(767) NULL;
+ALTER TABLE PARTITION_EVENTS ALTER COLUMN TBL_NAME nvarchar(128) NULL;
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+GO
+
+--ALTER TABLE SORT_COLS
+ALTER TABLE SORT_COLS ALTER COLUMN "COLUMN_NAME" nvarchar(128) NULL;
+GO
+
+--ALTER TABLE SKEWED_COL_NAMES
+ALTER TABLE SKEWED_COL_NAMES ALTER COLUMN SKEWED_COL_NAME nvarchar(255) NULL;    
+GO
+
+
+--ALTER TABLE SKEWED_COL_VALUE_LOC_MAP
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ALTER COLUMN LOCATION nvarchar(4000) NULL;    
+GO
+
+--ALTER TABLE SKEWED_STRING_LIST_VALUES
+ALTER TABLE SKEWED_STRING_LIST_VALUES ALTER COLUMN STRING_LIST_VALUE nvarchar(255) NULL;
+GO
+
+--ALTER TABLE PARTITION_KEYS 
+ALTER TABLE PARTITION_KEYS DROP CONSTRAINT PARTITION_KEY_PK;
+ALTER TABLE PARTITION_KEYS ALTER COLUMN PKEY_COMMENT nvarchar(4000) NULL;
+ALTER TABLE PARTITION_KEYS ALTER COLUMN PKEY_NAME nvarchar(128) NOT NULL;
+ALTER TABLE PARTITION_KEYS ALTER COLUMN PKEY_TYPE nvarchar(767) NOT NULL;
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+GO
+
+--ALTER TABLE SD_PARAMS
+ALTER TABLE SD_PARAMS DROP CONSTRAINT SD_PARAMS_PK;
+ALTER TABLE SD_PARAMS ALTER COLUMN PARAM_KEY nvarchar(256) NOT NULL;
+ALTER TABLE SD_PARAMS ALTER COLUMN PARAM_VALUE nvarchar(4000) NULL;
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+GO
+
+
+--ALTER TABLE FUNC_RU
+ALTER TABLE FUNC_RU ALTER COLUMN RESOURCE_URI nvarchar(4000) NULL;    
+GO
+
+
+--ALTER TABLE TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS DROP CONSTRAINT TYPE_FIELDS_PK;
+ALTER TABLE TYPE_FIELDS ALTER COLUMN COMMENT nvarchar(256) NULL;
+ALTER TABLE TYPE_FIELDS ALTER COLUMN FIELD_NAME nvarchar(128) NOT NULL;
+ALTER TABLE TYPE_FIELDS ALTER COLUMN FIELD_TYPE nvarchar(767) NOT NULL;
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+GO
+
+--ALTER TABLE BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ALTER COLUMN BUCKET_COL_NAME nvarchar(255) NULL;
+GO
+
+--ALTER TABLE DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS DROP CONSTRAINT DATABASE_PARAMS_PK;
+ALTER TABLE DATABASE_PARAMS ALTER COLUMN PARAM_KEY nvarchar(180) NOT NULL;
+ALTER TABLE DATABASE_PARAMS ALTER COLUMN PARAM_VALUE nvarchar(4000) NULL;
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+GO
+
+
+--ALTER TABLE INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS DROP CONSTRAINT INDEX_PARAMS_PK;
+ALTER TABLE INDEX_PARAMS ALTER COLUMN PARAM_KEY nvarchar(256) NOT NULL;
+ALTER TABLE INDEX_PARAMS ALTER COLUMN PARAM_VALUE nvarchar(4000) NULL;
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+GO
+
+--ALTER TABLE COLUMNS_V2
+ALTER TABLE COLUMNS_V2 DROP CONSTRAINT COLUMNS_PK;
+ALTER TABLE COLUMNS_V2 ALTER COLUMN COMMENT nvarchar(256) NULL;
+ALTER TABLE COLUMNS_V2 ALTER COLUMN "COLUMN_NAME" nvarchar(128) NOT NULL;
+ALTER TABLE COLUMNS_V2 ALTER COLUMN TYPE_NAME nvarchar(4000) NOT NULL;
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+GO
+
+--ALTER TABLE SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS DROP CONSTRAINT SERDE_PARAMS_PK;
+ALTER TABLE SERDE_PARAMS ALTER COLUMN PARAM_KEY nvarchar(256) NOT NULL;
+ALTER TABLE SERDE_PARAMS ALTER COLUMN PARAM_VALUE nvarchar(4000) NULL;
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+GO
+
+--ALTER TABLE PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS DROP CONSTRAINT PARTITION_PARAMS_PK;
+ALTER TABLE PARTITION_PARAMS ALTER COLUMN PARAM_KEY nvarchar(256) NOT NULL;
+ALTER TABLE PARTITION_PARAMS ALTER COLUMN PARAM_VALUE nvarchar(4000) NULL;
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+GO
+
+--ALTER TABLE TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS DROP CONSTRAINT TABLE_PARAMS_PK;
+ALTER TABLE TABLE_PARAMS ALTER COLUMN PARAM_KEY nvarchar(256) NOT NULL;
+ALTER TABLE TABLE_PARAMS ALTER COLUMN PARAM_VALUE nvarchar(4000) NULL;
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+GO
+
+--ALTER TABLE NOTIFICATION_LOG
+ALTER TABLE NOTIFICATION_LOG DROP CONSTRAINT NOTIFICATION_LOG_PK;
+ALTER TABLE NOTIFICATION_LOG ALTER COLUMN EVENT_TYPE nvarchar(32) NOT NULL;
+ALTER TABLE NOTIFICATION_LOG ALTER COLUMN DB_NAME nvarchar(128) NOT NULL;
+ALTER TABLE NOTIFICATION_LOG ALTER COLUMN TBL_NAME nvarchar(128) NOT NULL;
+ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+GO
+
+--ALTER TABLE COMPACTION_QUEUE
+ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_DATABASE nvarchar(128) NOT NULL;
+ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_TABLE nvarchar(128) NOT NULL;
+ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_PARTITION nvarchar(767) NULL;
+ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_WORKER_ID nvarchar(128) NULL;
+ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_RUN_AS nvarchar(128) NULL;
+GO
+
+--ALTER TABLE COMPLETED_TXN_COMPONENTS
+ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_DATABASE nvarchar(128) NOT NULL;
+ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_TABLE nvarchar(128) NULL;
+ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_PARTITION nvarchar(767) NULL;
+GO
+
+--ALTER TABLE HIVE_LOCKS
+ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_DB nvarchar(128) NOT NULL;
+ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_TABLE nvarchar(128) NULL;
+ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_PARTITION nvarchar(767) NULL;
+ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_USER nvarchar(128) NOT NULL;
+ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_HOST nvarchar(128) NOT NULL;
+GO
+
+
+--ALTER TABLE TXNS
+ALTER TABLE TXNS ALTER COLUMN TXN_USER nvarchar(128) NOT NULL;
+ALTER TABLE TXNS ALTER COLUMN TXN_HOST nvarchar(128) NOT NULL;
+GO
+
+--ALTER TABLE TXN_COMPONENTS
+ALTER TABLE TXN_COMPONENTS ALTER COLUMN TC_DATABASE nvarchar(128) NOT NULL;
+ALTER TABLE TXN_COMPONENTS ALTER COLUMN TC_TABLE nvarchar(128) NULL;
+ALTER TABLE TXN_COMPONENTS ALTER COLUMN TC_PARTITION nvarchar(767) NULL;
+GO


[27/50] [abbrv] hive git commit: Revert "HIVE-9736 : StorageBasedAuthProvider should batch namenode-calls where possible (Mithun Radhakrishnan, reviewed by Chris Nauroth, Sushanth Sowmyan)"

Posted by xu...@apache.org.
Revert "HIVE-9736 : StorageBasedAuthProvider should batch namenode-calls where possible (Mithun Radhakrishnan, reviewed by Chris Nauroth, Sushanth Sowmyan)"

This reverts commit 19886150121b6081127bf1e581b24d8dcc12f1df.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ecde4ae9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ecde4ae9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ecde4ae9

Branch: refs/heads/beeline-cli
Commit: ecde4ae96f88ed88141a6593e1f935126d6466f3
Parents: 306e61a
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 17:53:00 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 17:53:00 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/common/FileUtils.java    | 155 ++++++-------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   7 -
 .../StorageBasedAuthorizationProvider.java      | 114 +-------------
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |  29 +---
 .../org/apache/hadoop/fs/DefaultFileAccess.java |  65 +++-----
 .../apache/hadoop/hive/shims/HadoopShims.java   |  24 +--
 .../hadoop/hive/shims/HadoopShimsSecure.java    |   8 -
 7 files changed, 84 insertions(+), 318 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 536fe11..c2c54bc 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -25,16 +25,12 @@ import java.net.URISyntaxException;
 import java.security.AccessControlException;
 import java.security.PrivilegedExceptionAction;
 import java.util.BitSet;
-import java.util.EnumSet;
-import java.util.Iterator;
 import java.util.List;
 
-import com.google.common.base.Function;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DefaultFileAccess;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -373,54 +369,26 @@ public final class FileUtils {
   public static void checkFileAccessWithImpersonation(final FileSystem fs,
       final FileStatus stat, final FsAction action, final String user)
           throws IOException, AccessControlException, InterruptedException, Exception {
-    checkFileAccessWithImpersonation(fs,
-                                     Iterators.singletonIterator(stat),
-                                     EnumSet.of(action),
-                                     user);
-  }
-
-  /**
-   * Perform a check to determine if the user is able to access the file passed in.
-   * If the user name passed in is different from the current user, this method will
-   * attempt to do impersonate the user to do the check; the current user should be
-   * able to create proxy users in this case.
-   * @param fs   FileSystem of the path to check
-   * @param statuses FileStatus instances representing the file
-   * @param actions The FsActions that will be checked
-   * @param user User name of the user that will be checked for access.  If the user name
-   *             is null or the same as the current user, no user impersonation will be done
-   *             and the check will be done as the current user. Otherwise the file access
-   *             check will be performed within a doAs() block to use the access privileges
-   *             of this user. In this case the user must be configured to impersonate other
-   *             users, otherwise this check will fail with error.
-   * @throws IOException
-   * @throws AccessControlException
-   * @throws InterruptedException
-   * @throws Exception
-   */
-  public static void checkFileAccessWithImpersonation(final FileSystem fs,
-      final Iterator<FileStatus> statuses, final EnumSet<FsAction> actions, final String user)
-          throws IOException, AccessControlException, InterruptedException, Exception {
     UserGroupInformation ugi = Utils.getUGI();
     String currentUser = ugi.getShortUserName();
 
     if (user == null || currentUser.equals(user)) {
       // No need to impersonate user, do the checks as the currently configured user.
-      ShimLoader.getHadoopShims().checkFileAccess(fs, statuses, actions);
-    }
-    else {
-      // Otherwise, try user impersonation. Current user must be configured to do user impersonation.
-      UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(
-          user, UserGroupInformation.getLoginUser());
-      proxyUser.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
-          ShimLoader.getHadoopShims().checkFileAccess(fsAsUser, statuses, actions);
-          return null;
-        }
-      });
+      ShimLoader.getHadoopShims().checkFileAccess(fs, stat, action);
+      return;
     }
+
+    // Otherwise, try user impersonation. Current user must be configured to do user impersonation.
+    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(
+        user, UserGroupInformation.getLoginUser());
+    proxyUser.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
+        ShimLoader.getHadoopShims().checkFileAccess(fsAsUser, stat, action);
+        return null;
+      }
+    });
   }
 
   /**
@@ -709,91 +677,70 @@ public final class FileUtils {
    * @param path
    * @param conf
    * @param user
+   * @throws AccessControlException
+   * @throws InterruptedException
    * @throws Exception
    */
-  public static void checkDeletePermission(Path path, Configuration conf, String user) throws  Exception {
+  public static void checkDeletePermission(Path path, Configuration conf, String user)
+      throws AccessControlException, InterruptedException, Exception {
+   // This requires ability to delete the given path.
+    // The following 2 conditions should be satisfied for this-
+    // 1. Write permissions on parent dir
+    // 2. If sticky bit is set on parent dir then one of following should be
+    // true
+    //   a. User is owner of the current dir/file
+    //   b. User is owner of the parent dir
+    //   Super users are also allowed to drop the file, but there is no good way of checking
+    //   if a user is a super user. Also super users running hive queries is not a common
+    //   use case. super users can also do a chown to be able to drop the file
 
     if(path == null) {
       // no file/dir to be deleted
       return;
     }
 
-    // check user has write permissions on the parent dir
     final FileSystem fs = path.getFileSystem(conf);
+    // check user has write permissions on the parent dir
     FileStatus stat = null;
     try {
       stat = fs.getFileStatus(path);
     } catch (FileNotFoundException e) {
       // ignore
     }
-
     if (stat == null) {
       // no file/dir to be deleted
       return;
     }
-
-    checkDeletePermission(fs, Lists.newArrayList(stat), conf, user);
-  }
-
-  /**
-   * Checks if delete can be performed on given path by given user.
-   * If file does not exist it just returns without throwing an Exception
-   * @param fs The FileSystem instance
-   * @param fileStatuses The FileStatus instances for the paths being checked.
-   * @param conf Configuration, corresponding to the FileSystem.
-   * @param user The user, whose permission is to be checked.
-   * @throws Exception
-   */
-  public static void checkDeletePermission(FileSystem fs, Iterable<FileStatus> fileStatuses,
-                                           Configuration conf, String user) throws Exception {
-
-    // This requires ability to delete the given path.
-    // The following 2 conditions should be satisfied for this-
-    // 1. Write permissions on parent dir
-    // 2. If sticky bit is set on parent dir then one of following should be
-    // true
-    //   a. User is owner of the current dir/file
-    //   b. User is owner of the parent dir
-    FileUtils.checkFileAccessWithImpersonation(fs, fileStatuses.iterator(), EnumSet.of(FsAction.WRITE), user);
+    FileUtils.checkFileAccessWithImpersonation(fs, stat, FsAction.WRITE, user);
 
     HadoopShims shims = ShimLoader.getHadoopShims();
     if (!shims.supportStickyBit()) {
-      // No support for sticky-bit.
+      // sticky bit is not supported
       return;
     }
 
-    List<Path> allParentPaths =
-        Lists.newArrayList(
-            Iterators.transform(fileStatuses.iterator(), new Function<FileStatus, Path>() {
-              @Override
-              public Path apply(FileStatus input) {
-                return input.getPath().getParent();
-              }
-            })
-        );
-
-    Iterator<FileStatus> childStatusIterator = fileStatuses.iterator();
-    for (List<Path> parentPaths : Lists.partition(allParentPaths, getListStatusBatchSize(conf))) {
-      for (FileStatus parentFileStatus : fs.listStatus(parentPaths.toArray(new Path[parentPaths.size()]))) {
-        assert childStatusIterator.hasNext() : "Number of parent-file-statuses doesn't match children.";
-        FileStatus childFileStatus = childStatusIterator.next();
-        // Check sticky-bits on parent-dirs.
-        if (shims.hasStickyBit(parentFileStatus.getPermission())
-            && !parentFileStatus.getOwner().equals(user)
-            && !childFileStatus.getOwner().equals(user)) {
-          throw new IOException(String.format("Permission Denied: User %s can't delete %s because sticky bit is\""
-              + " set on the parent dir and user does not own this file or its parent\"", user, childFileStatus.getPath()));
-        }
-      } // for_each( parent_path );
-    } // for_each( batch_of_parentPaths );
+    // check if sticky bit is set on the parent dir
+    FileStatus parStatus = fs.getFileStatus(path.getParent());
+    if (!shims.hasStickyBit(parStatus.getPermission())) {
+      // no sticky bit, so write permission on parent dir is sufficient
+      // no further checks needed
+      return;
+    }
 
-    assert !childStatusIterator.hasNext() : "Did not process all file-statuses.";
+    // check if user is owner of parent dir
+    if (parStatus.getOwner().equals(user)) {
+      return;
+    }
 
-  } // static void checkDeletePermission();
+    // check if user is owner of current dir/file
+    FileStatus childStatus = fs.getFileStatus(path);
+    if (childStatus.getOwner().equals(user)) {
+      return;
+    }
+    String msg = String.format("Permission Denied: User %s can't delete %s because sticky bit is"
+        + " set on the parent dir and user does not own this file or its parent", user, path);
+    throw new IOException(msg);
 
-  private static int getListStatusBatchSize(Configuration configuration) {
-    return HiveConf.getIntVar(configuration,
-        HiveConf.ConfVars.HIVE_AUTHORIZATION_HDFS_LIST_STATUS_BATCH_SIZE);
   }
 
   /**
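
Once write access on the parent has been verified, the restored delete check above reduces to the sticky-bit rules spelled out in its comment block. A condensed restatement with simplified signatures (the real code consults the shim's supportStickyBit()/hasStickyBit(); FsPermission.getStickyBit() is used here as an approximation):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DeleteCheckSketch {
      // WRITE access on the parent dir is assumed to have been checked already
      // (checkFileAccessWithImpersonation in the restored code).
      static void checkDelete(FileSystem fs, Path path, String user) throws IOException {
        FileStatus parent = fs.getFileStatus(path.getParent());
        if (!parent.getPermission().getStickyBit()) {
          return;   // no sticky bit: write permission on the parent suffices
        }
        if (parent.getOwner().equals(user)) {
          return;   // user owns the parent dir
        }
        if (fs.getFileStatus(path).getOwner().equals(user)) {
          return;   // user owns the file/dir itself
        }
        throw new IOException("Permission Denied: " + user + " can't delete " + path
            + " because the sticky bit is set on the parent dir");
      }
    }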

http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index db17f0f..54e154c 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1629,13 +1629,6 @@ public class HiveConf extends Configuration {
         "of updating the original list means that you can append to the defaults\n" +
         "set by SQL standard authorization instead of replacing it entirely."),
 
-    HIVE_AUTHORIZATION_HDFS_LIST_STATUS_BATCH_SIZE(
-      "hive.authprovider.hdfs.liststatus.batch.size", 1000,
-      "Number of FileStatus objects to be queried for when listing files, for HDFS-based authorization.\n" +
-          "Note: If this exceeds dfs.ls.limit (as set in hdfs-site.xml), DFSClient might use the smaller value as \n" +
-          "the batch-size, internally."
-    ),
-
     HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false, "Whether to print the names of the columns in query output."),
 
     HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false,

http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
index 6a5c510..8f81ef9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
@@ -18,20 +18,15 @@
 
 package org.apache.hadoop.hive.ql.security.authorization;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.AccessControlException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.EnumSet;
-import java.util.Iterator;
 import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
-import com.google.common.base.Function;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -40,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -69,7 +63,7 @@ import org.apache.hadoop.hive.ql.metadata.Table;
  * out to the parent directory recursively to determine its permissions till
  * it finds a parent that does exist.
  */
-public class StorageBasedAuthorizationProvider extends HiveMultiPartitionAuthorizationProviderBase
+public class StorageBasedAuthorizationProvider extends HiveAuthorizationProviderBase
     implements HiveMetastoreAuthorizationProvider {
 
   private Warehouse wh;
@@ -248,89 +242,6 @@ public class StorageBasedAuthorizationProvider extends HiveMultiPartitionAuthori
     }
   }
 
-  @Override
-  public void authorize(Table table, Iterable<Partition> partitions,
-                        Privilege[] requiredReadPrivileges, Privilege[] requiredWritePrivileges)
-       throws HiveException, AuthorizationException {
-
-    try {
-      class MustCheckTablePermissions { // For closure.
-        public boolean value = false;
-      }
-
-      final MustCheckTablePermissions mustCheckTablePermissions = new MustCheckTablePermissions();
-      final FileSystem fs = table.getDataLocation().getFileSystem(getConf());
-
-      // Get partition paths. Filter out null-partitions, and partitions without data-locations.
-      Iterator<Partition> nonNullPartitions
-               = Iterators.filter(partitions.iterator(), new Predicate<Partition>() {
-        @Override
-        public boolean apply(Partition partition) {
-          try {
-            boolean isValidPartitionPath = partition != null
-                                           && partition.getDataLocation() != null
-                                           && fs.exists(partition.getDataLocation());
-            mustCheckTablePermissions.value |= isValidPartitionPath;
-            return isValidPartitionPath;
-          }
-          catch (IOException exception){
-            throw new RuntimeException("Could not find location for partition: " + partition, exception);
-          }
-        }
-      });
-
-      if (mustCheckTablePermissions.value) {
-        // At least one partition was null, or had a non-existent path. So check table-permissions, once.
-        // Partition path can be null in the case of a new create partition - in this case,
-        // we try to default to checking the permissions of the parent table.
-        // Partition itself can also be null, in cases where this gets called as a generic
-        // catch-all call in cases like those with CTAS onto an unpartitioned table (see HIVE-1887)
-
-        // this should be the case only if this is a create partition.
-        // The privilege needed on the table should be ALTER_DATA, and not CREATE
-        authorize(table, new Privilege[]{}, new Privilege[]{Privilege.ALTER_DATA});
-      }
-
-
-      // authorize drops if there was a drop privilege requirement
-      // extract drop privileges
-      DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(requiredReadPrivileges, requiredWritePrivileges);
-      requiredReadPrivileges = privExtractor.getReadReqPriv();
-      requiredWritePrivileges = privExtractor.getWriteReqPriv();
-      EnumSet<FsAction> actions = getFsActions(requiredReadPrivileges);
-      actions.addAll(getFsActions(requiredWritePrivileges));
-
-      ArrayList<Path> allPartitionPaths
-                = Lists.newArrayList(Iterators.transform(nonNullPartitions, new Function<Partition, Path>() {
-        @Override
-        public Path apply(Partition input) {
-          return input.getDataLocation();
-        }
-      }));
-
-      for (List<Path> partitionPaths : Lists.partition(allPartitionPaths, getListStatusBatchSize(getConf()))) {
-
-        List<FileStatus> fileStatuses = Arrays.asList(
-            fs.listStatus(partitionPaths.toArray(new Path[partitionPaths.size()])));
-
-        if (privExtractor.hasDropPrivilege) {
-          FileUtils.checkDeletePermission(fs, fileStatuses, getConf(), authenticator.getUserName());
-        }
-
-        checkPermissions(fs, fileStatuses.iterator(), actions, authenticator.getUserName());
-      }
-
-    }
-    catch (Exception exception) {
-      throw hiveException(exception);
-    }
-  }
-
-  private static int getListStatusBatchSize(Configuration configuration) {
-    return HiveConf.getIntVar(configuration,
-                              HiveConf.ConfVars.HIVE_AUTHORIZATION_HDFS_LIST_STATUS_BATCH_SIZE);
-  }
-
   private void checkDeletePermission(Path dataLocation, Configuration conf, String userName)
       throws HiveException {
     try {
@@ -477,28 +388,17 @@ public class StorageBasedAuthorizationProvider extends HiveMultiPartitionAuthori
   protected static void checkPermissions(final FileSystem fs, final FileStatus stat,
       final EnumSet<FsAction> actions, String user) throws IOException,
       AccessControlException, HiveException {
-    checkPermissions(fs, Iterators.singletonIterator(stat), actions, user);
-  }
-
-  @SuppressWarnings("deprecation")
-  protected static void checkPermissions(final FileSystem fs, Iterator<FileStatus> fileStatuses,
-                                         final EnumSet<FsAction> actions, String user)
-      throws IOException, AccessControlException, HiveException {
 
+    if (stat == null) {
+      // File named by path doesn't exist; nothing to validate.
+      return;
+    }
     FsAction checkActions = FsAction.NONE;
     for (FsAction action : actions) {
       checkActions = checkActions.or(action);
     }
-
-    Iterator<FileStatus> nonNullFileStatuses = Iterators.filter(fileStatuses, new Predicate<FileStatus>() {
-      @Override
-      public boolean apply(FileStatus fileStatus) {
-        return fileStatus != null;
-      }
-    });
-
     try {
-      FileUtils.checkFileAccessWithImpersonation(fs, nonNullFileStatuses, EnumSet.of(checkActions), user);
+      FileUtils.checkFileAccessWithImpersonation(fs, stat, checkActions, user);
     } catch (Exception err) {
       // fs.permission.AccessControlException removed by HADOOP-11356, but Hive users on older
       // Hadoop versions may still see this exception .. have to reference by name.

http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 4547baa..d349068 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -29,11 +29,11 @@ import java.security.AccessControlException;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Comparator;
-import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.commons.lang.StringUtils;
@@ -986,33 +986,6 @@ public class Hadoop23Shims extends HadoopShimsSecure {
     }
   }
 
-  @Override
-  public void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> actions)
-      throws IOException, AccessControlException, Exception {
-    try {
-      if (accessMethod == null) {
-        // Have to rely on Hive implementation of filesystem permission checks.
-        DefaultFileAccess.checkFileAccess(fs, statuses, actions);
-      }
-      else {
-        while (statuses.hasNext()) {
-          accessMethod.invoke(fs, statuses.next(), combine(actions));
-        }
-      }
-
-    } catch (Exception err) {
-      throw wrapAccessException(err);
-    }
-  }
-
-  private static FsAction combine(EnumSet<FsAction> actions) {
-    FsAction resultantAction = FsAction.NONE;
-    for (FsAction action : actions) {
-      resultantAction = resultantAction.or(action);
-    }
-    return resultantAction;
-  }
-
   /**
    * If there is an AccessException buried somewhere in the chain of failures, wrap the original
    * exception in an AccessException. Othewise just return the original exception.

http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java b/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
index c4261cb..45ca210 100644
--- a/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
+++ b/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
@@ -18,22 +18,23 @@
 
 package org.apache.hadoop.fs;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.AccessControlException;
+import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.EnumSet;
-import java.util.Iterator;
 import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
-import com.google.common.collect.Iterators;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -46,7 +47,7 @@ public class DefaultFileAccess {
 
   private static Log LOG = LogFactory.getLog(DefaultFileAccess.class);
 
-  private static List<String> emptyGroups = Collections.emptyList();
+  private static List<String> emptyGroups = new ArrayList<String>(0);
 
   public static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action)
       throws IOException, AccessControlException, LoginException {
@@ -59,62 +60,34 @@ public class DefaultFileAccess {
 
   public static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action,
       String user, List<String> groups) throws IOException, AccessControlException {
-    checkFileAccess(fs, Iterators.singletonIterator(stat), EnumSet.of(action), user, groups);
-  }
-
-  public static void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> actions,
-                                     String user, List<String> groups)
-    throws IOException, AccessControlException {
 
     if (groups == null) {
       groups = emptyGroups;
     }
 
-    // Short-circuit for super-users.
     String superGroupName = getSuperGroupName(fs.getConf());
     if (userBelongsToSuperGroup(superGroupName, groups)) {
       LOG.info("User \"" + user + "\" belongs to super-group \"" + superGroupName + "\". " +
-          "Permission granted for actions: " + actions + ".");
+          "Permission granted for action: " + action + ".");
       return;
     }
 
-    while (statuses.hasNext()) {
-
-      FileStatus stat = statuses.next();
-      final FsPermission dirPerms = stat.getPermission();
-      final String grp = stat.getGroup();
+    final FsPermission dirPerms = stat.getPermission();
+    final String grp = stat.getGroup();
 
-      FsAction combinedAction = combine(actions);
-      if (user.equals(stat.getOwner())) {
-        if (dirPerms.getUserAction().implies(combinedAction)) {
-          continue;
-        }
-      } else if (groups.contains(grp)) {
-        if (dirPerms.getGroupAction().implies(combinedAction)) {
-          continue;
-        }
-      } else if (dirPerms.getOtherAction().implies(combinedAction)) {
-        continue;
+    if (user.equals(stat.getOwner())) {
+      if (dirPerms.getUserAction().implies(action)) {
+        return;
       }
-
-      throw new AccessControlException("action " + combinedAction + " not permitted on path "
-          + stat.getPath() + " for user " + user);
-
-    } // for_each(fileStatus);
-  }
-
-  private static FsAction combine(EnumSet<FsAction> actions) {
-    FsAction resultantAction = FsAction.NONE;
-    for (FsAction action : actions) {
-      resultantAction = resultantAction.or(action);
+    } else if (groups.contains(grp)) {
+      if (dirPerms.getGroupAction().implies(action)) {
+        return;
+      }
+    } else if (dirPerms.getOtherAction().implies(action)) {
+      return;
     }
-    return resultantAction;
-  }
-
-  public static void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> actions)
-    throws IOException, AccessControlException, LoginException {
-    UserGroupInformation ugi = Utils.getUGI();
-    checkFileAccess(fs, statuses, actions, ugi.getShortUserName(), Arrays.asList(ugi.getGroupNames()));
+    throw new AccessControlException("action " + action + " not permitted on path "
+        + stat.getPath() + " for user " + user);
   }
 
   private static String getSuperGroupName(Configuration configuration) {

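The restored single-FileStatus path above is a plain POSIX-style check: owner
bits if the user owns the file, group bits if one of the user's groups matches
the file's group, other bits otherwise, and AccessControlException when the
matching bits do not imply the requested action. A self-contained sketch of
that logic (class and method names here are illustrative; the real code is
DefaultFileAccess.checkFileAccess):

    import java.security.AccessControlException;
    import java.util.List;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermissionCheckSketch {

      // Owner bits first, then group, then other -- the first matching class decides.
      static void check(FileStatus stat, FsAction action, String user, List<String> groups)
          throws AccessControlException {
        FsPermission perms = stat.getPermission();
        if (user.equals(stat.getOwner())) {
          if (perms.getUserAction().implies(action)) {
            return;
          }
        } else if (groups.contains(stat.getGroup())) {
          if (perms.getGroupAction().implies(action)) {
            return;
          }
        } else if (perms.getOtherAction().implies(action)) {
          return;
        }
        throw new AccessControlException("action " + action + " not permitted on path "
            + stat.getPath() + " for user " + user);
      }
    }
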
http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index 4b79d95..5a6bc44 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -24,17 +24,19 @@ import java.net.URI;
 import java.nio.ByteBuffer;
 import java.security.AccessControlException;
 import java.security.NoSuchAlgorithmException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Comparator;
-import java.util.EnumSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 
+import javax.security.auth.login.LoginException;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -45,6 +47,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.shims.HadoopShims.StoragePolicyValue;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobConf;
@@ -527,21 +530,6 @@ public interface HadoopShims {
       throws IOException, AccessControlException, Exception;
 
   /**
-   * Check if the configured UGI has access to the path for the given file system action.
-   * Method will return successfully if action is permitted. AccessControlException will
-   * be thrown if user does not have access to perform the action. Other exceptions may
-   * be thrown for non-access related errors.
-   * @param fs The FileSystem instance
-   * @param statuses The FileStatuses for the paths being checked
-   * @param actions The FsActions being checked
-   * @throws IOException
-   * @throws AccessControlException
-   * @throws Exception
-   */
-  public void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> actions)
-      throws Exception;
-
-  /**
    * Use password API (if available) to fetch credentials/password
    * @param conf
    * @param name

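With the Iterator overload gone, callers are left with the single-status form
retained above. A minimal sketch of invoking it through ShimLoader (the path
"/tmp/example" and the READ action are placeholders, not taken from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.hive.shims.ShimLoader;

    public class ShimAccessCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FileStatus stat = fs.getFileStatus(new Path("/tmp/example"));
        // Throws AccessControlException if the current UGI may not read the path.
        ShimLoader.getHadoopShims().checkFileAccess(fs, stat, FsAction.READ);
      }
    }
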
http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
index 8e51c02..89d7798 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
@@ -25,9 +25,7 @@ import java.net.URI;
 import java.security.AccessControlException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.EnumSet;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Set;
 
 import org.apache.commons.lang.ArrayUtils;
@@ -393,11 +391,5 @@ public abstract class HadoopShimsSecure implements HadoopShims {
   }
 
   @Override
-  public void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> action)
-      throws IOException, AccessControlException, Exception {
-    DefaultFileAccess.checkFileAccess(fs, statuses, action);
-  }
-
-  @Override
   abstract public void addDelegationTokens(FileSystem fs, Credentials cred, String uname) throws IOException;
 }


[46/50] [abbrv] hive git commit: HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count into account (Laljo John Pullokkaran reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
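
The golden-file diffs below all fall out of the one-line idea in the commit
title: when two join orders have costs within epsilon of each other, break the
tie on row count instead of treating them as equal. A hypothetical illustration
of that comparison (this is not the actual HiveCost code, just the shape of the
fix; the Cost class and the numbers are made up):

    public class EpsilonCostSketch {

      private static final double EPSILON = 1e-5;

      // Hypothetical cost holder; not Hive's HiveCost.
      static class Cost {
        final double cpu, io, rows;
        Cost(double cpu, double io, double rows) {
          this.cpu = cpu; this.io = io; this.rows = rows;
        }
        double total() { return cpu + io; }
      }

      // Epsilon-only equality: near-equal totals look identical even when
      // the two plans produce very different row counts.
      static boolean equalsEpsilonOnly(Cost a, Cost b) {
        return Math.abs(a.total() - b.total()) < EPSILON;
      }

      // With a row-count tie-break, the plan producing fewer rows wins.
      static boolean isCheaper(Cost a, Cost b) {
        if (Math.abs(a.total() - b.total()) >= EPSILON) {
          return a.total() < b.total();
        }
        return a.rows < b.rows;
      }

      public static void main(String[] args) {
        Cost moreRows = new Cost(10.0, 5.0, 15.0);
        Cost fewRows  = new Cost(10.000001, 5.0, 7.0);
        System.out.println(equalsEpsilonOnly(moreRows, fewRows)); // true
        System.out.println(isCheaper(fewRows, moreRows));         // true
      }
    }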
http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out b/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
index f0d96c4..dabdcb8 100644
--- a/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
@@ -390,9 +390,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -401,71 +401,69 @@ STAGE PLANS:
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                    predicate: p_partkey is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
-        Map 4 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_partkey is not null (type: boolean)
+                    predicate: p_name is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: p_partkey (type: int), p_name (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: p_name (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: int)
+                        key expressions: _col0 (type: string)
                         sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                         Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int)
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_name is not null (type: boolean)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: p_name (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                      expressions: p_partkey (type: int), p_name (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -473,27 +471,11 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col3
-                Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                  Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: int), _col1 (type: string)
-                  1 _col0 (type: int), _col1 (type: string)
-                outputColumnNames: _col1, _col3, _col5, _col6
+                  1 _col1 (type: int)
+                outputColumnNames: _col1, _col2, _col4, _col6
                 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col3 (type: string)
+                  expressions: _col4 (type: string), _col6 (type: string), _col2 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -503,22 +485,38 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 4 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col1 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col2
+                  0 _col0 (type: string)
+                  1 _col3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col4
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                  key expressions: _col1 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: int)
                   Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string)
+                  value expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int), _col1 (type: string)
+                  1 _col0 (type: int), _col1 (type: string)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col3 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col3 (type: string)
+                  Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string)
 
   Stage: Stage-0
     Fetch Operator
@@ -544,9 +542,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -555,36 +553,35 @@ STAGE PLANS:
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                    predicate: p_partkey is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
-        Map 4 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_partkey is not null (type: boolean)
+                    predicate: p_name is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: p_partkey (type: int), p_name (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: p_name (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: int)
+                        key expressions: _col0 (type: string)
                         sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -598,28 +595,27 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
+                        key expressions: _col1 (type: string), _col0 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
                         Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int)
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_name is not null (type: boolean)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: p_name (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                      expressions: p_partkey (type: int), p_name (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                        key expressions: _col1 (type: string), _col0 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
+                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -627,27 +623,11 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col3
-                Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string), _col0 (type: int)
-                  sort order: ++
-                  Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
-                  Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col1 (type: string), _col0 (type: int)
-                  1 _col1 (type: string), _col0 (type: int)
-                outputColumnNames: _col1, _col3, _col5, _col6
+                  1 _col1 (type: int)
+                outputColumnNames: _col1, _col2, _col4, _col6
                 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col3 (type: string)
+                  expressions: _col4 (type: string), _col6 (type: string), _col2 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -657,22 +637,38 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 4 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col1 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col2
+                  0 _col0 (type: string)
+                  1 _col3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col4
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col1 (type: string), _col0 (type: int)
-                  sort order: ++
-                  Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
+                  key expressions: _col1 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: int)
                   Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string)
+                  value expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col1 (type: string), _col0 (type: int)
+                  1 _col1 (type: string), _col0 (type: int)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col3 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col3 (type: string)
+                  Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string)
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
index ff1626f..26e05ac 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
@@ -150,9 +150,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -161,72 +161,72 @@ STAGE PLANS:
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                    predicate: p_partkey is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
-        Map 4 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_partkey is not null (type: boolean)
+                    predicate: p_name is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                       Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: int)
+                        key expressions: _col1 (type: string)
                         sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                        Map-reduce partition columns: _col1 (type: string)
                         Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_name is not null (type: boolean)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                       Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                         Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -234,27 +234,11 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-                Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                  Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: int), _col1 (type: string)
-                  1 _col9 (type: int), _col10 (type: string)
+                  1 _col9 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
                 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
+                  expressions: _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
                   Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -264,22 +248,38 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 4 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
                   0 _col1 (type: string)
-                  1 _col1 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+                  1 _col10 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col9 (type: int), _col10 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col9 (type: int), _col10 (type: string)
+                  key expressions: _col9 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col9 (type: int)
                   Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
+                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string)
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int), _col1 (type: string)
+                  1 _col0 (type: int), _col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+                Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col10 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col10 (type: string)
+                  Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
index 417ba4f..c821fe4 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
@@ -154,9 +154,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -165,72 +165,72 @@ STAGE PLANS:
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                    predicate: p_partkey is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
-        Map 4 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_partkey is not null (type: boolean)
+                    predicate: p_name is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                       Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: int)
+                        key expressions: _col1 (type: string)
                         sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                        Map-reduce partition columns: _col1 (type: string)
                         Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_name is not null (type: boolean)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                       Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                         Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -238,27 +238,11 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-                Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                  Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: int), _col1 (type: string)
-                  1 _col9 (type: int), _col10 (type: string)
+                  1 _col9 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
                 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
+                  expressions: _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
                   Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -268,22 +252,38 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 4 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
                   0 _col1 (type: string)
-                  1 _col1 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+                  1 _col10 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col9 (type: int), _col10 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col9 (type: int), _col10 (type: string)
+                  key expressions: _col9 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col9 (type: int)
                   Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
+                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string)
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int), _col1 (type: string)
+                  1 _col0 (type: int), _col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+                Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col10 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col10 (type: string)
+                  Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
 
   Stage: Stage-0
     Fetch Operator


[36/50] [abbrv] hive git commit: HIVE-10634: The HMS upgrade test script on LXC is exiting with error even if the test was run successfully (Sergio Pena, reviewed by Szehon Ho)

Posted by xu...@apache.org.
HIVE-10634: The HMS upgrade test script on LXC is exiting with error even if the test was run successfully (Sergio Pena, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e24662c3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e24662c3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e24662c3

Branch: refs/heads/beeline-cli
Commit: e24662c3d80138cdc4ee102a2da5e9a81c0bce7d
Parents: c156b32
Author: Sergio Pena <se...@cloudera.com>
Authored: Thu May 7 09:43:54 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu May 7 09:43:54 2015 -0500

----------------------------------------------------------------------
 testutils/metastore/execute-test-on-lxc.sh | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e24662c3/testutils/metastore/execute-test-on-lxc.sh
----------------------------------------------------------------------
diff --git a/testutils/metastore/execute-test-on-lxc.sh b/testutils/metastore/execute-test-on-lxc.sh
index 033c445..592e696 100644
--- a/testutils/metastore/execute-test-on-lxc.sh
+++ b/testutils/metastore/execute-test-on-lxc.sh
@@ -97,7 +97,7 @@ lxc_prepare() {
 	cat>$tmpfile<<EOF
 rm -rf hive
 mkdir hive
-git clone --depth 1 -b $BRANCH https://git-wip-us.apache.org/repos/asf/hive.git >/dev/null
+git clone --depth 1 -b $BRANCH https://github.com/apache/hive.git >/dev/null
 cd hive
 wget $PATCH_URL -O hms.patch
 bash -x testutils/ptest2/src/main/resources/smart-apply-patch.sh hms.patch
@@ -153,5 +153,8 @@ do
 	log "$(lxc_print_metastore_log $name)"
 	lxc_stop $name
 
-	[ $rc != 0 ] && exit 1
+	if [[ $rc != 0 ]]; then
+		log "Tests failed. Exiting with error code (1)."
+		exit 1
+	fi
 done
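
For context on the second hunk: in bash, the exit status of a for/while loop is the status of the last command run in its body, so when every test passes (rc=0) the trailing "[ $rc != 0 ] && exit 1" evaluates to false, leaves status 1 behind, and the script as a whole reports failure. A minimal sketch of the behavior, assuming plain bash (the rc values are illustrative, not taken from the real script):

#!/bin/bash
# demo.sh -- why the old loop ending reported failure on success
for rc in 0 0 0; do            # every "test" passed
  [ $rc != 0 ] && exit 1       # [ 0 != 0 ] is false, so this list's status is 1
done                           # the loop ends with the status of its last command
# Running it:  bash demo.sh; echo $?   ->  prints 1 (a false failure)
# An if-statement whose condition is false returns 0, which is why the patch
# above rewrites the guard as "if [[ $rc != 0 ]]; then ... fi".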


[23/50] [abbrv] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
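
The 13000-plus-line file added below is the recorded golden output for the new vector_leftsemi_mapjoin.q test on the Tez driver. A minimal sketch of regenerating such a file locally, assuming the usual itests/qtest maven workflow of this era (module path, profile, and flags are assumptions, not part of this commit):

# Sketch only: regenerate a Tez golden file with the CLI driver test
cd itests/qtest
mvn test -Phadoop-2 -Dtest=TestMiniTezCliDriver \
    -Dqfile=vector_leftsemi_mapjoin.q \
    -Dtest.output.overwrite=true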
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
new file mode 100644
index 0000000..564f59d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
@@ -0,0 +1,13807 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from t1 sort by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 sort by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: create table t2 stored as orc as select cast(2*key as int) key, value from t1
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t2
+POSTHOOK: query: create table t2 stored as orc as select cast(2*key as int) key, value from t1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t2
+PREHOOK: query: select * from t2 sort by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 sort by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_5
+10	val_5
+10	val_5
+16	val_8
+18	val_9
+20	val_10
+4	val_2
+8	val_4
+PREHOOK: query: create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t3
+POSTHOOK: query: create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t3
+PREHOOK: query: select * from t3 sort by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t3 sort by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+10	val_5
+10	val_5
+10	val_5
+16	val_8
+18	val_9
+2	val_2
+20	val_10
+4	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_4
+8	val_8
+9	val_9
+PREHOOK: query: create table t4 (key int, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t4
+POSTHOOK: query: create table t4 (key int, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t4
+PREHOOK: query: select * from t4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+4	val_4
+8	val_8
+PREHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_5
+10	val_5
+10	val_5
+4	val_2
+8	val_4
+PREHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t4
+#### A masked pattern was here ####
+PREHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col1 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 15) (type: boolean)
+                    Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int), key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: int)
+                        Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+val_0
+val_0
+val_0
+val_10
+val_2
+val_4
+val_5
+val_5
+val_5
+val_8
+val_9
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((value < 'val_10') and key is not null) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: key, value
+                      Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int), value (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+PREHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t3
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 5) (type: boolean)
+                    Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+val_10
+val_8
+val_9
+PREHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int), _col1 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+10	val_5
+10	val_5
+10	val_5
+4	val_2
+8	val_4
+PREHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 99 Data size: 396 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Statistics: Num rows: 99 Data size: 396 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 99 Data size: 396 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 99 Data size: 396 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+2
+4
+4
+5
+5
+5
+8
+8
+9
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 (2 * _col0) (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (2 * key) is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: (2 * _col0) (type: int)
+                          sort order: +
+                          Map-reduce partition columns: (2 * _col0) (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+8	val_8
+PREHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                           Left Semi Join 1 to 2
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                        2 _col0 (type: int)
+                      outputColumnNames: _col0, _col1, _col5, _col6
+                      input vertices:
+                        1 Map 3
+                        2 Map 4
+                      Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                        outputColumnNames: _col0, _col1, _col2, _col3
+                        Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: string)
+                          sort order: ++
+                          Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col2 (type: int), _col3 (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+10	val_10	10	val_5
+10	val_10	10	val_5
+10	val_10	10	val_5
+4	val_4	4	val_2
+8	val_8	8	val_4
+PREHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 6 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 238 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int), value (type: string)
+                        1 _col0 (type: int), _col1 (type: string)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: key, value
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int), value (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: string)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                          Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                           Left Semi Join 0 to 2
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                        2 _col0 (type: int)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                        2 Map 4
+                      Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                         Left Semi Join 1 to 2
+                    keys:
+                      0 key (type: int)
+                      1 key (type: int)
+                      2 _col0 (type: int)
+                    outputColumnNames: _col0
+                    input vertices:
+                      1 Map 3
+                      2 Map 4
+                    Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Left Semi Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 _col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Left Outer Join0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+16
+18
+20
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Right Outer Join0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Outer Join 0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+2
+4
+4
+5
+5
+5
+8
+8
+9
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 

<TRUNCATED>
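
The plans above all share one shape: the semi-join side gets a hash-mode Group By Operator
on its join keys before the Map Join or Merge Join, because a left semi join must emit each
left row at most once no matter how many right rows match. A rough sketch of that rewrite,
using the t1/t2 tables from this test (illustrative only, not the literal internal plan):

  select a.key, a.value
  from t1 a
  left semi join t2 b on a.key = b.key;

  -- behaves like an inner join against the deduplicated key set; the inner
  -- GROUP BY corresponds to the hash-mode Group By Operator on the broadcast
  -- vertices (Map 3 / Map 4) in the plans above:
  select a.key, a.value
  from t1 a
  join (select key from t2 group by key) b on a.key = b.key;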

[18/50] [abbrv] hive git commit: HIVE-10614 : schemaTool upgrade from 0.14.0 to 1.3.0 causes failure (Hari Subramaniyan via Sushanth Sowmyan, Thejas Nair)

Posted by xu...@apache.org.
HIVE-10614 : schemaTool upgrade from 0.14.0 to 1.3.0 causes failure (Hari Subramaniyan via Sushanth Sowmyan, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dc72c873
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dc72c873
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dc72c873

Branch: refs/heads/beeline-cli
Commit: dc72c873690e0b0f4a9be992172d6959d8ddddfe
Parents: 3997202
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 13:46:54 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 13:47:54 2015 -0700

----------------------------------------------------------------------
 .../upgrade/mysql/021-HIVE-7018.mysql.sql       | 53 --------------------
 .../upgrade/mysql/hive-schema-1.2.0.mysql.sql   | 10 +++-
 .../upgrade/mysql/hive-schema-1.3.0.mysql.sql   | 10 +++-
 .../mysql/upgrade-1.1.0-to-1.2.0.mysql.sql      |  2 +-
 4 files changed, 17 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/dc72c873/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql b/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql
deleted file mode 100644
index 08208f9..0000000
--- a/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql
+++ /dev/null
@@ -1,53 +0,0 @@
-SELECT '< HIVE-7018 Remove Table and Partition tables column LINK_TARGET_ID from Mysql for other DBs do not have it >' AS ' ';
-
-DELIMITER $$
-DROP PROCEDURE IF EXISTS RM_TLBS_LINKID $$
-DROP PROCEDURE IF EXISTS RM_PARTITIONS_LINKID $$
-DROP PROCEDURE IF EXISTS RM_LINKID $$
-
-/* Call this procedure to drop column LINK_TARGET_ID for TBLS */
-CREATE PROCEDURE RM_TLBS_LINKID()
-  BEGIN
-    IF EXISTS (SELECT * FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_NAME` = 'TBLS' AND `COLUMN_NAME` = 'LINK_TARGET_ID') THEN
-      ALTER TABLE `TBLS`
-        DROP FOREIGN KEY `TBLS_FK3`
-      ;
-      ALTER TABLE `TBLS`
-        DROP KEY `TBLS_N51`
-      ;
-      ALTER TABLE `TBLS`
-        DROP COLUMN `LINK_TARGET_ID`
-      ;
-    END IF;
-  END $$
-
-/* Call this procedure to drop column LINK_TARGET_ID for PARTITIONS */
-CREATE PROCEDURE RM_PARTITIONS_LINKID()
-  BEGIN
-    IF EXISTS (SELECT * FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_NAME` = 'PARTITIONS' AND `COLUMN_NAME` = 'LINK_TARGET_ID') THEN
-      ALTER TABLE `PARTITIONS`
-        DROP FOREIGN KEY `PARTITIONS_FK3`
-      ;
-      ALTER TABLE `PARTITIONS`
-        DROP KEY `PARTITIONS_N51`
-      ;
-      ALTER TABLE `PARTITIONS`
-        DROP COLUMN `LINK_TARGET_ID`
-      ;
-    END IF;
-  END $$
-
-/*
- * Check and drop column LINK_TARGET_ID
- */
-CREATE PROCEDURE RM_LINKID()
-  BEGIN
-    call RM_PARTITIONS_LINKID();
-    call RM_TLBS_LINKID();
-    SELECT 'Completed remove LINK_TARGET_ID';
-  END $$
-
-
-DELIMITER ;
-
-CALL RM_LINKID();
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/dc72c873/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
index cdda8db..07dce8f 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
@@ -211,12 +211,15 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
   `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `SD_ID` bigint(20) DEFAULT NULL,
   `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
   PRIMARY KEY (`PART_ID`),
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
   KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
 
@@ -587,12 +590,15 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `VIEW_EXPANDED_TEXT` mediumtext,
   `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
   PRIMARY KEY (`TBL_ID`),
   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
   KEY `TBLS_N50` (`SD_ID`),
   KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/dc72c873/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
index 71de138..19ae264 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
@@ -211,12 +211,15 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
   `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `SD_ID` bigint(20) DEFAULT NULL,
   `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
   PRIMARY KEY (`PART_ID`),
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
   KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
 
@@ -587,12 +590,15 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `VIEW_EXPANDED_TEXT` mediumtext,
   `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
   PRIMARY KEY (`TBL_ID`),
   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
   KEY `TBLS_N50` (`SD_ID`),
   KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/dc72c873/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql
index e15c758..406504b 100644
--- a/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql
@@ -1,5 +1,5 @@
 SELECT 'Upgrading MetaStore schema from 1.1.0 to 1.2.0' AS ' ';
-SOURCE 021-HIVE-7018.mysql.sql;
+
 UPDATE VERSION SET SCHEMA_VERSION='1.2.0', VERSION_COMMENT='Hive release version 1.2.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 1.1.0 to 1.2.0' AS ' ';
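
After this change the hive-schema-1.2.0 and 1.3.0 scripts define LINK_TARGET_ID directly, so
the 1.1.0-to-1.2.0 upgrade no longer sources the standalone drop script. As a hedged sanity
check (illustrative only, not part of the upgrade scripts), the same INFORMATION_SCHEMA lookup
the removed procedures used can confirm the columns exist on an upgraded MySQL metastore:

  SELECT TABLE_NAME, COLUMN_NAME
    FROM INFORMATION_SCHEMA.COLUMNS
   WHERE TABLE_NAME IN ('TBLS', 'PARTITIONS')
     AND COLUMN_NAME = 'LINK_TARGET_ID';

A metastore created or upgraded with these scripts should return one row per table (filter on
TABLE_SCHEMA as well if the server hosts more than one database).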
 


[32/50] [abbrv] hive git commit: HIVE-10542: Full outer joins in tez produce incorrect results in certain cases (Vikram Dixit K, reviewed by Gunther Hagleitner)

Posted by xu...@apache.org.
HIVE-10542: Full outer joins in tez produce incorrect results in certain cases (Vikram Dixit K, reviewed by Gunther Hagleitner)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/26ec033c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/26ec033c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/26ec033c

Branch: refs/heads/beeline-cli
Commit: 26ec033c89a61fa0bf95b9b66da0842b22ec4c9b
Parents: 4b44408
Author: vikram <vi...@hortonworks.com>
Authored: Wed May 6 22:18:28 2015 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Wed May 6 22:18:28 2015 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   2 +-
 .../hive/ql/exec/CommonMergeJoinOperator.java   |  54 +-
 ql/src/test/queries/clientpositive/mergejoin.q  |  17 +
 .../test/results/clientpositive/mergejoin.q.out | 844 ++++++++++++++++---
 .../clientpositive/tez/auto_join29.q.out        | 500 +++++++++++
 .../results/clientpositive/tez/mergejoin.q.out  | 844 ++++++++++++++++---
 6 files changed, 2005 insertions(+), 256 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 134fded..3eff7d0 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -124,6 +124,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   mapreduce2.q,\
   merge1.q,\
   merge2.q,\
+  mergejoin.q,\
   metadataonly1.q,\
   metadata_only_queries.q,\
   optimize_nullscan.q,\
@@ -160,7 +161,6 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   transform1.q,\
   transform2.q,\
   transform_ppr1.q,\
-  mergejoin.q,\
   transform_ppr2.q,\
   union2.q,\
   union3.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index cb0a5e7..d1d5e2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -20,10 +20,13 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
@@ -36,6 +39,7 @@ import org.apache.hadoop.hive.ql.exec.tez.TezContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.JoinCondDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
@@ -83,6 +87,7 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
 
   transient List<Operator<? extends OperatorDesc>> originalParents =
       new ArrayList<Operator<? extends OperatorDesc>>();
+  transient Set<Integer> fetchInputAtClose;
 
   public CommonMergeJoinOperator() {
     super();
@@ -93,6 +98,7 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
   public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
     Collection<Future<?>> result = super.initializeOp(hconf);
     firstFetchHappened = false;
+    fetchInputAtClose = getFetchInputAtCloseList();
 
     int maxAlias = 0;
     for (byte pos = 0; pos < order.length; pos++) {
@@ -145,6 +151,25 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
     return result;
   }
 
+  /*
+   * In the case of outer joins, we need to push records through even if one of the sides is
+   * done sending records. For example, in a full outer join the right side still needs to send
+   * data for the join even after the left side has sent all of its records. This set can be
+   * computed once at initialize time; at close, these tags keep forwarding records until they
+   * have none left to send. Subsequent joins need to fetch their data as well, since any join
+   * following the outer join could produce results from either of the outer sides depending on
+   * the join condition. We could optimize the inner-join case here in the future.
+   */
+  private Set<Integer> getFetchInputAtCloseList() {
+    Set<Integer> retval = new TreeSet<Integer>();
+    for (JoinCondDesc joinCondDesc : conf.getConds()) {
+      retval.add(joinCondDesc.getLeft());
+      retval.add(joinCondDesc.getRight());
+    }
+
+    return retval;
+  }
+
   @Override
   public void endGroup() throws HiveException {
     // we do not want the end group to cause a checkAndGenObject
@@ -173,7 +198,6 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
     List<Object> value = getFilteredValue(alias, row);
     // compute keys and values as StandardObjects
     List<Object> key = mergeJoinComputeKeys(row, alias);
-
     if (!firstFetchHappened) {
       firstFetchHappened = true;
       // fetch the first group for all small table aliases
@@ -405,9 +429,37 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
 
     while (!allFetchDone) {
       List<Byte> ret = joinOneGroup();
+      for (int i = 0; i < fetchDone.length; i++) {
+        // mark the big table's fetch as done
+        if (i == posBigTable) {
+          // if we are in close op phase, we have definitely exhausted the big table input
+          fetchDone[i] = true;
+          continue;
+        }
+
+        // in the case of outer joins, we need to pull in records from the sides (apart from
+        // the big table) for which we still need to produce output, e.g. a full outer join
+        if (fetchInputAtClose.contains(i) && !fetchDone[i]) {
+          // if we have never fetched, we need to fetch before we can do the join
+          if (!firstFetchHappened) {
+            // we need to fetch all the needed ones at least once to ensure bootstrapping
+            if (i == (fetchDone.length - 1)) {
+              firstFetchHappened = true;
+            }
+            // bootstrap fetch; from here on, joinOneGroup fetches the next rows itself
+            fetchNextGroup((byte) i);
+          }
+          // Do the join. It does fetching of next row groups itself.
+          if (i == (fetchDone.length - 1)) {
+            ret = joinOneGroup();
+          }
+        }
+      }
+
       if (ret == null || ret.size() == 0) {
         break;
       }
+
       reportProgress();
       numMapRowsRead++;
       allFetchDone = allFetchDone();
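
To make the new fetch-at-close bookkeeping concrete, here is a minimal standalone sketch of how
the patch derives the set of input tags that must keep fetching at close: both sides of every
join condition are collected, so outer sides can still drain after the big table finishes. This
is plain Java with a stand-in JoinCond type, not Hive's JoinCondDesc.

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

public class FetchAtCloseSketch {
  // Stand-in for Hive's JoinCondDesc: just the two input tags of one condition.
  static final class JoinCond {
    final int left;
    final int right;
    JoinCond(int left, int right) { this.left = left; this.right = right; }
  }

  // Mirrors getFetchInputAtCloseList(): collect both sides of every condition.
  static Set<Integer> fetchInputAtClose(List<JoinCond> conds) {
    Set<Integer> tags = new TreeSet<Integer>();
    for (JoinCond c : conds) {
      tags.add(c.left);
      tags.add(c.right);
    }
    return tags;
  }

  public static void main(String[] args) {
    // a FULL OUTER JOIN b JOIN c maps to conditions (0,1) and (1,2).
    List<JoinCond> conds = Arrays.asList(new JoinCond(0, 1), new JoinCond(1, 2));
    System.out.println(fetchInputAtClose(conds)); // prints [0, 1, 2]
  }
}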
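And a toy model of the close-phase loop itself, assuming three inputs with tag 0 as the big
table; the queues and group names are invented for illustration. The big table is marked done
immediately, while the remaining sides keep being fetched and joined until every input reports
it has nothing more to send, matching the structure of the loop added above.

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;

public class CloseDrainSketch {
  public static void main(String[] args) {
    final int posBigTable = 0;
    // Pending row groups per input tag; the big table (tag 0) is empty at close.
    List<Deque<String>> pending = Arrays.asList(
        new ArrayDeque<String>(),                                        // big table
        new ArrayDeque<String>(Arrays.asList("b-group-1", "b-group-2")), // outer side b
        new ArrayDeque<String>(Arrays.asList("c-group-1")));             // outer side c
    boolean[] fetchDone = new boolean[pending.size()];

    boolean allFetchDone = false;
    while (!allFetchDone) {
      for (int i = 0; i < fetchDone.length; i++) {
        if (i == posBigTable) {
          fetchDone[i] = true;  // the big table input is exhausted by close time
          continue;
        }
        if (!fetchDone[i]) {
          String group = pending.get(i).poll();  // stand-in for fetchNextGroup()
          if (group == null) {
            fetchDone[i] = true;  // this side has nothing more to send
          } else {
            System.out.println("joining residual group from tag " + i + ": " + group);
          }
        }
      }
      allFetchDone = true;
      for (boolean done : fetchDone) {
        allFetchDone = allFetchDone && done;
      }
    }
    System.out.println("all sides drained; NULL-padded rows can now be emitted");
  }
}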

http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/test/queries/clientpositive/mergejoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mergejoin.q b/ql/src/test/queries/clientpositive/mergejoin.q
index 257337a..59374ca 100644
--- a/ql/src/test/queries/clientpositive/mergejoin.q
+++ b/ql/src/test/queries/clientpositive/mergejoin.q
@@ -6,11 +6,14 @@ set hive.optimize.metadataonly=false;
 set hive.optimize.index.filter=true;
 set hive.vectorized.execution.enabled=true;
 
+-- SORT_QUERY_RESULTS
+
 explain
 select * from src a join src1 b on a.key = b.key;
 
 select * from src a join src1 b on a.key = b.key;
 
+
 CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
 CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE;
 CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
@@ -105,3 +108,17 @@ join
 (select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
 where vt1.id=vt2.id;
 
+set mapred.reduce.tasks=18;
+select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key;
+select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key;
+
+select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key;
+
+select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key;
+
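
The new queries deliberately pick filters (key = 0 on one side, key = 98 on the other) that
share no join keys, so the full outer join must NULL-pad every surviving row, which is exactly
what the close-time fetch makes possible. The toy Java below (not Hive code; the data is
hard-coded to mirror the expected rows in the .q.out diff that follows) shows only the
unmatched branches, since that is all this data exercises.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class FullOuterToy {
  public static void main(String[] args) {
    // side a: the three rows with key 0; side b: the two rows with key 98.
    Map<Integer, List<String>> a = new LinkedHashMap<Integer, List<String>>();
    a.put(0, Arrays.asList("val_0", "val_0", "val_0"));
    Map<Integer, List<String>> b = new LinkedHashMap<Integer, List<String>>();
    b.put(98, Arrays.asList("val_98", "val_98"));

    // unmatched rows of a: the right-hand columns become NULL
    for (Map.Entry<Integer, List<String>> e : a.entrySet()) {
      if (!b.containsKey(e.getKey())) {
        for (String v : e.getValue()) {
          System.out.println(e.getKey() + "\t" + v + "\tNULL\tNULL");
        }
      }
    }
    // unmatched rows of b: the left-hand columns become NULL (these are the rows
    // the operator can only produce by fetching the small side at close)
    for (Map.Entry<Integer, List<String>> e : b.entrySet()) {
      if (!a.containsKey(e.getKey())) {
        for (String v : e.getValue()) {
          System.out.println("NULL\tNULL\t" + e.getKey() + "\t" + v);
        }
      }
    }
  }
}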

http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/test/results/clientpositive/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mergejoin.q.out b/ql/src/test/results/clientpositive/mergejoin.q.out
index af3d7df..cb96ab3 100644
--- a/ql/src/test/results/clientpositive/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/mergejoin.q.out
@@ -1,7 +1,11 @@
-PREHOOK: query: explain
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
 select * from src a join src1 b on a.key = b.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
 select * from src a join src1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -338,74 +342,12 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
-2	val_2	2008-04-08	2	val_2	2008-04-08
-4	val_4	2008-04-08	4	val_4	2008-04-08
-8	val_8	2008-04-08	8	val_8	2008-04-08
-11	val_11	2008-04-08	11	val_11	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-17	val_17	2008-04-08	17	val_17	2008-04-08
-19	val_19	2008-04-08	19	val_19	2008-04-08
-20	val_20	2008-04-08	20	val_20	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-28	val_28	2008-04-08	28	val_28	2008-04-08
-33	val_33	2008-04-08	33	val_33	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-44	val_44	2008-04-08	44	val_44	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-53	val_53	2008-04-08	53	val_53	2008-04-08
-57	val_57	2008-04-08	57	val_57	2008-04-08
-64	val_64	2008-04-08	64	val_64	2008-04-08
-66	val_66	2008-04-08	66	val_66	2008-04-08
-77	val_77	2008-04-08	77	val_77	2008-04-08
-80	val_80	2008-04-08	80	val_80	2008-04-08
-82	val_82	2008-04-08	82	val_82	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-86	val_86	2008-04-08	86	val_86	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 105	val_105	2008-04-08	105	val_105	2008-04-08
+11	val_11	2008-04-08	11	val_11	2008-04-08
 114	val_114	2008-04-08	114	val_114	2008-04-08
 116	val_116	2008-04-08	116	val_116	2008-04-08
 118	val_118	2008-04-08	118	val_118	2008-04-08
@@ -447,6 +389,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
 150	val_150	2008-04-08	150	val_150	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
@@ -484,6 +430,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
+17	val_17	2008-04-08	17	val_17	2008-04-08
 170	val_170	2008-04-08	170	val_170	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
@@ -510,10 +457,13 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 189	val_189	2008-04-08	189	val_189	2008-04-08
+19	val_19	2008-04-08	19	val_19	2008-04-08
 190	val_190	2008-04-08	190	val_190	2008-04-08
 192	val_192	2008-04-08	192	val_192	2008-04-08
 194	val_194	2008-04-08	194	val_194	2008-04-08
 196	val_196	2008-04-08	196	val_196	2008-04-08
+2	val_2	2008-04-08	2	val_2	2008-04-08
+20	val_20	2008-04-08	20	val_20	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
@@ -560,6 +510,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
@@ -571,6 +525,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 257	val_257	2008-04-08	257	val_257	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
 260	val_260	2008-04-08	260	val_260	2008-04-08
 262	val_262	2008-04-08	262	val_262	2008-04-08
 266	val_266	2008-04-08	266	val_266	2008-04-08
@@ -600,6 +558,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
+28	val_28	2008-04-08	28	val_28	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
@@ -661,11 +620,21 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
+33	val_33	2008-04-08	33	val_33	2008-04-08
 332	val_332	2008-04-08	332	val_332	2008-04-08
 336	val_336	2008-04-08	336	val_336	2008-04-08
 338	val_338	2008-04-08	338	val_338	2008-04-08
 341	val_341	2008-04-08	341	val_341	2008-04-08
 345	val_345	2008-04-08	345	val_345	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
 356	val_356	2008-04-08	356	val_356	2008-04-08
 365	val_365	2008-04-08	365	val_365	2008-04-08
 367	val_367	2008-04-08	367	val_367	2008-04-08
@@ -681,6 +650,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
 374	val_374	2008-04-08	374	val_374	2008-04-08
 378	val_378	2008-04-08	378	val_378	2008-04-08
 389	val_389	2008-04-08	389	val_389	2008-04-08
@@ -695,6 +668,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
+4	val_4	2008-04-08	4	val_4	2008-04-08
 400	val_400	2008-04-08	400	val_400	2008-04-08
 402	val_402	2008-04-08	402	val_402	2008-04-08
 404	val_404	2008-04-08	404	val_404	2008-04-08
@@ -732,6 +706,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 419	val_419	2008-04-08	419	val_419	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
@@ -751,6 +729,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
+44	val_44	2008-04-08	44	val_44	2008-04-08
 444	val_444	2008-04-08	444	val_444	2008-04-08
 446	val_446	2008-04-08	446	val_446	2008-04-08
 448	val_448	2008-04-08	448	val_448	2008-04-08
@@ -809,68 +788,6 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 493	val_493	2008-04-08	493	val_493	2008-04-08
 495	val_495	2008-04-08	495	val_495	2008-04-08
 497	val_497	2008-04-08	497	val_497	2008-04-08
-PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
-#### A masked pattern was here ####
-POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
-#### A masked pattern was here ####
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-2	val_2	2008-04-08	2	val_2	2008-04-08
-4	val_4	2008-04-08	4	val_4	2008-04-08
-8	val_8	2008-04-08	8	val_8	2008-04-08
-11	val_11	2008-04-08	11	val_11	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-17	val_17	2008-04-08	17	val_17	2008-04-08
-19	val_19	2008-04-08	19	val_19	2008-04-08
-20	val_20	2008-04-08	20	val_20	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-28	val_28	2008-04-08	28	val_28	2008-04-08
-33	val_33	2008-04-08	33	val_33	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-44	val_44	2008-04-08	44	val_44	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
@@ -880,6 +797,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 64	val_64	2008-04-08	64	val_64	2008-04-08
 66	val_66	2008-04-08	66	val_66	2008-04-08
 77	val_77	2008-04-08	77	val_77	2008-04-08
+8	val_8	2008-04-08	8	val_8	2008-04-08
 80	val_80	2008-04-08	80	val_80	2008-04-08
 82	val_82	2008-04-08	82	val_82	2008-04-08
 84	val_84	2008-04-08	84	val_84	2008-04-08
@@ -895,11 +813,35 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
+PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 105	val_105	2008-04-08	105	val_105	2008-04-08
+11	val_11	2008-04-08	11	val_11	2008-04-08
 114	val_114	2008-04-08	114	val_114	2008-04-08
 116	val_116	2008-04-08	116	val_116	2008-04-08
 118	val_118	2008-04-08	118	val_118	2008-04-08
@@ -941,6 +883,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
 150	val_150	2008-04-08	150	val_150	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
@@ -978,6 +924,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
+17	val_17	2008-04-08	17	val_17	2008-04-08
 170	val_170	2008-04-08	170	val_170	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
@@ -1004,10 +951,13 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 189	val_189	2008-04-08	189	val_189	2008-04-08
+19	val_19	2008-04-08	19	val_19	2008-04-08
 190	val_190	2008-04-08	190	val_190	2008-04-08
 192	val_192	2008-04-08	192	val_192	2008-04-08
 194	val_194	2008-04-08	194	val_194	2008-04-08
 196	val_196	2008-04-08	196	val_196	2008-04-08
+2	val_2	2008-04-08	2	val_2	2008-04-08
+20	val_20	2008-04-08	20	val_20	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
@@ -1054,6 +1004,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
@@ -1065,6 +1019,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 257	val_257	2008-04-08	257	val_257	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
 260	val_260	2008-04-08	260	val_260	2008-04-08
 262	val_262	2008-04-08	262	val_262	2008-04-08
 266	val_266	2008-04-08	266	val_266	2008-04-08
@@ -1094,6 +1052,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
+28	val_28	2008-04-08	28	val_28	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
@@ -1155,11 +1114,21 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
+33	val_33	2008-04-08	33	val_33	2008-04-08
 332	val_332	2008-04-08	332	val_332	2008-04-08
 336	val_336	2008-04-08	336	val_336	2008-04-08
 338	val_338	2008-04-08	338	val_338	2008-04-08
 341	val_341	2008-04-08	341	val_341	2008-04-08
 345	val_345	2008-04-08	345	val_345	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
 356	val_356	2008-04-08	356	val_356	2008-04-08
 365	val_365	2008-04-08	365	val_365	2008-04-08
 367	val_367	2008-04-08	367	val_367	2008-04-08
@@ -1175,6 +1144,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
 374	val_374	2008-04-08	374	val_374	2008-04-08
 378	val_378	2008-04-08	378	val_378	2008-04-08
 389	val_389	2008-04-08	389	val_389	2008-04-08
@@ -1189,6 +1162,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
+4	val_4	2008-04-08	4	val_4	2008-04-08
 400	val_400	2008-04-08	400	val_400	2008-04-08
 402	val_402	2008-04-08	402	val_402	2008-04-08
 404	val_404	2008-04-08	404	val_404	2008-04-08
@@ -1226,6 +1200,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 419	val_419	2008-04-08	419	val_419	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
@@ -1245,6 +1223,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
+44	val_44	2008-04-08	44	val_44	2008-04-08
 444	val_444	2008-04-08	444	val_444	2008-04-08
 446	val_446	2008-04-08	446	val_446	2008-04-08
 448	val_448	2008-04-08	448	val_448	2008-04-08
@@ -1303,6 +1282,31 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 493	val_493	2008-04-08	493	val_493	2008-04-08
 495	val_495	2008-04-08	495	val_495	2008-04-08
 497	val_497	2008-04-08	497	val_497	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+53	val_53	2008-04-08	53	val_53	2008-04-08
+57	val_57	2008-04-08	57	val_57	2008-04-08
+64	val_64	2008-04-08	64	val_64	2008-04-08
+66	val_66	2008-04-08	66	val_66	2008-04-08
+77	val_77	2008-04-08	77	val_77	2008-04-08
+8	val_8	2008-04-08	8	val_8	2008-04-08
+80	val_80	2008-04-08	80	val_80	2008-04-08
+82	val_82	2008-04-08	82	val_82	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+86	val_86	2008-04-08	86	val_86	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
 PREHOOK: query: explain
 select count(*)
 from tab a left outer join tab_part b on a.key = b.key
@@ -2565,3 +2569,589 @@ POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 480
+PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08
+PREHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+Warning: Shuffle Join JOIN[9][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	10	val_10	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	105	val_105	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	11	val_11	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	111	val_111	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	114	val_114	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	116	val_116	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	126	val_126	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	131	val_131	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	133	val_133	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	136	val_136	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	143	val_143	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	145	val_145	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	150	val_150	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	153	val_153	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	155	val_155	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	156	val_156	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	157	val_157	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	158	val_158	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	160	val_160	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	162	val_162	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	163	val_163	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	166	val_166	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	168	val_168	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	17	val_17	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	170	val_170	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	177	val_177	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	178	val_178	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	180	val_180	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	181	val_181	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	183	val_183	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	186	val_186	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	189	val_189	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	19	val_19	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	190	val_190	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	192	val_192	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	194	val_194	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	196	val_196	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	2	val_2	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	20	val_20	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	201	val_201	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	202	val_202	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	214	val_214	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	218	val_218	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	222	val_222	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	226	val_226	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	228	val_228	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	235	val_235	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	241	val_241	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	244	val_244	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	247	val_247	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	248	val_248	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	249	val_249	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	252	val_252	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	257	val_257	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	258	val_258	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	260	val_260	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	262	val_262	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	263	val_263	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	266	val_266	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	27	val_27	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	274	val_274	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	275	val_275	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	28	val_28	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	283	val_283	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	284	val_284	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	285	val_285	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	286	val_286	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	287	val_287	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	289	val_289	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	291	val_291	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	292	val_292	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	296	val_296	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	30	val_30	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	302	val_302	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	305	val_305	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	306	val_306	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	308	val_308	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	310	val_310	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	315	val_315	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	323	val_323	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	33	val_33	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	332	val_332	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	335	val_335	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	336	val_336	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	338	val_338	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	339	val_339	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	34	val_34	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	341	val_341	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	345	val_345	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	351	val_351	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	356	val_356	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	360	val_360	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	362	val_362	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	364	val_364	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	365	val_365	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	366	val_366	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	368	val_368	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	373	val_373	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	374	val_374	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	375	val_375	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	377	val_377	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	378	val_378	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	379	val_379	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	386	val_386	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	389	val_389	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	392	val_392	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	393	val_393	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	394	val_394	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	4	val_4	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	400	val_400	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	402	val_402	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	407	val_407	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	41	val_41	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	411	val_411	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	418	val_418	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	419	val_419	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	421	val_421	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	427	val_427	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	43	val_43	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	432	val_432	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	435	val_435	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	436	val_436	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	437	val_437	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	44	val_44	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	443	val_443	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	444	val_444	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	446	val_446	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	448	val_448	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	449	val_449	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	452	val_452	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	453	val_453	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	455	val_455	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	457	val_457	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	460	val_460	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	467	val_467	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	47	val_47	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	470	val_470	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	472	val_472	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	475	val_475	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	477	val_477	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	479	val_479	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	481	val_481	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	482	val_482	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	483	val_483	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	484	val_484	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	485	val_485	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	487	val_487	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	490	val_490	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	491	val_491	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	493	val_493	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	494	val_494	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	495	val_495	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	496	val_496	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	497	val_497	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	53	val_53	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	54	val_54	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	57	val_57	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	64	val_64	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	65	val_65	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	66	val_66	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	69	val_69	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	74	val_74	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	77	val_77	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	78	val_78	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	8	val_8	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	80	val_80	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	82	val_82	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	85	val_85	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	86	val_86	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	87	val_87	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	9	val_9	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	92	val_92	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	96	val_96	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08

http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/test/results/clientpositive/tez/auto_join29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/auto_join29.q.out b/ql/src/test/results/clientpositive/tez/auto_join29.q.out
index 2cab06e..18c07d9 100644
--- a/ql/src/test/results/clientpositive/tez/auto_join29.q.out
+++ b/ql/src/test/results/clientpositive/tez/auto_join29.q.out
@@ -2673,6 +2673,506 @@ POSTHOOK: query: SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key AN
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
+NULL	NULL	NULL	NULL	0	val_0
+NULL	NULL	NULL	NULL	0	val_0
+NULL	NULL	NULL	NULL	0	val_0
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	100	val_100
+NULL	NULL	NULL	NULL	100	val_100
+NULL	NULL	NULL	NULL	103	val_103
+NULL	NULL	NULL	NULL	103	val_103
+NULL	NULL	NULL	NULL	104	val_104
+NULL	NULL	NULL	NULL	104	val_104
+NULL	NULL	NULL	NULL	105	val_105
+NULL	NULL	NULL	NULL	11	val_11
+NULL	NULL	NULL	NULL	111	val_111
+NULL	NULL	NULL	NULL	113	val_113
+NULL	NULL	NULL	NULL	113	val_113
+NULL	NULL	NULL	NULL	114	val_114
+NULL	NULL	NULL	NULL	116	val_116
+NULL	NULL	NULL	NULL	118	val_118
+NULL	NULL	NULL	NULL	118	val_118
+NULL	NULL	NULL	NULL	119	val_119
+NULL	NULL	NULL	NULL	119	val_119
+NULL	NULL	NULL	NULL	119	val_119
+NULL	NULL	NULL	NULL	12	val_12
+NULL	NULL	NULL	NULL	12	val_12
+NULL	NULL	NULL	NULL	120	val_120
+NULL	NULL	NULL	NULL	120	val_120
+NULL	NULL	NULL	NULL	125	val_125
+NULL	NULL	NULL	NULL	125	val_125
+NULL	NULL	NULL	NULL	126	val_126
+NULL	NULL	NULL	NULL	128	val_128
+NULL	NULL	NULL	NULL	128	val_128
+NULL	NULL	NULL	NULL	128	val_128
+NULL	NULL	NULL	NULL	129	val_129
+NULL	NULL	NULL	NULL	129	val_129
+NULL	NULL	NULL	NULL	131	val_131
+NULL	NULL	NULL	NULL	133	val_133
+NULL	NULL	NULL	NULL	134	val_134
+NULL	NULL	NULL	NULL	134	val_134
+NULL	NULL	NULL	NULL	136	val_136
+NULL	NULL	NULL	NULL	137	val_137
+NULL	NULL	NULL	NULL	137	val_137
+NULL	NULL	NULL	NULL	138	val_138
+NULL	NULL	NULL	NULL	138	val_138
+NULL	NULL	NULL	NULL	138	val_138
+NULL	NULL	NULL	NULL	138	val_138
+NULL	NULL	NULL	NULL	143	val_143
+NULL	NULL	NULL	NULL	145	val_145
+NULL	NULL	NULL	NULL	146	val_146
+NULL	NULL	NULL	NULL	146	val_146
+NULL	NULL	NULL	NULL	149	val_149
+NULL	NULL	NULL	NULL	149	val_149
+NULL	NULL	NULL	NULL	15	val_15
+NULL	NULL	NULL	NULL	15	val_15
+NULL	NULL	NULL	NULL	150	val_150
+NULL	NULL	NULL	NULL	152	val_152
+NULL	NULL	NULL	NULL	152	val_152
+NULL	NULL	NULL	NULL	153	val_153
+NULL	NULL	NULL	NULL	155	val_155
+NULL	NULL	NULL	NULL	156	val_156
+NULL	NULL	NULL	NULL	157	val_157
+NULL	NULL	NULL	NULL	158	val_158
+NULL	NULL	NULL	NULL	160	val_160
+NULL	NULL	NULL	NULL	162	val_162
+NULL	NULL	NULL	NULL	163	val_163
+NULL	NULL	NULL	NULL	164	val_164
+NULL	NULL	NULL	NULL	164	val_164
+NULL	NULL	NULL	NULL	165	val_165
+NULL	NULL	NULL	NULL	165	val_165
+NULL	NULL	NULL	NULL	166	val_166
+NULL	NULL	NULL	NULL	167	val_167
+NULL	NULL	NULL	NULL	167	val_167
+NULL	NULL	NULL	NULL	167	val_167
+NULL	NULL	NULL	NULL	168	val_168
+NULL	NULL	NULL	NULL	169	val_169
+NULL	NULL	NULL	NULL	169	val_169
+NULL	NULL	NULL	NULL	169	val_169
+NULL	NULL	NULL	NULL	169	val_169
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	170	val_170
+NULL	NULL	NULL	NULL	172	val_172
+NULL	NULL	NULL	NULL	172	val_172
+NULL	NULL	NULL	NULL	174	val_174
+NULL	NULL	NULL	NULL	174	val_174
+NULL	NULL	NULL	NULL	175	val_175
+NULL	NULL	NULL	NULL	175	val_175
+NULL	NULL	NULL	NULL	176	val_176
+NULL	NULL	NULL	NULL	176	val_176
+NULL	NULL	NULL	NULL	177	val_177
+NULL	NULL	NULL	NULL	178	val_178
+NULL	NULL	NULL	NULL	179	val_179
+NULL	NULL	NULL	NULL	179	val_179
+NULL	NULL	NULL	NULL	18	val_18
+NULL	NULL	NULL	NULL	18	val_18
+NULL	NULL	NULL	NULL	180	val_180
+NULL	NULL	NULL	NULL	181	val_181
+NULL	NULL	NULL	NULL	183	val_183
+NULL	NULL	NULL	NULL	186	val_186
+NULL	NULL	NULL	NULL	187	val_187
+NULL	NULL	NULL	NULL	187	val_187
+NULL	NULL	NULL	NULL	187	val_187
+NULL	NULL	NULL	NULL	189	val_189
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	190	val_190
+NULL	NULL	NULL	NULL	191	val_191
+NULL	NULL	NULL	NULL	191	val_191
+NULL	NULL	NULL	NULL	192	val_192
+NULL	NULL	NULL	NULL	193	val_193
+NULL	NULL	NULL	NULL	193	val_193
+NULL	NULL	NULL	NULL	193	val_193
+NULL	NULL	NULL	NULL	194	val_194
+NULL	NULL	NULL	NULL	195	val_195
+NULL	NULL	NULL	NULL	195	val_195
+NULL	NULL	NULL	NULL	196	val_196
+NULL	NULL	NULL	NULL	197	val_197
+NULL	NULL	NULL	NULL	197	val_197
+NULL	NULL	NULL	NULL	199	val_199
+NULL	NULL	NULL	NULL	199	val_199
+NULL	NULL	NULL	NULL	199	val_199
+NULL	NULL	NULL	NULL	2	val_2
+NULL	NULL	NULL	NULL	20	val_20
+NULL	NULL	NULL	NULL	200	val_200
+NULL	NULL	NULL	NULL	200	val_200
+NULL	NULL	NULL	NULL	201	val_201
+NULL	NULL	NULL	NULL	202	val_202
+NULL	NULL	NULL	NULL	203	val_203
+NULL	NULL	NULL	NULL	203	val_203
+NULL	NULL	NULL	NULL	205	val_205
+NULL	NULL	NULL	NULL	205	val_205
+NULL	NULL	NULL	NULL	207	val_207
+NULL	NULL	NULL	NULL	207	val_207
+NULL	NULL	NULL	NULL	208	val_208
+NULL	NULL	NULL	NULL	208	val_208
+NULL	NULL	NULL	NULL	208	val_208
+NULL	NULL	NULL	NULL	209	val_209
+NULL	NULL	NULL	NULL	209	val_209
+NULL	NULL	NULL	NULL	213	val_213
+NULL	NULL	NULL	NULL	213	val_213
+NULL	NULL	NULL	NULL	214	val_214
+NULL	NULL	NULL	NULL	216	val_216
+NULL	NULL	NULL	NULL	216	val_216
+NULL	NULL	NULL	NULL	217	val_217
+NULL	NULL	NULL	NULL	217	val_217
+NULL	NULL	NULL	NULL	218	val_218
+NULL	NULL	NULL	NULL	219	val_219
+NULL	NULL	NULL	NULL	219	val_219
+NULL	NULL	NULL	NULL	221	val_221
+NULL	NULL	NULL	NULL	221	val_221
+NULL	NULL	NULL	NULL	222	val_222
+NULL	NULL	NULL	NULL	223	val_223
+NULL	NULL	NULL	NULL	223	val_223
+NULL	NULL	NULL	NULL	224	val_224
+NULL	NULL	NULL	NULL	224	val_224
+NULL	NULL	NULL	NULL	226	val_226
+NULL	NULL	NULL	NULL	228	val_228
+NULL	NULL	NULL	NULL	229	val_229
+NULL	NULL	NULL	NULL	229	val_229
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	233	val_233
+NULL	NULL	NULL	NULL	233	val_233
+NULL	NULL	NULL	NULL	235	val_235
+NULL	NULL	NULL	NULL	237	val_237
+NULL	NULL	NULL	NULL	237	val_237
+NULL	NULL	NULL	NULL	238	val_238
+NULL	NULL	NULL	NULL	238	val_238
+NULL	NULL	NULL	NULL	239	val_239
+NULL	NULL	NULL	NULL	239	val_239
+NULL	NULL	NULL	NULL	24	val_24
+NULL	NULL	NULL	NULL	24	val_24
+NULL	NULL	NULL	NULL	241	val_241
+NULL	NULL	NULL	NULL	242	val_242
+NULL	NULL	NULL	NULL	242	val_242
+NULL	NULL	NULL	NULL	244	val_244
+NULL	NULL	NULL	NULL	247	val_247
+NULL	NULL	NULL	NULL	248	val_248
+NULL	NULL	NULL	NULL	249	val_249
+NULL	NULL	NULL	NULL	252	val_252
+NULL	NULL	NULL	NULL	255	val_255
+NULL	NULL	NULL	NULL	255	val_255
+NULL	NULL	NULL	NULL	256	val_256
+NULL	NULL	NULL	NULL	256	val_256
+NULL	NULL	NULL	NULL	257	val_257
+NULL	NULL	NULL	NULL	258	val_258
+NULL	NULL	NULL	NULL	26	val_26
+NULL	NULL	NULL	NULL	26	val_26
+NULL	NULL	NULL	NULL	260	val_260
+NULL	NULL	NULL	NULL	262	val_262
+NULL	NULL	NULL	NULL	263	val_263
+NULL	NULL	NULL	NULL	265	val_265
+NULL	NULL	NULL	NULL	265	val_265
+NULL	NULL	NULL	NULL	266	val_266
+NULL	NULL	NULL	NULL	27	val_27
+NULL	NULL	NULL	NULL	272	val_272
+NULL	NULL	NULL	NULL	272	val_272
+NULL	NULL	NULL	NULL	273	val_273
+NULL	NULL	NULL	NULL	273	val_273
+NULL	NULL	NULL	NULL	273	val_273
+NULL	NULL	NULL	NULL	274	val_274
+NULL	NULL	NULL	NULL	275	val_275
+NULL	NULL	NULL	NULL	277	val_277
+NULL	NULL	NULL	NULL	277	val_277
+NULL	NULL	NULL	NULL	277	val_277
+NULL	NULL	NULL	NULL	277	val_277
+NULL	NULL	NULL	NULL	278	val_278
+NULL	NULL	NULL	NULL	278	val_278
+NULL	NULL	NULL	NULL	28	val_28
+NULL	NULL	NULL	NULL	280	val_280
+NULL	NULL	NULL	NULL	280	val_280
+NULL	NULL	NULL	NULL	281	val_281
+NULL	NULL	NULL	NULL	281	val_281
+NULL	NULL	NULL	NULL	282	val_282
+NULL	NULL	NULL	NULL	282	val_282
+NULL	NULL	NULL	NULL	283	val_283
+NULL	NULL	NULL	NULL	284	val_284
+NULL	NULL	NULL	NULL	285	val_285
+NULL	NULL	NULL	NULL	286	val_286
+NULL	NULL	NULL	NULL	287	val_287
+NULL	NULL	NULL	NULL	288	val_288
+NULL	NULL	NULL	NULL	288	val_288
+NULL	NULL	NULL	NULL	289	val_289
+NULL	NULL	NULL	NULL	291	val_291
+NULL	NULL	NULL	NULL	292	val_292
+NULL	NULL	NULL	NULL	296	val_296
+NULL	NULL	NULL	NULL	298	val_298
+NULL	NULL	NULL	NULL	298	val_298
+NULL	NULL	NULL	NULL	298	val_298
+NULL	NULL	NULL	NULL	30	val_30
+NULL	NULL	NULL	NULL	302	val_302
+NULL	NULL	NULL	NULL	305	val_305
+NULL	NULL	NULL	NULL	306	val_306
+NULL	NULL	NULL	NULL	307	val_307
+NULL	NULL	NULL	NULL	307	val_307
+NULL	NULL	NULL	NULL	308	val_308
+NULL	NULL	NULL	NULL	309	val_309
+NULL	NULL	NULL	NULL	309	val_309
+NULL	NULL	NULL	NULL	310	val_310
+NULL	NULL	NULL	NULL	311	val_311
+NULL	NULL	NULL	NULL	311	val_311
+NULL	NULL	NULL	NULL	311	val_311
+NULL	NULL	NULL	NULL	315	val_315
+NULL	NULL	NULL	NULL	316	val_316
+NULL	NULL	NULL	NULL	316	val_316
+NULL	NULL	NULL	NULL	316	val_316
+NULL	NULL	NULL	NULL	317	val_317
+NULL	NULL	NULL	NULL	317	val_317
+NULL	NULL	NULL	NULL	318	val_318
+NULL	NULL	NULL	NULL	318	val_318
+NULL	NULL	NULL	NULL	318	val_318
+NULL	NULL	NULL	NULL	321	val_321
+NULL	NULL	NULL	NULL	321	val_321
+NULL	NULL	NULL	NULL	322	val_322
+NULL	NULL	NULL	NULL	322	val_322
+NULL	NULL	NULL	NULL	323	val_323
+NULL	NULL	NULL	NULL	325	val_325
+NULL	NULL	NULL	NULL	325	val_325
+NULL	NULL	NULL	NULL	327	val_327
+NULL	NULL	NULL	NULL	327	val_327
+NULL	NULL	NULL	NULL	327	val_327
+NULL	NULL	NULL	NULL	33	val_33
+NULL	NULL	NULL	NULL	331	val_331
+NULL	NULL	NULL	NULL	331	val_331
+NULL	NULL	NULL	NULL	332	val_332
+NULL	NULL	NULL	NULL	333	val_333
+NULL	NULL	NULL	NULL	333	val_333
+NULL	NULL	NULL	NULL	335	val_335
+NULL	NULL	NULL	NULL	336	val_336
+NULL	NULL	NULL	NULL	338	val_338
+NULL	NULL	NULL	NULL	339	val_339
+NULL	NULL	NULL	NULL	34	val_34
+NULL	NULL	NULL	NULL	341	val_341
+NULL	NULL	NULL	NULL	342	val_342
+NULL	NULL	NULL	NULL	342	val_342
+NULL	NULL	NULL	NULL	344	val_344
+NULL	NULL	NULL	NULL	344	val_344
+NULL	NULL	NULL	NULL	345	val_345
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	35	val_35
+NULL	NULL	NULL	NULL	35	val_35
+NULL	NULL	NULL	NULL	35	val_35
+NULL	NULL	NULL	NULL	351	val_351
+NULL	NULL	NULL	NULL	353	val_353
+NULL	NULL	NULL	NULL	353	val_353
+NULL	NULL	NULL	NULL	356	val_356
+NULL	NULL	NULL	NULL	360	val_360
+NULL	NULL	NULL	NULL	362	val_362
+NULL	NULL	NULL	NULL	364	val_364
+NULL	NULL	NULL	NULL	365	val_365
+NULL	NULL	NULL	NULL	366	val_366
+NULL	NULL	NULL	NULL	367	val_367
+NULL	NULL	NULL	NULL	367	val_367
+NULL	NULL	NULL	NULL	368	val_368
+NULL	NULL	NULL	NULL	369	val_369
+NULL	NULL	NULL	NULL	369	val_369
+NULL	NULL	NULL	NULL	369	val_369
+NULL	NULL	NULL	NULL	37	val_37
+NULL	NULL	NULL	NULL	37	val_37
+NULL	NULL	NULL	NULL	373	val_373
+NULL	NULL	NULL	NULL	374	val_374
+NULL	NULL	NULL	NULL	375	val_375
+NULL	NULL	NULL	NULL	377	val_377
+NULL	NULL	NULL	NULL	378	val_378
+NULL	NULL	NULL	NULL	379	val_379
+NULL	NULL	NULL	NULL	382	val_382
+NULL	NULL	NULL	NULL	382	val_382
+NULL	NULL	NULL	NULL	384	val_384
+NULL	NULL	NULL	NULL	384	val_384
+NULL	NULL	NULL	NULL	384	val_384
+NULL	NULL	NULL	NULL	386	val_386
+NULL	NULL	NULL	NULL	389	val_389
+NULL	NULL	NULL	NULL	392	val_392
+NULL	NULL	NULL	NULL	393	val_393
+NULL	NULL	NULL	NULL	394	val_394
+NULL	NULL	NULL	NULL	395	val_395
+NULL	NULL	NULL	NULL	395	val_395
+NULL	NULL	NULL	NULL	396	val_396
+NULL	NULL	NULL	NULL	396	val_396
+NULL	NULL	NULL	NULL	396	val_396
+NULL	NULL	NULL	NULL	397	val_397
+NULL	NULL	NULL	NULL	397	val_397
+NULL	NULL	NULL	NULL	399	val_399
+NULL	NULL	NULL	NULL	399	val_399
+NULL	NULL	NULL	NULL	4	val_4
+NULL	NULL	NULL	NULL	400	val_400
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	402	val_402
+NULL	NULL	NULL	NULL	403	val_403
+NULL	NULL	NULL	NULL	403	val_403
+NULL	NULL	NULL	NULL	403	val_403
+NULL	NULL	NULL	NULL	404	val_404
+NULL	NULL	NULL	NULL	404	val_404
+NULL	NULL	NULL	NULL	406	val_406
+NULL	NULL	NULL	NULL	406	val_406
+NULL	NULL	NULL	NULL	406	val_406
+NULL	NULL	NULL	NULL	406	val_406
+NULL	NULL	NULL	NULL	407	val_407
+NULL	NULL	NULL	NULL	409	val_409
+NULL	NULL	NULL	NULL	409	val_409
+NULL	NULL	NULL	NULL	409	val_409
+NULL	NULL	NULL	NULL	41	val_41
+NULL	NULL	NULL	NULL	411	val_411
+NULL	NULL	NULL	NULL	413	val_413
+NULL	NULL	NULL	NULL	413	val_413
+NULL	NULL	NULL	NULL	414	val_414
+NULL	NULL	NULL	NULL	414	val_414
+NULL	NULL	NULL	NULL	417	val_417
+NULL	NULL	NULL	NULL	417	val_417
+NULL	NULL	NULL	NULL	417	val_417
+NULL	NULL	NULL	NULL	418	val_418
+NULL	NULL	NULL	NULL	419	val_419
+NULL	NULL	NULL	NULL	42	val_42
+NULL	NULL	NULL	NULL	42	val_42
+NULL	NULL	NULL	NULL	421	val_421
+NULL	NULL	NULL	NULL	424	val_424
+NULL	NULL	NULL	NULL	424	val_424
+NULL	NULL	NULL	NULL	427	val_427
+NULL	NULL	NULL	NULL	429	val_429
+NULL	NULL	NULL	NULL	429	val_429
+NULL	NULL	NULL	NULL	43	val_43
+NULL	NULL	NULL	NULL	430	val_430
+NULL	NULL	NULL	NULL	430	val_430
+NULL	NULL	NULL	NULL	430	val_430
+NULL	NULL	NULL	NULL	431	val_431
+NULL	NULL	NULL	NULL	431	val_431
+NULL	NULL	NULL	NULL	431	val_431
+NULL	NULL	NULL	NULL	432	val_432
+NULL	NULL	NULL	NULL	435	val_435
+NULL	NULL	NULL	NULL	436	val_436
+NULL	NULL	NULL	NULL	437	val_437
+NULL	NULL	NULL	NULL	438	val_438
+NULL	NULL	NULL	NULL	438	val_438
+NULL	NULL	NULL	NULL	438	val_438
+NULL	NULL	NULL	NULL	439	val_439
+NULL	NULL	NULL	NULL	439	val_439
+NULL	NULL	NULL	NULL	44	val_44
+NULL	NULL	NULL	NULL	443	val_443
+NULL	NULL	NULL	NULL	444	val_444
+NULL	NULL	NULL	NULL	446	val_446
+NULL	NULL	NULL	NULL	448	val_448
+NULL	NULL	NULL	NULL	449	val_449
+NULL	NULL	NULL	NULL	452	val_452
+NULL	NULL	NULL	NULL	453	val_453
+NULL	NULL	NULL	NULL	454	val_454
+NULL	NULL	NULL	NULL	454	val_454
+NULL	NULL	NULL	NULL	454	val_454
+NULL	NULL	NULL	NULL	455	val_455
+NULL	NULL	NULL	NULL	457	val_457
+NULL	NULL	NULL	NULL	458	val_458
+NULL	NULL	NULL	NULL	458	val_458
+NULL	NULL	NULL	NULL	459	val_459
+NULL	NULL	NULL	NULL	459	val_459
+NULL	NULL	NULL	NULL	460	val_460
+NULL	NULL	NULL	NULL	462	val_462
+NULL	NULL	NULL	NULL	462	val_462
+NULL	NULL	NULL	NULL	463	val_463
+NULL	NULL	NULL	NULL	463	val_463
+NULL	NULL	NULL	NULL	466	val_466
+NULL	NULL	NULL	NULL	466	val_466
+NULL	NULL	NULL	NULL	466	val_466
+NULL	NULL	NULL	NULL	467	val_467
+NULL	NULL	NULL	NULL	468	val_468
+NULL	NULL	NULL	NULL	468	val_468
+NULL	NULL	NULL	NULL	468	val_468
+NULL	NULL	NULL	NULL	468	val_468
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	47	val_47
+NULL	NULL	NULL	NULL	470	val_470
+NULL	NULL	NULL	NULL	472	val_472
+NULL	NULL	NULL	NULL	475	val_475
+NULL	NULL	NULL	NULL	477	val_477
+NULL	NULL	NULL	NULL	478	val_478
+NULL	NULL	NULL	NULL	478	val_478
+NULL	NULL	NULL	NULL	479	val_479
+NULL	NULL	NULL	NULL	480	val_480
+NULL	NULL	NULL	NULL	480	val_480
+NULL	NULL	NULL	NULL	480	val_480
+NULL	NULL	NULL	NULL	481	val_481
+NULL	NULL	NULL	NULL	482	val_482
+NULL	NULL	NULL	NULL	483	val_483
+NULL	NULL	NULL	NULL	484	val_484
+NULL	NULL	NULL	NULL	485	val_485
+NULL	NULL	NULL	NULL	487	val_487
+NULL	NULL	NULL	NULL	489	val_489
+NULL	NULL	NULL	NULL	489	val_489
+NULL	NULL	NULL	NULL	489	val_489
+NULL	NULL	NULL	NULL	489	val_489
+NULL	NULL	NULL	NULL	490	val_490
+NULL	NULL	NULL	NULL	491	val_491
+NULL	NULL	NULL	NULL	492	val_492
+NULL	NULL	NULL	NULL	492	val_492
+NULL	NULL	NULL	NULL	493	val_493
+NULL	NULL	NULL	NULL	494	val_494
+NULL	NULL	NULL	NULL	495	val_495
+NULL	NULL	NULL	NULL	496	val_496
+NULL	NULL	NULL	NULL	497	val_497
+NULL	NULL	NULL	NULL	498	val_498
+NULL	NULL	NULL	NULL	498	val_498
+NULL	NULL	NULL	NULL	498	val_498
+NULL	NULL	NULL	NULL	5	val_5
+NULL	NULL	NULL	NULL	5	val_5
+NULL	NULL	NULL	NULL	5	val_5
+NULL	NULL	NULL	NULL	51	val_51
+NULL	NULL	NULL	NULL	51	val_51
+NULL	NULL	NULL	NULL	53	val_53
+NULL	NULL	NULL	NULL	54	val_54
+NULL	NULL	NULL	NULL	57	val_57
+NULL	NULL	NULL	NULL	58	val_58
+NULL	NULL	NULL	NULL	58	val_58
+NULL	NULL	NULL	NULL	64	val_64
+NULL	NULL	NULL	NULL	65	val_65
+NULL	NULL	NULL	NULL	66	val_66
+NULL	NULL	NULL	NULL	67	val_67
+NULL	NULL	NULL	NULL	67	val_67
+NULL	NULL	NULL	NULL	69	val_69
+NULL	NULL	NULL	NULL	70	val_70
+NULL	NULL	NULL	NULL	70	val_70
+NULL	NULL	NULL	NULL	70	val_70
+NULL	NULL	NULL	NULL	72	val_72
+NULL	NULL	NULL	NULL	72	val_72
+NULL	NULL	NULL	NULL	74	val_74
+NULL	NULL	NULL	NULL	76	val_76
+NULL	NULL	NULL	NULL	76	val_76
+NULL	NULL	NULL	NULL	77	val_77
+NULL	NULL	NULL	NULL	78	val_78
+NULL	NULL	NULL	NULL	8	val_8
+NULL	NULL	NULL	NULL	80	val_80
+NULL	NULL	NULL	NULL	82	val_82
+NULL	NULL	NULL	NULL	83	val_83
+NULL	NULL	NULL	NULL	83	val_83
+NULL	NULL	NULL	NULL	84	val_84
+NULL	NULL	NULL	NULL	84	val_84
+NULL	NULL	NULL	NULL	85	val_85
+NULL	NULL	NULL	NULL	86	val_86
+NULL	NULL	NULL	NULL	87	val_87
+NULL	NULL	NULL	NULL	9	val_9
+NULL	NULL	NULL	NULL	90	val_90
+NULL	NULL	NULL	NULL	90	val_90
+NULL	NULL	NULL	NULL	90	val_90
+NULL	NULL	NULL	NULL	92	val_92
+NULL	NULL	NULL	NULL	95	val_95
+NULL	NULL	NULL	NULL	95	val_95
+NULL	NULL	NULL	NULL	96	val_96
+NULL	NULL	NULL	NULL	97	val_97
+NULL	NULL	NULL	NULL	97	val_97
+NULL	NULL	NULL	NULL	98	val_98
+NULL	NULL	NULL	NULL	98	val_98
 PREHOOK: query: explain
 SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
 PREHOOK: type: QUERY


[47/50] [abbrv] hive git commit: HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count into account (Laljo John Pullokkaran reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
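
A note on the change itself: per the commit subject, HiveCost's near-equality check is adjusted so that when two plans' costs fall within the comparison epsilon of each other, row count breaks the tie rather than the plans being treated as interchangeable. Below is a minimal, hypothetical Java sketch of that idea — the class name, fields, and epsilon value are illustrative assumptions, not the actual HiveCost code:

    // Hypothetical sketch of an epsilon-aware cost comparison that falls
    // back to row count; names and the tolerance are illustrative only.
    public final class CostSketch {
      private static final double EPSILON = 1e-5; // assumed tolerance

      private final double cpu;
      private final double io;
      private final double rowCount;

      public CostSketch(double cpu, double io, double rowCount) {
        this.cpu = cpu;
        this.io = io;
        this.rowCount = rowCount;
      }

      private double total() {
        return cpu + io;
      }

      // Strictly cheaper on total cost when the difference exceeds
      // EPSILON; otherwise the costs count as tied and the plan that
      // produces fewer rows wins.
      public boolean isLessThan(CostSketch other) {
        double diff = total() - other.total();
        if (Math.abs(diff) > EPSILON) {
          return diff < 0;
        }
        return rowCount < other.rowCount;
      }

      public static void main(String[] args) {
        CostSketch a = new CostSketch(10.0, 5.0, 100);
        CostSketch b = new CostSketch(10.0, 5.0 + 1e-7, 500);
        System.out.println(a.isLessThan(b)); // true: cost tie, fewer rows
      }
    }

The regenerated golden files that follow (renumbered stages, reordered join keys) presumably reflect the optimizer choosing different join orders once near-equal costs are discriminated this way.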
http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
index ff92d9f..98008ad 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
@@ -141,13 +141,13 @@ from part p1 join part p2 join part p3 on p2.p_name = p1.p_name join part p4 on
             and p1.p_partkey = p2.p_partkey
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -161,36 +161,36 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                 Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                   Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
+            0 _col0 (type: int), _col1 (type: string)
+            1 _col0 (type: int), _col1 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -202,44 +202,6 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col9 (type: int), _col10 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col9 (type: int), _col10 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: int), _col1 (type: string)
-            1 _col9 (type: int), _col10 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -256,29 +218,20 @@ STAGE PLANS:
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
           TableScan
-            alias: p1
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+            Reduce Output Operator
+              key expressions: _col10 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col10 (type: string)
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
             0 _col1 (type: string)
-            1 _col1 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+            1 _col10 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -287,6 +240,53 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: p1
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_partkey is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col9 (type: int)
+              sort order: +
+              Map-reduce partition columns: _col9 (type: int)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: int)
+            1 _col9 (type: int)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
index f608cfd..a1dd24e 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
@@ -145,13 +145,13 @@ where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
             and p1.p_partkey = p2.p_partkey
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -165,36 +165,36 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                 Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                   Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
+            0 _col0 (type: int), _col1 (type: string)
+            1 _col0 (type: int), _col1 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -206,44 +206,6 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col9 (type: int), _col10 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col9 (type: int), _col10 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: int), _col1 (type: string)
-            1 _col9 (type: int), _col10 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -260,29 +222,20 @@ STAGE PLANS:
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
           TableScan
-            alias: p1
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+            Reduce Output Operator
+              key expressions: _col10 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col10 (type: string)
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
             0 _col1 (type: string)
-            1 _col1 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+            1 _col10 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -291,6 +244,53 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: p1
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_partkey is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col9 (type: int)
+              sort order: +
+              Map-reduce partition columns: _col9 (type: int)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: int)
+            1 _col9 (type: int)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join32.q.out b/ql/src/test/results/clientpositive/spark/join32.q.out
index 0f16678..a9d50b4 100644
--- a/ql/src/test/results/clientpositive/spark/join32.q.out
+++ b/ql/src/test/results/clientpositive/spark/join32.q.out
@@ -113,16 +113,16 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: z
+                  alias: y
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+                    predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: value (type: string)
-                      outputColumnNames: _col0
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
@@ -136,12 +136,9 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: hr=11
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -149,13 +146,11 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.srcpart
+                    name default.src
                     numFiles 1
                     numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -165,23 +160,26 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [z]
+              /src [y]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -190,7 +188,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (key is not null and value is not null) (type: boolean)
+                    predicate: (value is not null and key is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -199,7 +197,7 @@ STAGE PLANS:
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
+                          1 _col1 (type: string)
                         Position of Big Table: 0
             Local Work:
               Map Reduce Local Work
@@ -260,24 +258,24 @@ STAGE PLANS:
         Map 2 
             Map Operator Tree:
                 TableScan
-                  alias: y
+                  alias: z
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: key is not null (type: boolean)
+                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
-                        outputColumnNames: _col1, _col2, _col3
+                          1 _col1 (type: string)
+                        outputColumnNames: _col0, _col3
                         input vertices:
                           1 Map 3
                         Position of Big Table: 0
@@ -288,13 +286,13 @@ STAGE PLANS:
                           keys:
                             0 _col0 (type: string)
                             1 _col3 (type: string)
-                          outputColumnNames: _col0, _col4, _col5
+                          outputColumnNames: _col1, _col2, _col5
                           input vertices:
                             0 Map 1
                           Position of Big Table: 1
                           Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                           Select Operator
-                            expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                            expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                             outputColumnNames: _col0, _col1, _col2
                             Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                             File Output Operator
@@ -330,9 +328,12 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src
+                  base file name: hr=11
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -340,11 +341,13 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src
+                    name default.srcpart
                     numFiles 1
                     numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
+                    serialization.ddl struct srcpart { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -354,26 +357,23 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
+                    name: default.srcpart
+                  name: default.srcpart
             Truncated Path -> Alias:
-              /src [y]
+              /srcpart/ds=2008-04-08/hr=11 [z]
 
   Stage: Stage-0
     Move Operator
@@ -422,8 +422,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
index 54f47f9..dac9610 100644
--- a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
@@ -121,16 +121,16 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: z
+                  alias: y
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+                    predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: value (type: string)
-                      outputColumnNames: _col0
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
@@ -144,12 +144,9 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: hr=11
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -157,13 +154,11 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.srcpart
+                    name default.src
                     numFiles 1
                     numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -173,23 +168,26 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [z]
+              /src [y]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -198,7 +196,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (key is not null and value is not null) (type: boolean)
+                    predicate: (value is not null and key is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -207,7 +205,7 @@ STAGE PLANS:
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
+                          1 _col1 (type: string)
                         Position of Big Table: 0
             Local Work:
               Map Reduce Local Work
@@ -268,24 +266,24 @@ STAGE PLANS:
         Map 2 
             Map Operator Tree:
                 TableScan
-                  alias: y
+                  alias: z
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: key is not null (type: boolean)
+                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
-                        outputColumnNames: _col1, _col2, _col3
+                          1 _col1 (type: string)
+                        outputColumnNames: _col0, _col3
                         input vertices:
                           1 Map 3
                         Position of Big Table: 0
@@ -296,13 +294,13 @@ STAGE PLANS:
                           keys:
                             0 _col0 (type: string)
                             1 _col3 (type: string)
-                          outputColumnNames: _col0, _col4, _col5
+                          outputColumnNames: _col1, _col2, _col5
                           input vertices:
                             0 Map 1
                           Position of Big Table: 1
                           Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                           Select Operator
-                            expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                            expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                             outputColumnNames: _col0, _col1, _col2
                             Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                             File Output Operator
@@ -338,9 +336,12 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src
+                  base file name: hr=11
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -348,11 +349,13 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src
+                    name default.srcpart
                     numFiles 1
                     numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
+                    serialization.ddl struct srcpart { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -362,26 +365,23 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
+                    name: default.srcpart
+                  name: default.srcpart
             Truncated Path -> Alias:
-              /src [y]
+              /srcpart/ds=2008-04-08/hr=11 [z]
 
   Stage: Stage-0
     Move Operator
@@ -430,8 +430,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1
@@ -613,34 +613,35 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-3 is a root stage
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
   Stage-1 depends on stages: Stage-3
   Stage-0 depends on stages: Stage-1
   Stage-2 depends on stages: Stage-0
 
 STAGE PLANS:
-  Stage: Stage-3
+  Stage: Stage-4
     Spark
 #### A masked pattern was here ####
       Vertices:
-        Map 1 
+        Map 3 
             Map Operator Tree:
                 TableScan
-                  alias: w
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
-                          1 _col1 (type: string)
+                          1 _col0 (type: string)
                         Position of Big Table: 1
             Local Work:
               Map Reduce Local Work
@@ -649,7 +650,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src
+                  base file name: src1
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -659,14 +660,14 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src
+                    name default.src1
                     numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
+                    numRows 25
+                    rawDataSize 191
+                    serialization.ddl struct src1 { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
+                    totalSize 216
 #### A masked pattern was here ####
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 
@@ -679,39 +680,44 @@ STAGE PLANS:
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src
+                      name default.src1
                       numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
+                      numRows 25
+                      rawDataSize 191
+                      serialization.ddl struct src1 { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
+                      totalSize 216
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
+                    name: default.src1
+                  name: default.src1
             Truncated Path -> Alias:
-              /src [w]
-        Map 3 
+              /src1 [x]
+
+  Stage: Stage-3
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: x
-                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  alias: w
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (value is not null and key is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
                           1 _col1 (type: string)
-                        Position of Big Table: 0
+                        Position of Big Table: 1
             Local Work:
               Map Reduce Local Work
             Path -> Alias:
@@ -719,7 +725,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src1
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -729,14 +735,14 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src1
+                    name default.src
                     numFiles 1
-                    numRows 25
-                    rawDataSize 191
-                    serialization.ddl struct src1 { string key, string value}
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 216
+                    totalSize 5812
 #### A masked pattern was here ####
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 
@@ -749,20 +755,20 @@ STAGE PLANS:
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src1
+                      name default.src
                       numFiles 1
-                      numRows 25
-                      rawDataSize 191
-                      serialization.ddl struct src1 { string key, string value}
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 216
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src1
-                  name: default.src1
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /src1 [x]
+              /src [w]
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -777,11 +783,22 @@ STAGE PLANS:
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                      Spark HashTable Sink Operator
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
                         keys:
-                          0 _col1 (type: string)
+                          0 _col0 (type: string)
                           1 _col0 (type: string)
-                        Position of Big Table: 0
+                        outputColumnNames: _col0, _col1, _col3
+                        input vertices:
+                          0 Map 3
+                        Position of Big Table: 1
+                        Statistics: Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+                        Spark HashTable Sink Operator
+                          keys:
+                            0 _col0 (type: string)
+                            1 _col1 (type: string)
+                          Position of Big Table: 0
             Local Work:
               Map Reduce Local Work
             Path -> Alias:
@@ -858,68 +875,57 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: string)
                           1 _col1 (type: string)
-                        outputColumnNames: _col1
+                        outputColumnNames: _col1, _col4
                         input vertices:
-                          1 Map 3
+                          1 Map 4
                         Position of Big Table: 0
                         Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                         Map Join Operator
                           condition map:
                                Inner Join 0 to 1
                           keys:
-                            0 _col1 (type: string)
-                            1 _col0 (type: string)
-                          outputColumnNames: _col1, _col4
+                            0 _col0 (type: string)
+                            1 _col1 (type: string)
+                          outputColumnNames: _col1, _col3, _col6
                           input vertices:
-                            1 Map 4
-                          Position of Big Table: 0
+                            0 Map 1
+                          Position of Big Table: 1
                           Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
-                          Map Join Operator
-                            condition map:
-                                 Inner Join 0 to 1
-                            keys:
-                              0 _col0 (type: string)
-                              1 _col1 (type: string)
-                            outputColumnNames: _col1, _col3, _col6
-                            input vertices:
-                              0 Map 1
-                            Position of Big Table: 1
-                            Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
-                            Select Operator
-                              expressions: _col3 (type: string), _col6 (type: string), _col1 (type: string)
-                              outputColumnNames: _col0, _col1, _col2
-                              Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
-                              File Output Operator
-                                compressed: false
-                                GlobalTableId: 1
+                          Select Operator
+                            expressions: _col3 (type: string), _col6 (type: string), _col1 (type: string)
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+                            File Output Operator
+                              compressed: false
+                              GlobalTableId: 1
 #### A masked pattern was here ####
-                                NumFilesPerFileSink: 1
-                                Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
+                              NumFilesPerFileSink: 1
+                              Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
-                                table:
-                                    input format: org.apache.hadoop.mapred.TextInputFormat
-                                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                                    properties:
-                                      COLUMN_STATS_ACCURATE true
-                                      bucket_count -1
-                                      columns key,value,val2
-                                      columns.comments 
-                                      columns.types string:string:string
+                              table:
+                                  input format: org.apache.hadoop.mapred.TextInputFormat
+                                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                  properties:
+                                    COLUMN_STATS_ACCURATE true
+                                    bucket_count -1
+                                    columns key,value,val2
+                                    columns.comments 
+                                    columns.types string:string:string
 #### A masked pattern was here ####
-                                      name default.dest_j1
-                                      numFiles 1
-                                      numRows 85
-                                      rawDataSize 1600
-                                      serialization.ddl struct dest_j1 { string key, string value, string val2}
-                                      serialization.format 1
-                                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                                      totalSize 1685
+                                    name default.dest_j1
+                                    numFiles 1
+                                    numRows 85
+                                    rawDataSize 1600
+                                    serialization.ddl struct dest_j1 { string key, string value, string val2}
+                                    serialization.format 1
+                                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                    totalSize 1685
 #### A masked pattern was here ####
-                                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                                    name: default.dest_j1
-                                TotalFiles: 1
-                                GatherStats: true
-                                MultiFileSpray: false
+                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                  name: default.dest_j1
+                              TotalFiles: 1
+                              GatherStats: true
+                              MultiFileSpray: false
             Local Work:
               Map Reduce Local Work
             Path -> Alias:

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join33.q.out b/ql/src/test/results/clientpositive/spark/join33.q.out
index 0f16678..a9d50b4 100644
--- a/ql/src/test/results/clientpositive/spark/join33.q.out
+++ b/ql/src/test/results/clientpositive/spark/join33.q.out
@@ -113,16 +113,16 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: z
+                  alias: y
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+                    predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: value (type: string)
-                      outputColumnNames: _col0
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
@@ -136,12 +136,9 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: hr=11
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -149,13 +146,11 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.srcpart
+                    name default.src
                     numFiles 1
                     numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -165,23 +160,26 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [z]
+              /src [y]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -190,7 +188,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (key is not null and value is not null) (type: boolean)
+                    predicate: (value is not null and key is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -199,7 +197,7 @@ STAGE PLANS:
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
+                          1 _col1 (type: string)
                         Position of Big Table: 0
             Local Work:
               Map Reduce Local Work
@@ -260,24 +258,24 @@ STAGE PLANS:
         Map 2 
             Map Operator Tree:
                 TableScan
-                  alias: y
+                  alias: z
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: key is not null (type: boolean)
+                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
-                        outputColumnNames: _col1, _col2, _col3
+                          1 _col1 (type: string)
+                        outputColumnNames: _col0, _col3
                         input vertices:
                           1 Map 3
                         Position of Big Table: 0
@@ -288,13 +286,13 @@ STAGE PLANS:
                           keys:
                             0 _col0 (type: string)
                             1 _col3 (type: string)
-                          outputColumnNames: _col0, _col4, _col5
+                          outputColumnNames: _col1, _col2, _col5
                           input vertices:
                             0 Map 1
                           Position of Big Table: 1
                           Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                           Select Operator
-                            expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                            expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                             outputColumnNames: _col0, _col1, _col2
                             Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                             File Output Operator
@@ -330,9 +328,12 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src
+                  base file name: hr=11
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -340,11 +341,13 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src
+                    name default.srcpart
                     numFiles 1
                     numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
+                    serialization.ddl struct srcpart { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -354,26 +357,23 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
+                    name: default.srcpart
+                  name: default.srcpart
             Truncated Path -> Alias:
-              /src [y]
+              /srcpart/ds=2008-04-08/hr=11 [z]
 
   Stage: Stage-0
     Move Operator
@@ -422,8 +422,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1


[48/50] [abbrv] hive git commit: HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count in to account (Laljo John Pullokkaran reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count in to account (Laljo John Pullokkaran reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/809fcb01
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/809fcb01
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/809fcb01

Branch: refs/heads/beeline-cli
Commit: 809fcb01457ab7ba09786f237ce558e81c53ee49
Parents: 4a0ccd1
Author: jpullokk <jp...@apache.org>
Authored: Thu May 7 14:16:25 2015 -0700
Committer: jpullokk <jp...@apache.org>
Committed: Thu May 7 14:16:25 2015 -0700

----------------------------------------------------------------------
 .../ql/optimizer/calcite/cost/HiveCost.java     |   16 +-
 .../annotate_stats_join_pkfk.q.out              |   20 +-
 .../encryption_insert_partition_static.q.out    |   14 +-
 ql/src/test/results/clientpositive/join32.q.out |   84 +-
 .../clientpositive/join32_lessSize.q.out        |  423 ++---
 ql/src/test/results/clientpositive/join33.q.out |   84 +-
 .../clientpositive/join_alt_syntax.q.out        |  306 ++--
 .../clientpositive/join_cond_pushdown_2.q.out   |  150 +-
 .../clientpositive/join_cond_pushdown_4.q.out   |  150 +-
 .../results/clientpositive/spark/join32.q.out   |   88 +-
 .../clientpositive/spark/join32_lessSize.q.out  |  286 ++--
 .../results/clientpositive/spark/join33.q.out   |   88 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |  210 ++-
 .../spark/join_cond_pushdown_2.q.out            |   98 +-
 .../spark/join_cond_pushdown_4.q.out            |   98 +-
 .../clientpositive/tez/explainuser_2.q.out      | 1529 +++++++++---------
 16 files changed, 1770 insertions(+), 1874 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
index 0755943..3c5cac2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
@@ -21,7 +21,15 @@ import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelOptCostFactory;
 import org.apache.calcite.plan.RelOptUtil;
 
-// TODO: This should inherit from VolcanoCost and should just override isLE method.
+/**
+ * NOTE:<br>
+ * 1. HiveCost normalizes CPU and IO into time.<br>
+ * 2. CPU and IO cost are added together to find the query latency.<br>
+ * 3. If query latencies are equal, then row count is compared.
+ */
+
+// TODO: This should inherit from VolcanoCost and should just override isLE
+// method.
 public class HiveCost implements RelOptCost {
   // ~ Static fields/initializers ---------------------------------------------
 
@@ -114,8 +122,10 @@ public class HiveCost implements RelOptCost {
   }
 
   public boolean isEqWithEpsilon(RelOptCost other) {
-    return (this == other) || (Math.abs((this.cpu + this.io) -
-            (other.getCpu() + other.getIo())) < RelOptUtil.EPSILON);
+    return (this == other)
+        || ((Math.abs(this.io - other.getIo()) < RelOptUtil.EPSILON)
+            && (Math.abs(this.cpu - other.getCpu()) < RelOptUtil.EPSILON)
+            && (Math.abs(this.rowCount - other.getRows()) < RelOptUtil.EPSILON));
   }
 
   public RelOptCost minus(RelOptCost other) {
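
For readers tracing why the golden files above changed: HIVE-10526 tightens HiveCost.isEqWithEpsilon from "summed cpu + io within epsilon" to "io, cpu, and row count each within epsilon". The standalone Java sketch below contrasts the two checks; the class name, method signatures, and hard-coded epsilon are invented for illustration, and only the comparison logic mirrors the hunk above.

// Minimal standalone sketch of the HIVE-10526 comparison change.
// Names are illustrative only; EPSILON stands in for RelOptUtil.EPSILON.
public class CostEpsilonSketch {
  static final double EPSILON = 1.0e-5;

  // Pre-HIVE-10526: plans are "equal with epsilon" whenever the summed
  // cpu + io latencies match, regardless of cardinality.
  static boolean isEqWithEpsilonOld(double cpu1, double io1,
                                    double cpu2, double io2) {
    return Math.abs((cpu1 + io1) - (cpu2 + io2)) < EPSILON;
  }

  // Post-HIVE-10526: io, cpu, and row count must each match within epsilon.
  static boolean isEqWithEpsilonNew(double cpu1, double io1, double rows1,
                                    double cpu2, double io2, double rows2) {
    return Math.abs(io1 - io2) < EPSILON
        && Math.abs(cpu1 - cpu2) < EPSILON
        && Math.abs(rows1 - rows2) < EPSILON;
  }

  public static void main(String[] args) {
    // Equal latency (cpu + io), different cardinality (500 vs. 25 rows):
    System.out.println(isEqWithEpsilonOld(10.0, 5.0, 10.0, 5.0));              // true
    System.out.println(isEqWithEpsilonNew(10.0, 5.0, 500.0, 10.0, 5.0, 25.0)); // false
  }
}

Under the old check, two plans with equal total latency but different cardinalities compared as equal, leaving the planner's choice between them effectively arbitrary; the new check breaks such ties by row count, which is what reorders the map-join inputs and aliases in the .q.out diffs throughout this commit.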

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
index 4a5d02d..66e0e9f 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
@@ -808,32 +808,32 @@ STAGE PLANS:
             alias: s
             Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: s_store_sk is not null (type: boolean)
-              Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: ((s_floor_space > 1000) and s_store_sk is not null) (type: boolean)
+              Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
             Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((s_floor_space > 1000) and s_store_sk is not null) (type: boolean)
-              Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: s_store_sk is not null (type: boolean)
+              Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -843,10 +843,10 @@ STAGE PLANS:
             0 _col0 (type: int)
             1 _col0 (type: int)
             2 _col0 (type: int)
-          outputColumnNames: _col2
+          outputColumnNames: _col1
           Statistics: Num rows: 322 Data size: 1288 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col2 (type: int)
+            expressions: _col1 (type: int)
             outputColumnNames: _col0
             Statistics: Num rows: 322 Data size: 1288 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
index 9e1c1e3..96f8b6a 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
@@ -555,16 +555,16 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: encryptedtable
-            Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 sort order: 
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
                 tag: -1
                 value expressions: _col0 (type: string), _col1 (type: string)
                 auto parallelism: false
@@ -595,7 +595,7 @@ STAGE PLANS:
               serialization.ddl struct encryptedtable { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 1392
+              totalSize 1385
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
           
@@ -643,7 +643,7 @@ STAGE PLANS:
               serialization.ddl struct encryptedtable { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 1385
+              totalSize 1382
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
           
@@ -675,14 +675,14 @@ STAGE PLANS:
         Select Operator
           expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 1
 #### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
             NumFilesPerFileSink: 1
             Static Partition Specification: ds=today/
-            Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
 #### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
             table:
                 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out
index d9e8dd3..a05a356 100644
--- a/ql/src/test/results/clientpositive/join32.q.out
+++ b/ql/src/test/results/clientpositive/join32.q.out
@@ -109,71 +109,25 @@ STAGE PLANS:
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:z 
+        $hdt$_0:y 
           Fetch Operator
             limit: -1
-            Partition Description:
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
         $hdt$_1:$hdt$_2:x 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:z 
+        $hdt$_0:y 
           TableScan
-            alias: z
+            alias: y
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+              predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
@@ -187,7 +141,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (key is not null and value is not null) (type: boolean)
+              predicate: (value is not null and key is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -196,31 +150,31 @@ STAGE PLANS:
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
+                    1 _col1 (type: string)
                   Position of Big Table: 0
 
   Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: y
+            alias: z
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: key is not null (type: boolean)
+              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: value (type: string)
+                outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
-                  outputColumnNames: _col1, _col2, _col3
+                    1 _col1 (type: string)
+                  outputColumnNames: _col0, _col3
                   Position of Big Table: 0
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
@@ -229,11 +183,11 @@ STAGE PLANS:
                     keys:
                       0 _col0 (type: string)
                       1 _col3 (type: string)
-                    outputColumnNames: _col0, _col4, _col5
+                    outputColumnNames: _col1, _col2, _col5
                     Position of Big Table: 1
                     Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                      expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
@@ -402,7 +356,7 @@ STAGE PLANS:
               name: default.srcpart
             name: default.srcpart
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:y]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]
 
   Stage: Stage-0
     Move Operator
@@ -451,8 +405,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1

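For orientation, a minimal sketch of the query behind this join32.q.out diff, reconstructed from the POSTHOOK lineage lines above (dest_j1.key from src1 x, dest_j1.val2 from src y, dest_j1.value from srcpart z) together with the constant-folded hr=11 predicate; the actual join32.q source may differ:

-- Hypothetical reconstruction from the lineage and plan fragments above only.
INSERT OVERWRITE TABLE dest_j1
SELECT x.key, z.value, y.value
FROM src1 x
JOIN src y ON (x.key = y.key)
JOIN srcpart z ON (x.value = z.value AND z.ds = '2008-04-08' AND z.hr = 11);

Read this way, the hunks above show the optimizer now choosing y rather than z ($hdt$_0:y instead of $hdt$_0:z) for the local hash-table side, with the filter predicates, select lists, and join key columns moving accordingly while the final-stage row estimates stay unchanged.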
http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join32_lessSize.q.out b/ql/src/test/results/clientpositive/join32_lessSize.q.out
index 9e3d06d..136c306 100644
--- a/ql/src/test/results/clientpositive/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/join32_lessSize.q.out
@@ -130,7 +130,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (key is not null and value is not null) (type: boolean)
+              predicate: (value is not null and key is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -139,31 +139,31 @@ STAGE PLANS:
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
+                    1 _col1 (type: string)
                   Position of Big Table: 0
 
   Stage: Stage-6
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: y
+            alias: z
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: key is not null (type: boolean)
+              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: value (type: string)
+                outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
-                  outputColumnNames: _col1, _col2, _col3
+                    1 _col1 (type: string)
+                  outputColumnNames: _col0, _col3
                   Position of Big Table: 0
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -175,8 +175,8 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                         properties:
-                          columns _col1,_col2,_col3
-                          columns.types string,string,string
+                          columns _col0,_col3
+                          columns.types string,string
                           escape.delim \
                           serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                         serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -190,7 +190,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: src
+            base file name: src1
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -200,14 +200,14 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.src
+              name default.src1
               numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
+              numRows 25
+              rawDataSize 191
+              serialization.ddl struct src1 { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
+              totalSize 216
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
@@ -220,23 +220,26 @@ STAGE PLANS:
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.src
+                name default.src1
                 numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
+                numRows 25
+                rawDataSize 191
+                serialization.ddl struct src1 { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
+                totalSize 216
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
+              name: default.src1
+            name: default.src1
 #### A masked pattern was here ####
           Partition
-            base file name: src1
+            base file name: hr=11
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
             properties:
               COLUMN_STATS_ACCURATE true
               bucket_count -1
@@ -244,106 +247,59 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.src1
+              name default.srcpart
               numFiles 1
-              numRows 25
-              rawDataSize 191
-              serialization.ddl struct src1 { string key, string value}
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 216
+              totalSize 5812
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE true
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.src1
-                numFiles 1
-                numRows 25
-                rawDataSize 191
-                serialization.ddl struct src1 { string key, string value}
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 216
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src1
-            name: default.src1
+              name: default.srcpart
+            name: default.srcpart
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:y]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]
 
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:z 
+        $hdt$_0:y 
           Fetch Operator
             limit: -1
-            Partition Description:
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
       Alias -> Map Local Operator Tree:
-        $hdt$_0:z 
+        $hdt$_0:y 
           TableScan
-            alias: z
+            alias: y
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+              predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
@@ -362,11 +318,11 @@ STAGE PLANS:
               keys:
                 0 _col0 (type: string)
                 1 _col3 (type: string)
-              outputColumnNames: _col0, _col4, _col5
+              outputColumnNames: _col1, _col2, _col5
               Position of Big Table: 1
               Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -406,8 +362,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col1,_col2,_col3
-              columns.types string,string,string
+              columns _col0,_col3
+              columns.types string,string
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -415,19 +371,16 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col1,_col2,_col3
-                columns.types string,string,string
+                columns _col0,_col3
+                columns.types string,string
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 #### A masked pattern was here ####
           Partition
-            base file name: hr=11
+            base file name: src
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
             properties:
               COLUMN_STATS_ACCURATE true
               bucket_count -1
@@ -435,13 +388,11 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.srcpart
+              name default.src
               numFiles 1
               numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
               rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
+              serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 5812
@@ -451,21 +402,24 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE true
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
+              name: default.src
+            name: default.src
       Truncated Path -> Alias:
 #### A masked pattern was here ####
 
@@ -516,8 +470,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1
@@ -703,28 +657,27 @@ STAGE DEPENDENCIES:
   Stage-8 depends on stages: Stage-11
   Stage-10 depends on stages: Stage-8
   Stage-7 depends on stages: Stage-10
-  Stage-0 depends on stages: Stage-7
+  Stage-9 depends on stages: Stage-7
+  Stage-6 depends on stages: Stage-9
+  Stage-0 depends on stages: Stage-6
   Stage-2 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-11
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_1:$hdt$_2:x 
-          Fetch Operator
-            limit: -1
-        $hdt$_1:$hdt$_3:x 
+        $hdt$_1:$hdt$_2:$hdt$_2:x 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_1:$hdt$_2:x 
+        $hdt$_1:$hdt$_2:$hdt$_2:x 
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (value is not null and key is not null) (type: boolean)
+              predicate: (key is not null and value is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -733,9 +686,12 @@ STAGE PLANS:
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
-                    1 _col1 (type: string)
-                  Position of Big Table: 0
-        $hdt$_1:$hdt$_3:x 
+                    1 _col0 (type: string)
+                  Position of Big Table: 1
+
+  Stage: Stage-8
+    Map Reduce
+      Map Operator Tree:
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -748,62 +704,32 @@ STAGE PLANS:
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                HashTable Sink Operator
-                  keys:
-                    0 _col1 (type: string)
-                    1 _col0 (type: string)
-                  Position of Big Table: 0
-
-  Stage: Stage-8
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: w
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: value is not null (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: string)
-                    1 _col1 (type: string)
-                  outputColumnNames: _col1
-                  Position of Big Table: 0
-                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                  Map Join Operator
-                    condition map:
-                         Inner Join 0 to 1
-                    keys:
-                      0 _col1 (type: string)
-                      1 _col0 (type: string)
-                    outputColumnNames: _col1, _col4
-                    Position of Big Table: 0
-                    Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
+                    1 _col0 (type: string)
+                  outputColumnNames: _col0, _col1, _col3
+                  Position of Big Table: 1
+                  Statistics: Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
 #### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          properties:
-                            columns _col1,_col4
-                            columns.types string,string
-                            escape.delim \
-                            serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                      TotalFiles: 1
-                      GatherStats: false
-                      MultiFileSpray: false
+                    NumFilesPerFileSink: 1
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col3
+                          columns.types string,string,string
+                          escape.delim \
+                          serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
       Local Work:
         Map Reduce Local Work
       Path -> Alias:
@@ -811,7 +737,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: src
+            base file name: src1
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -821,14 +747,14 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.src
+              name default.src1
               numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
+              numRows 25
+              rawDataSize 191
+              serialization.ddl struct src1 { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
+              totalSize 216
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
@@ -841,21 +767,106 @@ STAGE PLANS:
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.src
+                name default.src1
                 numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
+                numRows 25
+                rawDataSize 191
+                serialization.ddl struct src1 { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
+                totalSize 216
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
+              name: default.src1
+            name: default.src1
+      Truncated Path -> Alias:
+        /src1 [$hdt$_1:$hdt$_2:$hdt$_3:x]
+
+  Stage: Stage-10
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        $hdt$_1:$hdt$_1:w 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        $hdt$_1:$hdt$_1:w 
+          TableScan
+            alias: w
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: value is not null (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: value (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 _col0 (type: string)
+                    1 _col1 (type: string)
+                  Position of Big Table: 1
+
+  Stage: Stage-7
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              keys:
+                0 _col0 (type: string)
+                1 _col1 (type: string)
+              outputColumnNames: _col1, _col4
+              Position of Big Table: 1
+              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    properties:
+                      columns _col1,_col4
+                      columns.types string,string
+                      escape.delim \
+                      serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: src1
+            base file name: -mr-10002
+            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            properties:
+              columns _col0,_col1,_col3
+              columns.types string,string,string
+              escape.delim \
+              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          
+              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+              properties:
+                columns _col0,_col1,_col3
+                columns.types string,string,string
+                escape.delim \
+                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+#### A masked pattern was here ####
+          Partition
+            base file name: src
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -865,14 +876,14 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.src1
+              name default.src
               numFiles 1
-              numRows 25
-              rawDataSize 191
-              serialization.ddl struct src1 { string key, string value}
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 216
+              totalSize 5812
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
@@ -885,22 +896,22 @@ STAGE PLANS:
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.src1
+                name default.src
                 numFiles 1
-                numRows 25
-                rawDataSize 191
-                serialization.ddl struct src1 { string key, string value}
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 216
+                totalSize 5812
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src1
-            name: default.src1
+              name: default.src
+            name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:w]
+#### A masked pattern was here ####
 
-  Stage: Stage-10
+  Stage: Stage-9
     Map Reduce Local Work
       Alias -> Map Local Tables:
         $hdt$_0:w 
@@ -926,7 +937,7 @@ STAGE PLANS:
                     1 _col1 (type: string)
                   Position of Big Table: 1
 
-  Stage: Stage-7
+  Stage: Stage-6
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -939,17 +950,17 @@ STAGE PLANS:
                 1 _col1 (type: string)
               outputColumnNames: _col1, _col3, _col6
               Position of Big Table: 1
-              Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _col3 (type: string), _col6 (type: string), _col1 (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 1
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
@@ -982,7 +993,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: -mr-10002
+            base file name: -mr-10001
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out
index d9e8dd3..a05a356 100644
--- a/ql/src/test/results/clientpositive/join33.q.out
+++ b/ql/src/test/results/clientpositive/join33.q.out
@@ -109,71 +109,25 @@ STAGE PLANS:
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:z 
+        $hdt$_0:y 
           Fetch Operator
             limit: -1
-            Partition Description:
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
         $hdt$_1:$hdt$_2:x 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:z 
+        $hdt$_0:y 
           TableScan
-            alias: z
+            alias: y
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+              predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
@@ -187,7 +141,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (key is not null and value is not null) (type: boolean)
+              predicate: (value is not null and key is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -196,31 +150,31 @@ STAGE PLANS:
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
+                    1 _col1 (type: string)
                   Position of Big Table: 0
 
   Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: y
+            alias: z
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: key is not null (type: boolean)
+              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: value (type: string)
+                outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
-                  outputColumnNames: _col1, _col2, _col3
+                    1 _col1 (type: string)
+                  outputColumnNames: _col0, _col3
                   Position of Big Table: 0
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
@@ -229,11 +183,11 @@ STAGE PLANS:
                     keys:
                       0 _col0 (type: string)
                       1 _col3 (type: string)
-                    outputColumnNames: _col0, _col4, _col5
+                    outputColumnNames: _col1, _col2, _col5
                     Position of Big Table: 1
                     Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                      expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
@@ -402,7 +356,7 @@ STAGE PLANS:
               name: default.srcpart
             name: default.srcpart
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:y]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]
 
   Stage: Stage-0
     Move Operator
@@ -451,8 +405,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join_alt_syntax.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_alt_syntax.q.out b/ql/src/test/results/clientpositive/join_alt_syntax.q.out
index 007e4c6..cc908c1 100644
--- a/ql/src/test/results/clientpositive/join_alt_syntax.q.out
+++ b/ql/src/test/results/clientpositive/join_alt_syntax.q.out
@@ -359,13 +359,13 @@ where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
             and p1.p_partkey = p2.p_partkey
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -379,36 +379,34 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                   Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
+            0 _col0 (type: int), _col1 (type: string)
+            1 _col0 (type: int), _col1 (type: string)
           outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -420,60 +418,6 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: int), _col1 (type: string)
-            1 _col0 (type: int), _col1 (type: string)
-          outputColumnNames: _col1, _col3, _col5, _col6
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col3 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: p1
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_partkey (type: int), p_name (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int)
-          TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -488,14 +432,21 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            Reduce Output Operator
+              key expressions: _col3 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col3 (type: string)
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col1 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2
+            0 _col0 (type: string)
+            1 _col3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col4
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -504,6 +455,53 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: p1
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_partkey is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_partkey (type: int), p_name (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col1 (type: int)
+              sort order: +
+              Map-reduce partition columns: _col1 (type: int)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: int)
+            1 _col1 (type: int)
+          outputColumnNames: _col1, _col2, _col4, _col6
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col4 (type: string), _col6 (type: string), _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1
@@ -521,56 +519,54 @@ where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
             and p1.p_partkey = p2.p_partkey
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                  key expressions: _col1 (type: string), _col0 (type: int)
+                  sort order: ++
+                  Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
                   Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
+                  key expressions: _col1 (type: string), _col0 (type: int)
+                  sort order: ++
+                  Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
+                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
+            0 _col1 (type: string), _col0 (type: int)
+            1 _col1 (type: string), _col0 (type: int)
           outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -582,60 +578,6 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col1 (type: string), _col0 (type: int)
-              sort order: ++
-              Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col1 (type: string), _col0 (type: int)
-              sort order: ++
-              Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col1 (type: string), _col0 (type: int)
-            1 _col1 (type: string), _col0 (type: int)
-          outputColumnNames: _col1, _col3, _col5, _col6
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col3 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: p1
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_partkey (type: int), p_name (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int)
-          TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -650,14 +592,21 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            Reduce Output Operator
+              key expressions: _col3 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col3 (type: string)
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col1 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2
+            0 _col0 (type: string)
+            1 _col3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col4
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -666,6 +615,53 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: p1
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_partkey is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_partkey (type: int), p_name (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col1 (type: int)
+              sort order: +
+              Map-reduce partition columns: _col1 (type: int)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: int)
+            1 _col1 (type: int)
+          outputColumnNames: _col1, _col2, _col4, _col6
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col4 (type: string), _col6 (type: string), _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1


[10/50] [abbrv] hive git commit: HIVE-10605 - Make hive version number update automatically in webhcat-default.xml during hive tar generation (Eugene Koifman, reviewed by Thejas Nair)

Posted by xu...@apache.org.
HIVE-10605 - Make hive version number update automatically in webhcat-default.xml during hive tar generation (Eugene Koifman, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eefb0718
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eefb0718
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eefb0718

Branch: refs/heads/beeline-cli
Commit: eefb0718ea347680850620c5dd9eff5ec202566d
Parents: 02b6cd1
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Tue May 5 18:00:15 2015 -0700
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Tue May 5 18:00:15 2015 -0700

----------------------------------------------------------------------
 .../deployers/config/webhcat/webhcat-site.xml          |  9 +--------
 hcatalog/src/test/e2e/templeton/deployers/env.sh       |  1 +
 hcatalog/webhcat/svr/pom.xml                           | 13 +++++++------
 .../webhcat/svr/src/main/config/webhcat-default.xml    | 10 +++++-----
 packaging/src/main/assembly/bin.xml                    |  1 +
 5 files changed, 15 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/eefb0718/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml b/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml
index 7a2d450..8bcb1f0 100644
--- a/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml
+++ b/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml
@@ -24,8 +24,7 @@
 <!-- install. -->
 
 <configuration>
-<!--TODO:
-1. make pig/hive versions env variables-->
+<!--TODO:-->
 
   <property>
     <name>templeton.hcat</name>
@@ -34,12 +33,6 @@
   </property>
 
     <property>
-        <name>templeton.libjars</name>
-        <value>${env.TEMPLETON_HOME}/../lib/zookeeper-3.4.6.jar,${env.TEMPLETON_HOME}/../lib/hive-common-1.2.0-SNAPSHOT.jar</value>
-        <description>Jars to add to the classpath.</description>
-    </property>
-
-    <property>
         <name>templeton.pig.archive</name>
         <value>hdfs:///apps/templeton/pig-${env.PIG_VERSION}.tar.gz</value>
         <description>The path to the Pig archive.</description>

http://git-wip-us.apache.org/repos/asf/hive/blob/eefb0718/hcatalog/src/test/e2e/templeton/deployers/env.sh
----------------------------------------------------------------------
diff --git a/hcatalog/src/test/e2e/templeton/deployers/env.sh b/hcatalog/src/test/e2e/templeton/deployers/env.sh
index a9cc2d7..8b719f2 100755
--- a/hcatalog/src/test/e2e/templeton/deployers/env.sh
+++ b/hcatalog/src/test/e2e/templeton/deployers/env.sh
@@ -22,6 +22,7 @@
 
 # define necessary env vars here and source it in other files
 
+#todo: most of these variables are defined in pom.xml - see if this can be integrated
 echo ${HADOOP_VERSION};
 
 if [ -z ${HADOOP_VERSION} ]; then

http://git-wip-us.apache.org/repos/asf/hive/blob/eefb0718/hcatalog/webhcat/svr/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml
index 54b8c98..5b6696e 100644
--- a/hcatalog/webhcat/svr/pom.xml
+++ b/hcatalog/webhcat/svr/pom.xml
@@ -151,12 +151,13 @@
   </profiles>
 
   <build>
-      <resources>
-          <resource>
-              <targetPath>.</targetPath>
-              <directory>src/main/config</directory>
-          </resource>
-      </resources>
+    <resources>
+      <resource>
+        <targetPath>.</targetPath>
+        <directory>src/main/config</directory>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/eefb0718/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml b/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml
index dc6521a..801f3a5 100644
--- a/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml
+++ b/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml
@@ -39,7 +39,7 @@
 
   <property>
     <name>templeton.libjars</name>
-    <value>${env.TEMPLETON_HOME}/../lib/zookeeper-3.4.6.jar,${env.TEMPLETON_HOME}/../lib/hive-common-1.2.0-SNAPSHOT.jar</value>
+    <value>${env.TEMPLETON_HOME}/../lib/zookeeper-${zookeeper.version}.jar,${env.TEMPLETON_HOME}/../lib/hive-common-${project.version}.jar</value>
     <description>Jars to add to the classpath.</description>
   </property>
 
@@ -87,7 +87,7 @@
 
   <property>
     <name>templeton.pig.path</name>
-    <value>pig-0.11.1.tar.gz/pig-0.11.1/bin/pig</value>
+    <value>pig-${pig.version}.tar.gz/pig-${pig.version}/bin/pig</value>
     <description>The path to the Pig executable.</description>
   </property>
 
@@ -105,7 +105,7 @@
 
   <property>
     <name>templeton.hive.path</name>
-    <value>hive-0.11.0.tar.gz/hive-0.11.0/bin/hive</value>
+    <value>hive-${project.version}.tar.gz/hive-${project.version}/bin/hive</value>
     <description>The path to the Hive executable.  Applies only if templeton.hive.archive is defined.</description>
   </property>
 
@@ -125,7 +125,7 @@
 
   <property>
     <name>templeton.hive.home</name>
-    <value>hive-0.14.0-SNAPSHOT-bin.tar.gz/hive-0.14.0-SNAPSHOT-bin</value>
+    <value>hive-${project.version}-bin.tar.gz/hive-${project.version}-bin</value>
     <description>
       The path to the Hive home within the tar.  This is needed if Hive is not installed on all
       nodes in the cluster and needs to be shipped to the target node in the cluster to execute Pig
@@ -134,7 +134,7 @@
   </property>
   <property>
     <name>templeton.hcat.home</name>
-    <value>hive-0.14.0-SNAPSHOT-bin.tar.gz/hive-0.14.0-SNAPSHOT-bin/hcatalog</value>
+    <value>hive-${project.version}-bin.tar.gz/hive-${project.version}-bin/hcatalog</value>
     <description>
       The path to the HCat home within the tar.  This is needed if Hive is not installed on all
       nodes in the cluster and needs to be shipped to the target node in the cluster to execute Pig

http://git-wip-us.apache.org/repos/asf/hive/blob/eefb0718/packaging/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml
index 2cda623..a1c176f 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -276,6 +276,7 @@
         <include>webhcat-default.xml</include>
         <include>webhcat-log4j.properties</include>
       </includes>
+      <filtered>true</filtered>
       <outputDirectory>hcatalog/etc/webhcat</outputDirectory>
     </fileSet>
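
The hive and pig paths above now carry ${project.version}-style tokens; switching on <filtering>true</filtering> in the svr pom's resource section (and <filtered>true</filtered> in the assembly fileSet) is what makes Maven substitute the real version numbers into webhcat-default.xml while the tar is generated. Below is a minimal sketch of that placeholder substitution, for illustration only: FilterSketch is not part of Hive or Maven (Maven performs this internally), and the property names are just the ones visible in the diff.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative stand-in for Maven resource filtering: replaces ${key}
    // tokens from a property map, leaving unresolved tokens (for example
    // ${env.TEMPLETON_HOME}) in place, as filtering does.
    public class FilterSketch {
      private static final Pattern PLACEHOLDER = Pattern.compile("\\$\\{([^}]+)\\}");

      static String filter(String template, Map<String, String> props) {
        Matcher m = PLACEHOLDER.matcher(template);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
          // Unknown keys fall back to the whole match, i.e. stay unresolved.
          String value = props.getOrDefault(m.group(1), m.group(0));
          m.appendReplacement(out, Matcher.quoteReplacement(value));
        }
        m.appendTail(out);
        return out.toString();
      }

      public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("project.version", "1.2.0-SNAPSHOT");
        props.put("zookeeper.version", "3.4.6");
        System.out.println(filter(
            "hive-${project.version}.tar.gz/hive-${project.version}/bin/hive", props));
        // prints: hive-1.2.0-SNAPSHOT.tar.gz/hive-1.2.0-SNAPSHOT/bin/hive
      }
    }

With the versions resolved at build time, the hard-coded values such as hive-0.11.0 and pig-0.11.1 in webhcat-default.xml no longer need manual edits on each release.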
 


[19/50] [abbrv] hive git commit: HIVE-9845 : HCatSplit repeats information making input split data size huge (Mithun Radhakrishnan via Sushanth Sowmyan)

Posted by xu...@apache.org.
HIVE-9845 : HCatSplit repeats information making input split data size huge (Mithun Radhakrishnan via Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/18fb4601
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/18fb4601
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/18fb4601

Branch: refs/heads/beeline-cli
Commit: 18fb460179ff48d2c1e65f324799b4315616f14b
Parents: dc72c87
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 14:03:37 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 14:04:32 2015 -0700

----------------------------------------------------------------------
 .../hcatalog/mapreduce/HCatBaseInputFormat.java |  20 ++--
 .../hive/hcatalog/mapreduce/HCatSplit.java      |  21 +---
 .../hive/hcatalog/mapreduce/HCatTableInfo.java  |  12 ++
 .../hive/hcatalog/mapreduce/InputJobInfo.java   |   5 +
 .../hive/hcatalog/mapreduce/PartInfo.java       | 117 +++++++++++++++++--
 .../mapreduce/TestHCatOutputFormat.java         |   5 +-
 6 files changed, 139 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/18fb4601/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
index 55b97dd..adfaf4e 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
@@ -130,16 +130,6 @@ public abstract class HCatBaseInputFormat
       setInputPath(jobConf, partitionInfo.getLocation());
       Map<String, String> jobProperties = partitionInfo.getJobProperties();
 
-      HCatSchema allCols = new HCatSchema(new LinkedList<HCatFieldSchema>());
-      for (HCatFieldSchema field :
-        inputJobInfo.getTableInfo().getDataColumns().getFields()) {
-        allCols.append(field);
-      }
-      for (HCatFieldSchema field :
-        inputJobInfo.getTableInfo().getPartitionColumns().getFields()) {
-        allCols.append(field);
-      }
-
       HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
 
       storageHandler = HCatUtil.getStorageHandler(
@@ -163,9 +153,7 @@ public abstract class HCatBaseInputFormat
         inputFormat.getSplits(jobConf, desiredNumSplits);
 
       for (org.apache.hadoop.mapred.InputSplit split : baseSplits) {
-        splits.add(new HCatSplit(
-          partitionInfo,
-          split, allCols));
+        splits.add(new HCatSplit(partitionInfo, split));
       }
     }
 
@@ -190,6 +178,12 @@ public abstract class HCatBaseInputFormat
 
     HCatSplit hcatSplit = InternalUtil.castToHCatSplit(split);
     PartInfo partitionInfo = hcatSplit.getPartitionInfo();
+    // Ensure PartInfo's TableInfo is initialized.
+    if (partitionInfo.getTableInfo() == null) {
+      partitionInfo.setTableInfo(((InputJobInfo)HCatUtil.deserialize(
+          taskContext.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO)
+      )).getTableInfo());
+    }
     JobContext jobContext = taskContext;
     Configuration conf = jobContext.getConfiguration();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/18fb4601/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatSplit.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatSplit.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatSplit.java
index bcedb3a..0aa498a 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatSplit.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatSplit.java
@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 
 import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -44,11 +43,6 @@ public class HCatSplit extends InputSplit
   /** The split returned by the underlying InputFormat split. */
   private org.apache.hadoop.mapred.InputSplit baseMapRedSplit;
 
-  /** The schema for the HCatTable */
-  private HCatSchema tableSchema;
-
-  private HiveConf hiveConf;
-
   /**
    * Instantiates a new hcat split.
    */
@@ -60,16 +54,13 @@ public class HCatSplit extends InputSplit
    *
    * @param partitionInfo the partition info
    * @param baseMapRedSplit the base mapred split
-   * @param tableSchema the table level schema
    */
   public HCatSplit(PartInfo partitionInfo,
-           org.apache.hadoop.mapred.InputSplit baseMapRedSplit,
-           HCatSchema tableSchema) {
+           org.apache.hadoop.mapred.InputSplit baseMapRedSplit) {
 
     this.partitionInfo = partitionInfo;
     // dataSchema can be obtained from partitionInfo.getPartitionSchema()
     this.baseMapRedSplit = baseMapRedSplit;
-    this.tableSchema = tableSchema;
   }
 
   /**
@@ -101,7 +92,8 @@ public class HCatSplit extends InputSplit
    * @return the table schema
    */
   public HCatSchema getTableSchema() {
-    return this.tableSchema;
+    assert this.partitionInfo.getTableInfo() != null : "TableInfo should have been set at this point.";
+    return this.partitionInfo.getTableInfo().getAllColumns();
   }
 
   /* (non-Javadoc)
@@ -159,9 +151,6 @@ public class HCatSplit extends InputSplit
     } catch (Exception e) {
       throw new IOException("Exception from " + baseSplitClassName, e);
     }
-
-    String tableSchemaString = WritableUtils.readString(input);
-    tableSchema = (HCatSchema) HCatUtil.deserialize(tableSchemaString);
   }
 
   /* (non-Javadoc)
@@ -178,10 +167,6 @@ public class HCatSplit extends InputSplit
     Writable baseSplitWritable = (Writable) baseMapRedSplit;
     //write  baseSplit into output
     baseSplitWritable.write(output);
-
-    //write the table schema into output
-    String tableSchemaString = HCatUtil.serialize(tableSchema);
-    WritableUtils.writeString(output, tableSchemaString);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/18fb4601/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
index 13faf15..14c93ab 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
@@ -21,10 +21,13 @@ package org.apache.hive.hcatalog.mapreduce;
 
 import java.io.IOException;
 import java.io.Serializable;
+import java.util.List;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hive.hcatalog.common.HCatUtil;
+import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatSchema;
 
 /**
@@ -112,6 +115,15 @@ public class HCatTableInfo implements Serializable {
   }
 
   /**
+   * @return HCatSchema with all columns (i.e. data and partition columns).
+   */
+  public HCatSchema getAllColumns() {
+    List<HCatFieldSchema> allColumns = Lists.newArrayList(dataColumns.getFields());
+    allColumns.addAll(partitionColumns.getFields());
+    return new HCatSchema(allColumns);
+  }
+
+  /**
    * @return the storerInfo
    */
   public StorerInfo getStorerInfo() {

http://git-wip-us.apache.org/repos/asf/hive/blob/18fb4601/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
index 360e77b..1f23f3f 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
@@ -182,5 +182,10 @@ public class InputJobInfo implements Serializable {
     ObjectInputStream partInfoReader =
       new ObjectInputStream(new InflaterInputStream(ois));
     partitions = (List<PartInfo>)partInfoReader.readObject();
+    for (PartInfo partInfo : partitions) {
+      if (partInfo.getTableInfo() == null) {
+        partInfo.setTableInfo(this.tableInfo);
+      }
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/18fb4601/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/PartInfo.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/PartInfo.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/PartInfo.java
index 651a9a0..fca0a92 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/PartInfo.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/PartInfo.java
@@ -18,27 +18,32 @@
  */
 package org.apache.hive.hcatalog.mapreduce;
 
+import java.io.IOException;
+import java.io.ObjectOutputStream;
 import java.io.Serializable;
 import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hive.hcatalog.data.schema.HCatSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** The Class used to serialize the partition information read from the metadata server that maps to a partition. */
 public class PartInfo implements Serializable {
 
+  private static Logger LOG = LoggerFactory.getLogger(PartInfo.class);
   /** The serialization version */
   private static final long serialVersionUID = 1L;
 
-  /** The partition schema. */
-  private final HCatSchema partitionSchema;
+  /** The partition data-schema. */
+  private HCatSchema partitionSchema;
 
   /** The information about which input storage handler to use */
-  private final String storageHandlerClassName;
-  private final String inputFormatClassName;
-  private final String outputFormatClassName;
-  private final String serdeClassName;
+  private String storageHandlerClassName;
+  private String inputFormatClassName;
+  private String outputFormatClassName;
+  private String serdeClassName;
 
   /** HCat-specific properties set at the partition */
   private final Properties hcatProperties;
@@ -52,8 +57,11 @@ public class PartInfo implements Serializable {
   /** Job properties associated with this partition */
   Map<String, String> jobProperties;
 
-  /** the table info associated with this partition */
-  HCatTableInfo tableInfo;
+  /**
+   * The table info associated with this partition.
+   * Not serialized per PartInfo instance. Constant, per table.
+   */
+  transient HCatTableInfo tableInfo;
 
   /**
    * Instantiates a new hcat partition info.
@@ -162,4 +170,97 @@ public class PartInfo implements Serializable {
   public HCatTableInfo getTableInfo() {
     return tableInfo;
   }
+
+  void setTableInfo(HCatTableInfo thatTableInfo) {
+    this.tableInfo = thatTableInfo;
+
+    if (partitionSchema == null) {
+      partitionSchema = tableInfo.getDataColumns();
+    }
+
+    if (storageHandlerClassName == null) {
+      storageHandlerClassName = tableInfo.getStorerInfo().getStorageHandlerClass();
+    }
+
+    if (inputFormatClassName == null) {
+      inputFormatClassName = tableInfo.getStorerInfo().getIfClass();
+    }
+
+    if (outputFormatClassName == null) {
+      outputFormatClassName = tableInfo.getStorerInfo().getOfClass();
+    }
+
+    if (serdeClassName == null) {
+      serdeClassName = tableInfo.getStorerInfo().getSerdeClass();
+    }
+  }
+
+  /**
+   * Serialization method. Suppresses serialization of redundant information that's already
+   * available from TableInfo.
+   */
+  private void writeObject(ObjectOutputStream oos)
+      throws IOException {
+    // Suppress commonality with TableInfo.
+
+    assert tableInfo != null : "TableInfo can't be null at this point.";
+
+    if (partitionSchema != null) {
+      if (partitionSchema.equals(tableInfo.getDataColumns())) {
+        partitionSchema = null;
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Can't suppress data-schema. Partition-schema and table-schema seem to differ! "
+              + " partitionSchema: " + partitionSchema.getFields()
+              + " tableSchema: " + tableInfo.getDataColumns());
+        }
+      }
+    }
+
+    if (storageHandlerClassName != null) {
+      if (storageHandlerClassName.equals(tableInfo.getStorerInfo().getStorageHandlerClass())) {
+        storageHandlerClassName = null;
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Partition's storageHandler (" + storageHandlerClassName + ") " +
+              "differs from table's storageHandler (" + tableInfo.getStorerInfo().getStorageHandlerClass() + ").");
+        }
+      }
+    }
+
+    if (inputFormatClassName != null) {
+      if (inputFormatClassName.equals(tableInfo.getStorerInfo().getIfClass())) {
+        inputFormatClassName = null;
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Partition's InputFormat (" + inputFormatClassName + ") " +
+              "differs from table's InputFormat (" + tableInfo.getStorerInfo().getIfClass() + ").");
+        }
+      }
+    }
+
+    if (outputFormatClassName != null) {
+      if (outputFormatClassName.equals(tableInfo.getStorerInfo().getOfClass())) {
+        outputFormatClassName = null;
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Partition's OutputFormat (" + outputFormatClassName + ") " +
+              "differs from table's OutputFormat (" + tableInfo.getStorerInfo().getOfClass() + ").");
+        }
+      }
+    }
+
+    if (serdeClassName != null) {
+      if (serdeClassName.equals(tableInfo.getStorerInfo().getSerdeClass())) {
+        serdeClassName = null;
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Partition's SerDe (" + serdeClassName + ") " +
+              "differs from table's SerDe (" + tableInfo.getStorerInfo().getSerdeClass() + ").");
+        }
+      }
+    }
+
+    oos.defaultWriteObject();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/18fb4601/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java
index add9d41..f716da9 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java
@@ -25,6 +25,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.collect.Lists;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
@@ -106,7 +107,7 @@ public class TestHCatOutputFormat extends TestCase {
     tbl.setDbName(dbName);
     tbl.setTableName(tblName);
     StorageDescriptor sd = new StorageDescriptor();
-    sd.setCols(fields);
+    sd.setCols(Lists.newArrayList(new FieldSchema("data_column", serdeConstants.STRING_TYPE_NAME, "")));
     tbl.setSd(sd);
 
     //sd.setLocation("hdfs://tmp");
@@ -151,7 +152,7 @@ public class TestHCatOutputFormat extends TestCase {
     assertEquals(1, jobInfo.getPartitionValues().size());
     assertEquals("p1", jobInfo.getPartitionValues().get("colname"));
     assertEquals(1, jobInfo.getTableInfo().getDataColumns().getFields().size());
-    assertEquals("colname", jobInfo.getTableInfo().getDataColumns().getFields().get(0).getName());
+    assertEquals("data_column", jobInfo.getTableInfo().getDataColumns().getFields().get(0).getName());
 
     publishTest(job);
   }
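
The PartInfo changes above all follow one pattern: anything a partition merely shares with its table is dropped before Java serialization and recovered from the single HCatTableInfo afterwards. That covers the transient tableInfo field, the fallbacks in setTableInfo(), the suppression logic in writeObject(), and the re-attachment loop in InputJobInfo.readObject(). Below is a minimal sketch of the same pattern with one representative field; TableInfoSketch, PartInfoSketch and JobInfoSketch are simplified stand-ins for illustration, not the real HCatalog types.

    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;
    import java.util.List;

    // Shared, table-level metadata: written once per serialized job-info.
    class TableInfoSketch implements Serializable {
      final String inputFormatClass;
      TableInfoSketch(String inputFormatClass) { this.inputFormatClass = inputFormatClass; }
    }

    class PartInfoSketch implements Serializable {
      /** Serialized only when it differs from the table-level value. */
      private String inputFormatClass;
      /** Constant per table; transient, so never written per partition. */
      transient TableInfoSketch tableInfo;

      PartInfoSketch(String inputFormatClass, TableInfoSketch tableInfo) {
        this.inputFormatClass = inputFormatClass;
        this.tableInfo = tableInfo;
      }

      String getInputFormatClass() {
        // Fall back to the table-level value when the copy was suppressed.
        return inputFormatClass != null ? inputFormatClass : tableInfo.inputFormatClass;
      }

      private void writeObject(ObjectOutputStream oos) throws IOException {
        assert tableInfo != null : "TableInfo can't be null at this point.";
        if (inputFormatClass != null
            && inputFormatClass.equals(tableInfo.inputFormatClass)) {
          inputFormatClass = null;  // suppress the redundant copy, as the patch does
        }
        oos.defaultWriteObject();
      }
    }

    class JobInfoSketch implements Serializable {
      final TableInfoSketch tableInfo;
      final List<PartInfoSketch> partitions;

      JobInfoSketch(TableInfoSketch tableInfo, List<PartInfoSketch> partitions) {
        this.tableInfo = tableInfo;
        this.partitions = partitions;
      }

      private void readObject(ObjectInputStream ois)
          throws IOException, ClassNotFoundException {
        ois.defaultReadObject();
        for (PartInfoSketch part : partitions) {
          part.tableInfo = tableInfo;  // re-attach the shared, transient reference
        }
      }
    }

Each split then reaches the table schema through getTableInfo().getAllColumns() instead of carrying its own serialized HCatSchema, which is what removes the repeated per-split payload this JIRA describes.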


[22/50] [abbrv] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
new file mode 100644
index 0000000..23d3f32
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
@@ -0,0 +1,568 @@
+PREHOOK: query: drop table if exists TJOIN1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists TJOIN2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1
+POSTHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1
+PREHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2
+POSTHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2
+PREHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1STAGE
+POSTHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1STAGE
+PREHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2STAGE
+POSTHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2STAGE
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin1stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin1stage
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin2stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin2stage
+PREHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1stage
+PREHOOK: Output: default@tjoin1
+POSTHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1stage
+POSTHOOK: Output: default@tjoin1
+POSTHOOK: Lineage: tjoin1.c1 SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.c2 EXPRESSION [(tjoin1stage)tjoin1stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin1.rnum SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin2stage
+PREHOOK: Output: default@tjoin2
+POSTHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin2stage
+POSTHOOK: Output: default@tjoin2
+POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin2.c2 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin2.rnum SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
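
For readers of the plan above: the ON-clause condition tjoin1.c2 > 15 refers only to the left table, and in a LEFT OUTER JOIN every tjoin1 row must survive, so Hive applies it as a join-time filter predicate (the "filter predicates: 0 {(c2 > 15)}" entries) rather than a scan filter; left rows that fail it are simply emitted NULL-extended. Below is a self-contained sketch of that semantics with invented sample data, not the actual tjoin1/tjoin2 contents.

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch (invented data): LEFT OUTER JOIN semantics when an
// ON-clause predicate refers only to the left table, as in
//   ... tjoin1 LEFT OUTER JOIN tjoin2 ON (tjoin1.c1 = tjoin2.c1 AND tjoin1.c2 > 15)
public class LeftOuterFilterPredicateSketch {
  public static void main(String[] args) {
    int[][] tjoin1 = { {0, 10, 15}, {1, 20, 25} };       // {rnum, c1, c2}
    Map<Integer, String> tjoin2 = new HashMap<Integer, String>();
    tjoin2.put(10, "AA");                                 // c1 -> c2
    for (int[] left : tjoin1) {
      String right = null;
      // The join-time "filter predicate": probe the hash table only when c2 > 15.
      if (left[2] > 15) {
        right = tjoin2.get(left[1]);
      }
      // Left rows always survive; a failed or skipped probe is NULL-extended.
      System.out.println(left[0] + "\t" + left[1] + "\t" + left[2] + "\t" + right);
    }
  }
}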


[35/50] [abbrv] hive git commit: HIVE-10595 Dropping a table can cause NPEs in the compactor (Alan Gates, reviewed by Eugene Koifman)

Posted by xu...@apache.org.
HIVE-10595 Dropping a table can cause NPEs in the compactor (Alan Gates, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c156b32b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c156b32b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c156b32b

Branch: refs/heads/beeline-cli
Commit: c156b32b49aeb5943e45a68fc7600c9244afb128
Parents: 72088ca
Author: Alan Gates <ga...@hortonworks.com>
Authored: Thu May 7 12:49:21 2015 +0100
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Thu May 7 12:49:21 2015 +0100

----------------------------------------------------------------------
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   | 20 ++++++-
 .../hive/ql/txn/compactor/CompactorThread.java  | 12 ++--
 .../hadoop/hive/ql/txn/compactor/Initiator.java | 11 +++-
 .../hadoop/hive/ql/txn/compactor/Worker.java    | 12 ++++
 .../hive/ql/txn/compactor/TestCleaner.java      | 56 ++++++++++++++++-
 .../hive/ql/txn/compactor/TestInitiator.java    | 63 +++++++++++++++++++-
 .../hive/ql/txn/compactor/TestWorker.java       | 45 ++++++++++++++
 7 files changed, 207 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
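
All three compactor threads get the same guard in this commit: if the table (or, for a partitioned table, the partition) named by a queued compaction has been dropped in the meantime, the lookup now returns null and the thread logs and moves on rather than dereferencing the missing object. A minimal sketch of the shared pattern follows; CompactionInfo and Metastore are simplified stand-ins, not the real Hive metastore types.

import java.util.logging.Logger;

// Illustrative sketch only, not Hive code: the drop-race guard that
// HIVE-10595 adds to Cleaner, Initiator and Worker.
public class DropRaceGuardSketch {
  private static final Logger LOG = Logger.getLogger("compactor");

  static class CompactionInfo {
    String tableName;
    String partName;   // null for an unpartitioned table
  }

  interface Metastore {
    Object resolveTable(CompactionInfo ci);       // null => table dropped
    Object resolvePartition(CompactionInfo ci);   // null => partition dropped
  }

  /** Returns true only when the compaction target still exists. */
  static boolean targetStillExists(Metastore ms, CompactionInfo ci) {
    if (ms.resolveTable(ci) == null) {
      // Dropped after the request was queued; skip instead of NPE-ing.
      // (The Worker additionally marks the request cleaned at this point.)
      LOG.info("Unable to find table " + ci.tableName + ", assuming it was dropped");
      return false;
    }
    if (ci.partName != null && ms.resolvePartition(ci) == null) {
      LOG.info("Unable to find partition " + ci.partName + ", assuming it was dropped");
      return false;
    }
    return true;
  }
}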


http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 83b0d3d..16d2c81 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -26,10 +26,12 @@ import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -183,7 +185,23 @@ public class Cleaner extends CompactorThread {
   private void clean(CompactionInfo ci) throws MetaException {
     LOG.info("Starting cleaning for " + ci.getFullPartitionName());
     try {
-      StorageDescriptor sd = resolveStorageDescriptor(resolveTable(ci), resolvePartition(ci));
+      Table t = resolveTable(ci);
+      if (t == null) {
+        // The table was dropped before we got around to cleaning it.
+        LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped");
+        return;
+      }
+      Partition p = null;
+      if (ci.partName != null) {
+        p = resolvePartition(ci);
+        if (p == null) {
+          // The partition was dropped before we got around to cleaning it.
+          LOG.info("Unable to find partition " + ci.getFullPartitionName() +
+              ", assuming it was dropped");
+          return;
+        }
+      }
+      StorageDescriptor sd = resolveStorageDescriptor(t, p);
       final String location = sd.getLocation();
 
       // Create a bogus validTxnList with a high water mark set to MAX_LONG and no open

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
index 7d097fd..38cd95e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -32,13 +32,13 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;
-import org.apache.hadoop.hive.metastore.txn.TxnHandler;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -105,13 +105,15 @@ abstract class CompactorThread extends Thread implements MetaStoreThread {
    * one partition.
    */
   protected Partition resolvePartition(CompactionInfo ci) throws Exception {
-    Partition p = null;
     if (ci.partName != null) {
-      List<String> names = new ArrayList<String>(1);
-      names.add(ci.partName);
       List<Partition> parts = null;
       try {
-        parts = rs.getPartitionsByNames(ci.dbname, ci.tableName, names);
+        parts = rs.getPartitionsByNames(ci.dbname, ci.tableName,
+            Collections.singletonList(ci.partName));
+        if (parts == null || parts.size() == 0) {
+          // The partition got dropped before we went looking for it.
+          return null;
+        }
       } catch (Exception e) {
         LOG.error("Unable to find partition " + ci.getFullPartitionName() + ", " + e.getMessage());
         throw e;

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index f706ac1..847d751 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -85,13 +85,13 @@ public class Initiator extends CompactorThread {
           LOG.debug("Found " + potentials.size() + " potential compactions, " +
               "checking to see if we should compact any of them");
           for (CompactionInfo ci : potentials) {
-            LOG.debug("Checking to see if we should compact " + ci.getFullPartitionName());
+            LOG.info("Checking to see if we should compact " + ci.getFullPartitionName());
             try {
               Table t = resolveTable(ci);
               if (t == null) {
                 // Most likely this means it's a temp table
-                LOG.debug("Can't find table " + ci.getFullTableName() + ", assuming it's a temp " +
-                    "table and moving on.");
+                LOG.info("Can't find table " + ci.getFullTableName() + ", assuming it's a temp " +
+                    "table or has been dropped and moving on.");
                 continue;
               }
 
@@ -121,6 +121,11 @@ public class Initiator extends CompactorThread {
 
               // Figure out who we should run the file operations as
               Partition p = resolvePartition(ci);
+              if (p == null && ci.partName != null) {
+                LOG.info("Can't find partition " + ci.getFullPartitionName() +
+                    ", assuming it has been dropped and moving on.");
+                continue;
+              }
               StorageDescriptor sd = resolveStorageDescriptor(t, p);
               String runAs = findUserToRunAs(sd.getLocation(), t);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index 3ce9ffd..f26225a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -94,6 +94,12 @@ public class Worker extends CompactorThread {
         Table t1 = null;
         try {
           t1 = resolveTable(ci);
+          if (t1 == null) {
+            LOG.info("Unable to find table " + ci.getFullTableName() +
+                ", assuming it was dropped and moving on.");
+            txnHandler.markCleaned(ci);
+            continue;
+          }
         } catch (MetaException e) {
           txnHandler.markCleaned(ci);
           continue;
@@ -106,6 +112,12 @@ public class Worker extends CompactorThread {
         Partition p = null;
         try {
           p = resolvePartition(ci);
+          if (p == null && ci.partName != null) {
+            LOG.info("Unable to find partition " + ci.getFullPartitionName() +
+                ", assuming it was dropped and moving on.");
+            txnHandler.markCleaned(ci);
+            continue;
+          }
         } catch (Exception e) {
           txnHandler.markCleaned(ci);
           continue;

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
index 7687851..ffdbb9a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.hive.ql.txn.compactor;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreThread;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
 import org.junit.Test;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -428,4 +428,56 @@ public class TestCleaner extends CompactorTest {
     Assert.assertEquals(1, paths.size());
     Assert.assertEquals("base_25", paths.get(0).getName());
   }
+
+  @Test
+  public void droppedTable() throws Exception {
+    Table t = newTable("default", "dt", false);
+
+    addDeltaFile(t, null, 1L, 22L, 22);
+    addDeltaFile(t, null, 23L, 24L, 2);
+    addBaseFile(t, null, 25L, 25);
+
+    burnThroughTransactions(25);
+
+    CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MINOR);
+    txnHandler.compact(rqst);
+    CompactionInfo ci = txnHandler.findNextToCompact("fred");
+    txnHandler.markCompacted(ci);
+    txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
+
+    ms.dropTable("default", "dt");
+
+    startCleaner();
+
+    // Check there are no compactions requests left.
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    Assert.assertEquals(0, rsp.getCompactsSize());
+  }
+
+  @Test
+  public void droppedPartition() throws Exception {
+    Table t = newTable("default", "dp", true);
+    Partition p = newPartition(t, "today");
+
+    addDeltaFile(t, p, 1L, 22L, 22);
+    addDeltaFile(t, p, 23L, 24L, 2);
+    addBaseFile(t, p, 25L, 25);
+
+    burnThroughTransactions(25);
+
+    CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MAJOR);
+    rqst.setPartitionname("ds=today");
+    txnHandler.compact(rqst);
+    CompactionInfo ci = txnHandler.findNextToCompact("fred");
+    txnHandler.markCompacted(ci);
+    txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
+
+    ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
+
+    startCleaner();
+
+    // Check there are no compactions requests left.
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    Assert.assertEquals(0, rsp.getCompactsSize());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
index 1a9cbca..00b13de 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.txn.compactor;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -27,6 +27,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -653,4 +654,64 @@ public class TestInitiator extends CompactorTest {
     Assert.assertEquals(0, compacts.size());
   }
 
+  @Test
+  public void dropTable() throws Exception {
+    Table t = newTable("default", "dt", false);
+
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 24L, 2);
+
+    burnThroughTransactions(23);
+
+    long txnid = openTxn();
+    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
+    comp.setTablename("dt");
+    List<LockComponent> components = new ArrayList<LockComponent>(1);
+    components.add(comp);
+    LockRequest req = new LockRequest(components, "me", "localhost");
+    req.setTxnid(txnid);
+    LockResponse res = txnHandler.lock(req);
+    txnHandler.commitTxn(new CommitTxnRequest(txnid));
+
+    ms.dropTable("default", "dt");
+
+    startInitiator();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
+
+  @Test
+  public void dropPartition() throws Exception {
+    Table t = newTable("default", "dp", true);
+    Partition p = newPartition(t, "today");
+
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
+
+    burnThroughTransactions(23);
+
+    long txnid = openTxn();
+    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
+    comp.setTablename("dp");
+    comp.setPartitionname("ds=today");
+    List<LockComponent> components = new ArrayList<LockComponent>(1);
+    components.add(comp);
+    LockRequest req = new LockRequest(components, "me", "localhost");
+    req.setTxnid(txnid);
+    LockResponse res = txnHandler.lock(req);
+    txnHandler.commitTxn(new CommitTxnRequest(txnid));
+
+    ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
+
+    startInitiator();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index 78a7f9e..bebac54 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -29,6 +29,7 @@ import org.junit.Test;
 import java.io.*;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -799,4 +800,48 @@ public class TestWorker extends CompactorTest {
     Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
     Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
   }
+
+  @Test
+  public void droppedTable() throws Exception {
+    Table t = newTable("default", "dt", false);
+
+    addDeltaFile(t, null, 1L, 2L, 2);
+    addDeltaFile(t, null, 3L, 4L, 2);
+    burnThroughTransactions(4);
+
+    CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MAJOR);
+    txnHandler.compact(rqst);
+
+    ms.dropTable("default", "dt");
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
+
+  @Test
+  public void droppedPartition() throws Exception {
+    Table t = newTable("default", "dp", true);
+    Partition p = newPartition(t, "today");
+
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
+
+    burnThroughTransactions(25);
+
+    CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MINOR);
+    rqst.setPartitionname("ds=today");
+    txnHandler.compact(rqst);
+
+    ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
 }


[29/50] [abbrv] hive git commit: HIVE-10592: ORC file dump in JSON format (Prasanth Jayachandran reviewed by Gopal V)

Posted by xu...@apache.org.
HIVE-10592: ORC file dump in JSON format (Prasanth Jayachandran reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/80fb8913
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/80fb8913
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/80fb8913

Branch: refs/heads/beeline-cli
Commit: 80fb8913196eef8e4125544c3138b0c73be267b7
Parents: 93995c8
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Wed May 6 18:52:17 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Wed May 6 18:52:17 2015 -0700

----------------------------------------------------------------------
 bin/ext/orcfiledump.sh                          |    9 +-
 .../hive/ql/io/orc/ColumnStatisticsImpl.java    |   16 +-
 .../apache/hadoop/hive/ql/io/orc/FileDump.java  |   91 +-
 .../hadoop/hive/ql/io/orc/JsonFileDump.java     |  365 +++++
 .../hadoop/hive/ql/io/orc/TestJsonFileDump.java |  138 ++
 ql/src/test/resources/orc-file-dump.json        | 1354 ++++++++++++++++++
 6 files changed, 1929 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
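
For reference, the new flags can be driven programmatically exactly the way TestJsonFileDump (further down) drives them, by handing them to FileDump.main: -j selects JSON output, -p pretty-prints it, and --rowindex limits row-index and bloom filter dumps to the listed column ids. In this sketch the ORC file path is a placeholder and must point at an existing file.

import org.apache.hadoop.hive.ql.io.orc.FileDump;

// Minimal driver mirroring TestJsonFileDump; the path is a placeholder.
public class OrcJsonDumpExample {
  public static void main(String[] args) throws Exception {
    FileDump.main(new String[]{"/tmp/example.orc", "-j", "-p", "--rowindex=3"});
  }
}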


http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/bin/ext/orcfiledump.sh
----------------------------------------------------------------------
diff --git a/bin/ext/orcfiledump.sh b/bin/ext/orcfiledump.sh
index 752e437..6139de2 100644
--- a/bin/ext/orcfiledump.sh
+++ b/bin/ext/orcfiledump.sh
@@ -23,5 +23,12 @@ orcfiledump () {
 }
 
 orcfiledump_help () {
-  echo "usage ./hive orcfiledump [-d] [--rowindex <col_ids>] <path_to_file>"
+  echo "usage ./hive orcfiledump [-h] [-j] [-p] [-t] [-d] [-r <col_ids>] <path_to_file>"
+  echo ""
+  echo "  --json (-j)                 Print metadata in JSON format"
+  echo "  --pretty (-p)               Pretty print json metadata output"
+  echo "  --timezone (-t)             Print writer's time zone"
+  echo "  --data (-d)                 Should the data be printed"
+  echo "  --rowindex (-r) <_col_ids_> Comma separated list of column ids for which row index should be printed"
+  echo "  --help (-h)                 Print help message"
 } 

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
index 7cfbd81..ffba3c6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
@@ -699,12 +699,18 @@ class ColumnStatisticsImpl implements ColumnStatistics {
 
     @Override
     public Date getMinimum() {
+      if (minimum == null) {
+        return null;
+      }
       minDate.set(minimum);
       return minDate.get();
     }
 
     @Override
     public Date getMaximum() {
+      if (maximum == null) {
+        return null;
+      }
       maxDate.set(maximum);
       return maxDate.get();
     }
@@ -793,14 +799,12 @@ class ColumnStatisticsImpl implements ColumnStatistics {
 
     @Override
     public Timestamp getMinimum() {
-      Timestamp minTimestamp = new Timestamp(minimum);
-      return minTimestamp;
+      return minimum == null ? null : new Timestamp(minimum);
     }
 
     @Override
     public Timestamp getMaximum() {
-      Timestamp maxTimestamp = new Timestamp(maximum);
-      return maxTimestamp;
+      return maximum == null ? null : new Timestamp(maximum);
     }
 
     @Override
@@ -808,9 +812,9 @@ class ColumnStatisticsImpl implements ColumnStatistics {
       StringBuilder buf = new StringBuilder(super.toString());
       if (getNumberOfValues() != 0) {
         buf.append(" min: ");
-        buf.append(minimum);
+        buf.append(getMinimum());
         buf.append(" max: ");
-        buf.append(maximum);
+        buf.append(getMaximum());
       }
       return buf.toString();
     }
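
One consequence of returning null minima and maxima for empty columns is that every consumer must now guard before formatting, which is what both the revised toString() above and JsonFileDump's writeColumnStatistics do. Below is a small sketch of that caller-side guard, using a stand-in interface rather than the real DateColumnStatistics/TimestampColumnStatistics API.

import java.sql.Timestamp;

// Illustrative caller pattern (stand-in interface, not the Hive API):
// after this commit, columns with no values report null min/max, so
// consumers check before formatting instead of printing garbage.
public class NullSafeStatsReadSketch {
  interface TimestampStats {
    Timestamp getMinimum();
    Timestamp getMaximum();
  }

  static String describe(TimestampStats stats) {
    if (stats.getMaximum() == null) {
      return "no values";
    }
    return "min: " + stats.getMinimum() + " max: " + stats.getMaximum();
  }

  public static void main(String[] args) {
    TimestampStats empty = new TimestampStats() {
      public Timestamp getMinimum() { return null; }
      public Timestamp getMaximum() { return null; }
    };
    System.out.println(describe(empty));   // prints "no values"
  }
}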

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
index cd4db75..33c4cd8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -50,10 +50,11 @@ import org.codehaus.jettison.json.JSONWriter;
  * A tool for printing out the file structure of ORC files.
  */
 public final class FileDump {
-  private static final String UNKNOWN = "UNKNOWN";
+  public static final String UNKNOWN = "UNKNOWN";
 
   // not used
-  private FileDump() {}
+  private FileDump() {
+  }
 
   public static void main(String[] args) throws Exception {
     Configuration conf = new Configuration();
@@ -69,21 +70,28 @@ public final class FileDump {
     }
 
     boolean dumpData = cli.hasOption('d');
-    if (cli.hasOption("rowindex")) {
-      String[] colStrs = cli.getOptionValue("rowindex").split(",");
+    if (cli.hasOption("r")) {
+      String[] colStrs = cli.getOptionValue("r").split(",");
       rowIndexCols = new ArrayList<Integer>(colStrs.length);
       for (String colStr : colStrs) {
         rowIndexCols.add(Integer.parseInt(colStr));
       }
     }
 
-    boolean printTimeZone = false;
-    if (cli.hasOption('t')) {
-      printTimeZone = true;
-    }
+    boolean printTimeZone = cli.hasOption('t');
+    boolean jsonFormat = cli.hasOption('j');
     String[] files = cli.getArgs();
-    if (dumpData) printData(Arrays.asList(files), conf);
-    else printMetaData(Arrays.asList(files), conf, rowIndexCols, printTimeZone);
+    if (dumpData) {
+      printData(Arrays.asList(files), conf);
+    } else {
+      if (jsonFormat) {
+        boolean prettyPrint = cli.hasOption('p');
+        JsonFileDump.printJsonMetaData(Arrays.asList(files), conf, rowIndexCols, prettyPrint,
+            printTimeZone);
+      } else {
+        printMetaData(Arrays.asList(files), conf, rowIndexCols, printTimeZone);
+      }
+    }
   }
 
   private static void printData(List<String> files, Configuration conf) throws IOException,
@@ -100,7 +108,7 @@ public final class FileDump {
       Path path = new Path(filename);
       Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
       System.out.println("File Version: " + reader.getFileVersion().getName() +
-                         " with " + reader.getWriterVersion());
+          " with " + reader.getWriterVersion());
       RecordReaderImpl rows = (RecordReaderImpl) reader.rows();
       System.out.println("Rows: " + reader.getNumberOfRows());
       System.out.println("Compression: " + reader.getCompression());
@@ -121,7 +129,7 @@ public final class FileDump {
       ColumnStatistics[] stats = reader.getStatistics();
       int colCount = stats.length;
       System.out.println("\nFile Statistics:");
-      for(int i=0; i < stats.length; ++i) {
+      for (int i = 0; i < stats.length; ++i) {
         System.out.println("  Column " + i + ": " + stats[i].toString());
       }
       System.out.println("\nStripes:");
@@ -140,7 +148,7 @@ public final class FileDump {
           System.out.println("  Stripe: " + stripe.toString());
         }
         long sectionStart = stripeStart;
-        for(OrcProto.Stream section: footer.getStreamsList()) {
+        for (OrcProto.Stream section : footer.getStreamsList()) {
           String kind = section.hasKind() ? section.getKind().name() : UNKNOWN;
           System.out.println("    Stream: column " + section.getColumn() +
               " section " + kind + " start: " + sectionStart +
@@ -270,7 +278,7 @@ public final class FileDump {
     return buf.toString();
   }
 
-  private static long getTotalPaddingSize(Reader reader) throws IOException {
+  public static long getTotalPaddingSize(Reader reader) throws IOException {
     long paddedBytes = 0;
     List<org.apache.hadoop.hive.ql.io.orc.StripeInformation> stripes = reader.getStripes();
     for (int i = 1; i < stripes.size(); i++) {
@@ -307,21 +315,30 @@ public final class FileDump {
         .withArgName("comma separated list of column ids for which row index should be printed")
         .withDescription("Dump stats for column number(s)")
         .hasArg()
-        .create());
+        .create('r'));
+
+    result.addOption(OptionBuilder
+        .withLongOpt("json")
+        .withDescription("Print metadata in JSON format")
+        .create('j'));
 
+    result.addOption(OptionBuilder
+            .withLongOpt("pretty")
+            .withDescription("Pretty print json metadata output")
+            .create('p'));
 
     return result;
   }
 
   private static void printMap(JSONWriter writer,
-                               Map<Object, Object> obj,
-                               List<OrcProto.Type> types,
-                               OrcProto.Type type
+      Map<Object, Object> obj,
+      List<OrcProto.Type> types,
+      OrcProto.Type type
   ) throws IOException, JSONException {
     writer.array();
     int keyType = type.getSubtypes(0);
     int valueType = type.getSubtypes(1);
-    for(Map.Entry<Object,Object> item: obj.entrySet()) {
+    for (Map.Entry<Object, Object> item : obj.entrySet()) {
       writer.object();
       writer.key("_key");
       printObject(writer, item.getKey(), types, keyType);
@@ -333,34 +350,34 @@ public final class FileDump {
   }
 
   private static void printList(JSONWriter writer,
-                                List<Object> obj,
-                                List<OrcProto.Type> types,
-                                OrcProto.Type type
+      List<Object> obj,
+      List<OrcProto.Type> types,
+      OrcProto.Type type
   ) throws IOException, JSONException {
     int subtype = type.getSubtypes(0);
     writer.array();
-    for(Object item: obj) {
+    for (Object item : obj) {
       printObject(writer, item, types, subtype);
     }
     writer.endArray();
   }
 
   private static void printUnion(JSONWriter writer,
-                                 OrcUnion obj,
-                                 List<OrcProto.Type> types,
-                                 OrcProto.Type type
+      OrcUnion obj,
+      List<OrcProto.Type> types,
+      OrcProto.Type type
   ) throws IOException, JSONException {
     int subtype = type.getSubtypes(obj.getTag());
     printObject(writer, obj.getObject(), types, subtype);
   }
 
   static void printStruct(JSONWriter writer,
-                          OrcStruct obj,
-                          List<OrcProto.Type> types,
-                          OrcProto.Type type) throws IOException, JSONException {
+      OrcStruct obj,
+      List<OrcProto.Type> types,
+      OrcProto.Type type) throws IOException, JSONException {
     writer.object();
     List<Integer> fieldTypes = type.getSubtypesList();
-    for(int i=0; i < fieldTypes.size(); ++i) {
+    for (int i = 0; i < fieldTypes.size(); ++i) {
       writer.key(type.getFieldNames(i));
       printObject(writer, obj.getFieldValue(i), types, fieldTypes.get(i));
     }
@@ -368,9 +385,9 @@ public final class FileDump {
   }
 
   static void printObject(JSONWriter writer,
-                          Object obj,
-                          List<OrcProto.Type> types,
-                          int typeId) throws IOException, JSONException {
+      Object obj,
+      List<OrcProto.Type> types,
+      int typeId) throws IOException, JSONException {
     OrcProto.Type type = types.get(typeId);
     if (obj == null) {
       writer.value(null);
@@ -417,7 +434,7 @@ public final class FileDump {
   }
 
   static void printJsonData(Configuration conf,
-                            String filename) throws IOException, JSONException {
+      String filename) throws IOException, JSONException {
     Path path = new Path(filename);
     Reader reader = OrcFile.createReader(path.getFileSystem(conf), path);
     OutputStreamWriter out = new OutputStreamWriter(System.out, "UTF-8");

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
new file mode 100644
index 0000000..c33004e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
@@ -0,0 +1,365 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.filters.BloomFilterIO;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONStringer;
+import org.codehaus.jettison.json.JSONWriter;
+
+/**
+ * File dump tool with json formatted output.
+ */
+public class JsonFileDump {
+
+  public static void printJsonMetaData(List<String> files, Configuration conf,
+      List<Integer> rowIndexCols, boolean prettyPrint, boolean printTimeZone) throws JSONException, IOException {
+    JSONStringer writer = new JSONStringer();
+    boolean multiFile = files.size() > 1;
+    if (multiFile) {
+      writer.array();
+    } else {
+      writer.object();
+    }
+    for (String filename : files) {
+      if (multiFile) {
+        writer.object();
+      }
+      writer.key("fileName").value(filename);
+      Path path = new Path(filename);
+      Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
+      writer.key("fileVersion").value(reader.getFileVersion().getName());
+      writer.key("writerVersion").value(reader.getWriterVersion());
+      RecordReaderImpl rows = (RecordReaderImpl) reader.rows();
+      writer.key("numberOfRows").value(reader.getNumberOfRows());
+      writer.key("compression").value(reader.getCompression());
+      if (reader.getCompression() != CompressionKind.NONE) {
+        writer.key("compressionBufferSize").value(reader.getCompressionSize());
+      }
+      writer.key("schemaString").value(reader.getObjectInspector().getTypeName());
+      writer.key("schema").array();
+      writeSchema(writer, reader.getTypes());
+      writer.endArray();
+
+      writer.key("stripeStatistics").array();
+      Metadata metadata = reader.getMetadata();
+      for (int n = 0; n < metadata.getStripeStatistics().size(); n++) {
+        writer.object();
+        writer.key("stripeNumber").value(n + 1);
+        StripeStatistics ss = metadata.getStripeStatistics().get(n);
+        writer.key("columnStatistics").array();
+        for (int i = 0; i < ss.getColumnStatistics().length; i++) {
+          writer.object();
+          writer.key("columnId").value(i);
+          writeColumnStatistics(writer, ss.getColumnStatistics()[i]);
+          writer.endObject();
+        }
+        writer.endArray();
+        writer.endObject();
+      }
+      writer.endArray();
+
+      ColumnStatistics[] stats = reader.getStatistics();
+      int colCount = stats.length;
+      writer.key("fileStatistics").array();
+      for (int i = 0; i < stats.length; ++i) {
+        writer.object();
+        writer.key("columnId").value(i);
+        writeColumnStatistics(writer, stats[i]);
+        writer.endObject();
+      }
+      writer.endArray();
+
+      writer.key("stripes").array();
+      int stripeIx = -1;
+      for (StripeInformation stripe : reader.getStripes()) {
+        ++stripeIx;
+        long stripeStart = stripe.getOffset();
+        OrcProto.StripeFooter footer = rows.readStripeFooter(stripe);
+        writer.object(); // start of stripe information
+        writer.key("stripeNumber").value(stripeIx + 1);
+        writer.key("stripeInformation");
+        writeStripeInformation(writer, stripe);
+        if (printTimeZone) {
+          writer.key("writerTimezone").value(
+              footer.hasWriterTimezone() ? footer.getWriterTimezone() : FileDump.UNKNOWN);
+        }
+        long sectionStart = stripeStart;
+
+        writer.key("streams").array();
+        for (OrcProto.Stream section : footer.getStreamsList()) {
+          writer.object();
+          String kind = section.hasKind() ? section.getKind().name() : FileDump.UNKNOWN;
+          writer.key("columnId").value(section.getColumn());
+          writer.key("section").value(kind);
+          writer.key("startOffset").value(sectionStart);
+          writer.key("length").value(section.getLength());
+          sectionStart += section.getLength();
+          writer.endObject();
+        }
+        writer.endArray();
+
+        writer.key("encodings").array();
+        for (int i = 0; i < footer.getColumnsCount(); ++i) {
+          writer.object();
+          OrcProto.ColumnEncoding encoding = footer.getColumns(i);
+          writer.key("columnId").value(i);
+          writer.key("kind").value(encoding.getKind());
+          if (encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
+              encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2) {
+            writer.key("dictionarySize").value(encoding.getDictionarySize());
+          }
+          writer.endObject();
+        }
+        writer.endArray();
+
+        if (rowIndexCols != null && !rowIndexCols.isEmpty()) {
+          // include the columns that are specified, only if the columns are included, bloom filter
+          // will be read
+          boolean[] sargColumns = new boolean[colCount];
+          for (int colIdx : rowIndexCols) {
+            sargColumns[colIdx] = true;
+          }
+          RecordReaderImpl.Index indices = rows.readRowIndex(stripeIx, null, sargColumns);
+          writer.key("indexes").array();
+          for (int col : rowIndexCols) {
+            writer.object();
+            writer.key("columnId").value(col);
+            writeRowGroupIndexes(writer, col, indices.getRowGroupIndex());
+            writeBloomFilterIndexes(writer, col, indices.getBloomFilterIndex());
+            writer.endObject();
+          }
+          writer.endArray();
+        }
+        writer.endObject(); // end of stripe information
+      }
+      writer.endArray();
+
+      FileSystem fs = path.getFileSystem(conf);
+      long fileLen = fs.getContentSummary(path).getLength();
+      long paddedBytes = FileDump.getTotalPaddingSize(reader);
+      // empty ORC file is ~45 bytes. Assumption here is file length always >0
+      double percentPadding = ((double) paddedBytes / (double) fileLen) * 100;
+      writer.key("fileLength").value(fileLen);
+      writer.key("paddingLength").value(paddedBytes);
+      writer.key("paddingRatio").value(percentPadding);
+      rows.close();
+
+      writer.endObject();
+    }
+    if (multiFile) {
+      writer.endArray();
+    }
+
+    if (prettyPrint) {
+      final String prettyJson;
+      if (multiFile) {
+        JSONArray jsonArray = new JSONArray(writer.toString());
+        prettyJson = jsonArray.toString(2);
+      } else {
+        JSONObject jsonObject = new JSONObject(writer.toString());
+        prettyJson = jsonObject.toString(2);
+      }
+      System.out.println(prettyJson);
+    } else {
+      System.out.println(writer.toString());
+    }
+  }
+
+  private static void writeSchema(JSONStringer writer, List<OrcProto.Type> types)
+      throws JSONException {
+    int i = 0;
+    for(OrcProto.Type type : types) {
+      writer.object();
+      writer.key("columnId").value(i++);
+      writer.key("columnType").value(type.getKind());
+      if (type.getFieldNamesCount() > 0) {
+        writer.key("childColumnNames").array();
+        for (String field : type.getFieldNamesList()) {
+          writer.value(field);
+        }
+        writer.endArray();
+        writer.key("childColumnIds").array();
+        for (Integer colId : type.getSubtypesList()) {
+          writer.value(colId);
+        }
+        writer.endArray();
+      }
+      if (type.hasPrecision()) {
+        writer.key("precision").value(type.getPrecision());
+      }
+
+      if (type.hasScale()) {
+        writer.key("scale").value(type.getScale());
+      }
+
+      if (type.hasMaximumLength()) {
+        writer.key("maxLength").value(type.getMaximumLength());
+      }
+      writer.endObject();
+    }
+  }
+
+  private static void writeStripeInformation(JSONWriter writer, StripeInformation stripe)
+      throws JSONException {
+    writer.object();
+    writer.key("offset").value(stripe.getOffset());
+    writer.key("indexLength").value(stripe.getIndexLength());
+    writer.key("dataLength").value(stripe.getDataLength());
+    writer.key("footerLength").value(stripe.getFooterLength());
+    writer.key("rowCount").value(stripe.getNumberOfRows());
+    writer.endObject();
+  }
+
+  private static void writeColumnStatistics(JSONWriter writer, ColumnStatistics cs)
+      throws JSONException {
+    if (cs != null) {
+      writer.key("count").value(cs.getNumberOfValues());
+      writer.key("hasNull").value(cs.hasNull());
+      if (cs instanceof BinaryColumnStatistics) {
+        writer.key("totalLength").value(((BinaryColumnStatistics) cs).getSum());
+        writer.key("type").value(OrcProto.Type.Kind.BINARY);
+      } else if (cs instanceof BooleanColumnStatistics) {
+        writer.key("trueCount").value(((BooleanColumnStatistics) cs).getTrueCount());
+        writer.key("falseCount").value(((BooleanColumnStatistics) cs).getFalseCount());
+        writer.key("type").value(OrcProto.Type.Kind.BOOLEAN);
+      } else if (cs instanceof IntegerColumnStatistics) {
+        writer.key("min").value(((IntegerColumnStatistics) cs).getMinimum());
+        writer.key("max").value(((IntegerColumnStatistics) cs).getMaximum());
+        if (((IntegerColumnStatistics) cs).isSumDefined()) {
+          writer.key("sum").value(((IntegerColumnStatistics) cs).getSum());
+        }
+        writer.key("type").value(OrcProto.Type.Kind.LONG);
+      } else if (cs instanceof DoubleColumnStatistics) {
+        writer.key("min").value(((DoubleColumnStatistics) cs).getMinimum());
+        writer.key("max").value(((DoubleColumnStatistics) cs).getMaximum());
+        writer.key("sum").value(((DoubleColumnStatistics) cs).getSum());
+        writer.key("type").value(OrcProto.Type.Kind.DOUBLE);
+      } else if (cs instanceof StringColumnStatistics) {
+        writer.key("min").value(((StringColumnStatistics) cs).getMinimum());
+        writer.key("max").value(((StringColumnStatistics) cs).getMaximum());
+        writer.key("totalLength").value(((StringColumnStatistics) cs).getSum());
+        writer.key("type").value(OrcProto.Type.Kind.STRING);
+      } else if (cs instanceof DateColumnStatistics) {
+        if (((DateColumnStatistics) cs).getMaximum() != null) {
+          writer.key("min").value(((DateColumnStatistics) cs).getMinimum());
+          writer.key("max").value(((DateColumnStatistics) cs).getMaximum());
+        }
+        writer.key("type").value(OrcProto.Type.Kind.DATE);
+      } else if (cs instanceof TimestampColumnStatistics) {
+        if (((TimestampColumnStatistics) cs).getMaximum() != null) {
+          writer.key("min").value(((TimestampColumnStatistics) cs).getMinimum());
+          writer.key("max").value(((TimestampColumnStatistics) cs).getMaximum());
+        }
+        writer.key("type").value(OrcProto.Type.Kind.TIMESTAMP);
+      } else if (cs instanceof DecimalColumnStatistics) {
+        if (((DecimalColumnStatistics) cs).getMaximum() != null) {
+          writer.key("min").value(((DecimalColumnStatistics) cs).getMinimum());
+          writer.key("max").value(((DecimalColumnStatistics) cs).getMaximum());
+          writer.key("sum").value(((DecimalColumnStatistics) cs).getSum());
+        }
+        writer.key("type").value(OrcProto.Type.Kind.DECIMAL);
+      }
+    }
+  }
+
+  private static void writeBloomFilterIndexes(JSONWriter writer, int col,
+      OrcProto.BloomFilterIndex[] bloomFilterIndex) throws JSONException {
+
+    BloomFilterIO stripeLevelBF = null;
+    if (bloomFilterIndex != null && bloomFilterIndex[col] != null) {
+      int entryIx = 0;
+      writer.key("bloomFilterIndexes").array();
+      for (OrcProto.BloomFilter bf : bloomFilterIndex[col].getBloomFilterList()) {
+        writer.object();
+        writer.key("entryId").value(entryIx++);
+        BloomFilterIO toMerge = new BloomFilterIO(bf);
+        writeBloomFilterStats(writer, toMerge);
+        if (stripeLevelBF == null) {
+          stripeLevelBF = toMerge;
+        } else {
+          stripeLevelBF.merge(toMerge);
+        }
+        writer.endObject();
+      }
+      writer.endArray();
+    }
+    if (stripeLevelBF != null) {
+      writer.key("stripeLevelBloomFilter");
+      writer.object();
+      writeBloomFilterStats(writer, stripeLevelBF);
+      writer.endObject();
+    }
+  }
+
+  private static void writeBloomFilterStats(JSONWriter writer, BloomFilterIO bf)
+      throws JSONException {
+    int bitCount = bf.getBitSize();
+    int popCount = 0;
+    for (long l : bf.getBitSet()) {
+      popCount += Long.bitCount(l);
+    }
+    int k = bf.getNumHashFunctions();
+    float loadFactor = (float) popCount / (float) bitCount;
+    float expectedFpp = (float) Math.pow(loadFactor, k);
+    writer.key("numHashFunctions").value(k);
+    writer.key("bitCount").value(bitCount);
+    writer.key("popCount").value(popCount);
+    writer.key("loadFactor").value(loadFactor);
+    writer.key("expectedFpp").value(expectedFpp);
+  }
+
+  private static void writeRowGroupIndexes(JSONWriter writer, int col,
+      OrcProto.RowIndex[] rowGroupIndex)
+      throws JSONException {
+
+    OrcProto.RowIndex index;
+    if (rowGroupIndex == null || (col >= rowGroupIndex.length) ||
+        ((index = rowGroupIndex[col]) == null)) {
+      return;
+    }
+
+    writer.key("rowGroupIndexes").array();
+    for (int entryIx = 0; entryIx < index.getEntryCount(); ++entryIx) {
+      writer.object();
+      writer.key("entryId").value(entryIx);
+      OrcProto.RowIndexEntry entry = index.getEntry(entryIx);
+      if (entry == null) {
+        continue;
+      }
+      OrcProto.ColumnStatistics colStats = entry.getStatistics();
+      writeColumnStatistics(writer, ColumnStatisticsImpl.deserialize(colStats));
+      writer.key("positions").array();
+      for (int posIx = 0; posIx < entry.getPositionsCount(); ++posIx) {
+        writer.value(entry.getPositions(posIx));
+      }
+      writer.endArray();
+      writer.endObject();
+    }
+    writer.endArray();
+  }
+
+}
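
The expectedFpp reported by writeBloomFilterStats is just the load factor raised to the number of hash functions: if popCount of bitCount bits are set, each of the k independent probes lands on a set bit with probability popCount/bitCount. A standalone recomputation with made-up sample values, not taken from a real file:

// Illustrative recomputation of the stats writeBloomFilterStats emits;
// bitSet and k below are invented sample values.
public class BloomFilterStatsSketch {
  public static void main(String[] args) {
    long[] bitSet = {0xF0F0F0F0F0F0F0F0L, 0x0F0F0F0F0F0F0F0FL}; // sample bits
    int k = 4;                                                   // sample hash count
    int bitCount = bitSet.length * Long.SIZE;
    int popCount = 0;
    for (long word : bitSet) {
      popCount += Long.bitCount(word);                           // set bits per word
    }
    float loadFactor = (float) popCount / (float) bitCount;      // 64/128 = 0.5
    float expectedFpp = (float) Math.pow(loadFactor, k);         // 0.5^4 = 0.0625
    System.out.printf("bitCount=%d popCount=%d loadFactor=%.3f expectedFpp=%.4f%n",
        bitCount, popCount, loadFactor, expectedFpp);
  }
}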

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestJsonFileDump.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestJsonFileDump.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestJsonFileDump.java
new file mode 100644
index 0000000..d17c528
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestJsonFileDump.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io.orc;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.PrintStream;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hive.common.util.HiveTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestJsonFileDump {
+
+  Path workDir = new Path(System.getProperty("test.tmp.dir"));
+  Configuration conf;
+  FileSystem fs;
+  Path testFilePath;
+
+  @Before
+  public void openFileSystem () throws Exception {
+    conf = new Configuration();
+    fs = FileSystem.getLocal(conf);
+    fs.setWorkingDirectory(workDir);
+    testFilePath = new Path("TestFileDump.testDump.orc");
+    fs.delete(testFilePath, false);
+  }
+
+  static class MyRecord {
+    int i;
+    long l;
+    String s;
+    MyRecord(int i, long l, String s) {
+      this.i = i;
+      this.l = l;
+      this.s = s;
+    }
+  }
+
+  static void checkOutput(String expected,
+                                  String actual) throws Exception {
+    BufferedReader eStream =
+        new BufferedReader(new FileReader(HiveTestUtils.getFileFromClasspath(expected)));
+    BufferedReader aStream =
+        new BufferedReader(new FileReader(actual));
+    String expectedLine = eStream.readLine();
+    while (expectedLine != null) {
+      String actualLine = aStream.readLine();
+      System.out.println("actual:   " + actualLine);
+      System.out.println("expected: " + expectedLine);
+      assertEquals(expectedLine, actualLine);
+      expectedLine = eStream.readLine();
+    }
+    assertNull(eStream.readLine());
+    assertNull(aStream.readLine());
+  }
+
+  @Test
+  public void testJsonDump() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory.getReflectionObjectInspector
+          (MyRecord.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+    conf.set(HiveConf.ConfVars.HIVE_ORC_ENCODING_STRATEGY.varname, "COMPRESSION");
+    OrcFile.WriterOptions options = OrcFile.writerOptions(conf)
+        .fileSystem(fs)
+        .inspector(inspector)
+        .stripeSize(100000)
+        .compress(CompressionKind.ZLIB)
+        .bufferSize(10000)
+        .rowIndexStride(1000)
+        .bloomFilterColumns("s");
+    Writer writer = OrcFile.createWriter(testFilePath, options);
+    Random r1 = new Random(1);
+    String[] words = new String[]{"It", "was", "the", "best", "of", "times,",
+        "it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age",
+        "of", "wisdom,", "it", "was", "the", "age", "of", "foolishness,", "it",
+        "was", "the", "epoch", "of", "belief,", "it", "was", "the", "epoch",
+        "of", "incredulity,", "it", "was", "the", "season", "of", "Light,",
+        "it", "was", "the", "season", "of", "Darkness,", "it", "was", "the",
+        "spring", "of", "hope,", "it", "was", "the", "winter", "of", "despair,",
+        "we", "had", "everything", "before", "us,", "we", "had", "nothing",
+        "before", "us,", "we", "were", "all", "going", "direct", "to",
+        "Heaven,", "we", "were", "all", "going", "direct", "the", "other",
+        "way"};
+    for(int i=0; i < 21000; ++i) {
+      if (i % 100 == 0) {
+        writer.addRow(new MyRecord(r1.nextInt(), r1.nextLong(), null));
+      } else {
+        writer.addRow(new MyRecord(r1.nextInt(), r1.nextLong(),
+            words[r1.nextInt(words.length)]));
+      }
+    }
+
+    writer.close();
+    PrintStream origOut = System.out;
+    String outputFilename = "orc-file-dump.json";
+    FileOutputStream myOut = new FileOutputStream(workDir + File.separator + outputFilename);
+
+    // replace stdout and run command
+    System.setOut(new PrintStream(myOut));
+    FileDump.main(new String[]{testFilePath.toString(), "-j", "-p", "--rowindex=3"});
+    System.out.flush();
+    System.setOut(origOut);
+
+    checkOutput(outputFilename, workDir + File.separator + outputFilename);
+  }
+}
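
To reproduce a dump like the JSON resource below outside of JUnit, the FileDump entry point can be called directly with the same flags the test passes (a minimal sketch, assuming hive-exec and its Hadoop dependencies are on the classpath; the ORC file path is a placeholder):

    import org.apache.hadoop.hive.ql.io.orc.FileDump;

    public class DumpOrcAsJson {
      public static void main(String[] args) throws Exception {
        // Same flags as the test above: -j selects JSON output, -p pretty-prints
        // it, and --rowindex=3 includes row-group index and bloom filter
        // details for column id 3.
        FileDump.main(new String[]{"/tmp/example.orc", "-j", "-p", "--rowindex=3"});
      }
    }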

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/test/resources/orc-file-dump.json
----------------------------------------------------------------------
diff --git a/ql/src/test/resources/orc-file-dump.json b/ql/src/test/resources/orc-file-dump.json
new file mode 100644
index 0000000..125a32e
--- /dev/null
+++ b/ql/src/test/resources/orc-file-dump.json
@@ -0,0 +1,1354 @@
+{
+  "fileName": "TestFileDump.testDump.orc",
+  "fileVersion": "0.12",
+  "writerVersion": "HIVE_8732",
+  "numberOfRows": 21000,
+  "compression": "ZLIB",
+  "compressionBufferSize": 10000,
+  "schemaString": "struct<i:int,l:bigint,s:string>",
+  "schema": [
+    {
+      "columnId": 0,
+      "columnType": "STRUCT",
+      "childColumnNames": [
+        "i",
+        "l",
+        "s"
+      ],
+      "childColumnIds": [
+        1,
+        2,
+        3
+      ]
+    },
+    {
+      "columnId": 1,
+      "columnType": "INT"
+    },
+    {
+      "columnId": 2,
+      "columnType": "LONG"
+    },
+    {
+      "columnId": 3,
+      "columnType": "STRING"
+    }
+  ],
+  "stripeStatistics": [
+    {
+      "stripeNumber": 1,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 5000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 5000,
+          "hasNull": false,
+          "min": -2147115959,
+          "max": 2145210552,
+          "sum": 50111854553,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 5000,
+          "hasNull": false,
+          "min": -9223180583305557329,
+          "max": 9221614132680747961,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 4950,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 19283,
+          "type": "STRING"
+        }
+      ]
+    },
+    {
+      "stripeNumber": 2,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 5000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 5000,
+          "hasNull": false,
+          "min": -2147390285,
+          "max": 2147224606,
+          "sum": -22290798217,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 5000,
+          "hasNull": false,
+          "min": -9219295160509160427,
+          "max": 9217571024994660020,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 4950,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 19397,
+          "type": "STRING"
+        }
+      ]
+    },
+    {
+      "stripeNumber": 3,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 5000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 5000,
+          "hasNull": false,
+          "min": -2146954065,
+          "max": 2146722468,
+          "sum": 20639652136,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 5000,
+          "hasNull": false,
+          "min": -9214076359988107846,
+          "max": 9222919052987871506,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 4950,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 19031,
+          "type": "STRING"
+        }
+      ]
+    },
+    {
+      "stripeNumber": 4,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 5000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 5000,
+          "hasNull": false,
+          "min": -2146969085,
+          "max": 2146025044,
+          "sum": -5156814387,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 5000,
+          "hasNull": false,
+          "min": -9222731174895935707,
+          "max": 9220625004936875965,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 4950,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 19459,
+          "type": "STRING"
+        }
+      ]
+    },
+    {
+      "stripeNumber": 5,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 1000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 1000,
+          "hasNull": false,
+          "min": -2144303438,
+          "max": 2127599049,
+          "sum": 62841564778,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 1000,
+          "hasNull": false,
+          "min": -9195133638801798919,
+          "max": 9218626063131504414,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 990,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 3963,
+          "type": "STRING"
+        }
+      ]
+    }
+  ],
+  "fileStatistics": [
+    {
+      "columnId": 0,
+      "count": 21000,
+      "hasNull": false
+    },
+    {
+      "columnId": 1,
+      "count": 21000,
+      "hasNull": false,
+      "min": -2147390285,
+      "max": 2147224606,
+      "sum": 106145458863,
+      "type": "LONG"
+    },
+    {
+      "columnId": 2,
+      "count": 21000,
+      "hasNull": false,
+      "min": -9223180583305557329,
+      "max": 9222919052987871506,
+      "type": "LONG"
+    },
+    {
+      "columnId": 3,
+      "count": 20790,
+      "hasNull": true,
+      "min": "Darkness,",
+      "max": "worst",
+      "totalLength": 81133,
+      "type": "STRING"
+    }
+  ],
+  "stripes": [
+    {
+      "stripeNumber": 1,
+      "stripeInformation": {
+        "offset": 3,
+        "indexLength": 863,
+        "dataLength": 63749,
+        "footerLength": 103,
+        "rowCount": 5000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 3,
+          "length": 17
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 20,
+          "length": 165
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 185,
+          "length": 174
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 359,
+          "length": 103
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 462,
+          "length": 404
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 866,
+          "length": 20029
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 20895,
+          "length": 40035
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 60930,
+          "length": 17
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 60947,
+          "length": 3510
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 64457,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 64482,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [
+          {
+            "entryId": 0,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3873,
+            "type": "STRING",
+            "positions": [
+              0,
+              0,
+              0,
+              0,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "entryId": 1,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3861,
+            "type": "STRING",
+            "positions": [
+              0,
+              38,
+              12,
+              0,
+              0,
+              736,
+              23
+            ]
+          },
+          {
+            "entryId": 2,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3946,
+            "type": "STRING",
+            "positions": [
+              0,
+              78,
+              12,
+              0,
+              0,
+              1473,
+              43
+            ]
+          },
+          {
+            "entryId": 3,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3774,
+            "type": "STRING",
+            "positions": [
+              0,
+              118,
+              12,
+              0,
+              0,
+              2067,
+              261
+            ]
+          },
+          {
+            "entryId": 4,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3829,
+            "type": "STRING",
+            "positions": [
+              0,
+              158,
+              12,
+              0,
+              0,
+              2992,
+              35
+            ]
+          }
+        ],
+        "bloomFilterIndexes": [
+          {
+            "entryId": 0,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 1,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 2,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 3,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 4,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          }
+        ],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    },
+    {
+      "stripeNumber": 2,
+      "stripeInformation": {
+        "offset": 64718,
+        "indexLength": 854,
+        "dataLength": 63742,
+        "footerLength": 103,
+        "rowCount": 5000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 64718,
+          "length": 17
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 64735,
+          "length": 164
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 64899,
+          "length": 169
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 65068,
+          "length": 100
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 65168,
+          "length": 404
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 65572,
+          "length": 20029
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 85601,
+          "length": 40035
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 125636,
+          "length": 17
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 125653,
+          "length": 3503
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 129156,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 129181,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [
+          {
+            "entryId": 0,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3946,
+            "type": "STRING",
+            "positions": [
+              0,
+              0,
+              0,
+              0,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "entryId": 1,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3836,
+            "type": "STRING",
+            "positions": [
+              0,
+              38,
+              12,
+              0,
+              0,
+              746,
+              11
+            ]
+          },
+          {
+            "entryId": 2,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3791,
+            "type": "STRING",
+            "positions": [
+              0,
+              78,
+              12,
+              0,
+              0,
+              1430,
+              95
+            ]
+          },
+          {
+            "entryId": 3,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3904,
+            "type": "STRING",
+            "positions": [
+              0,
+              118,
+              12,
+              0,
+              0,
+              2239,
+              23
+            ]
+          },
+          {
+            "entryId": 4,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3920,
+            "type": "STRING",
+            "positions": [
+              0,
+              158,
+              12,
+              0,
+              0,
+              2994,
+              17
+            ]
+          }
+        ],
+        "bloomFilterIndexes": [
+          {
+            "entryId": 0,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 1,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 2,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 3,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 4,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          }
+        ],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    },
+    {
+      "stripeNumber": 3,
+      "stripeInformation": {
+        "offset": 129417,
+        "indexLength": 853,
+        "dataLength": 63749,
+        "footerLength": 103,
+        "rowCount": 5000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 129417,
+          "length": 17
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 129434,
+          "length": 160
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 129594,
+          "length": 170
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 129764,
+          "length": 102
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 129866,
+          "length": 404
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 130270,
+          "length": 20029
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 150299,
+          "length": 40035
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 190334,
+          "length": 17
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 190351,
+          "length": 3510
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 193861,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 193886,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [
+          {
+            "entryId": 0,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3829,
+            "type": "STRING",
+            "positions": [
+              0,
+              0,
+              0,
+              0,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "entryId": 1,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3853,
+            "type": "STRING",
+            "positions": [
+              0,
+              38,
+              12,
+              0,
+              0,
+              698,
+              74
+            ]
+          },
+          {
+            "entryId": 2,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3796,
+            "type": "STRING",
+            "positions": [
+              0,
+              78,
+              12,
+              0,
+              0,
+              1483,
+              39
+            ]
+          },
+          {
+            "entryId": 3,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3736,
+            "type": "STRING",
+            "positions": [
+              0,
+              118,
+              12,
+              0,
+              0,
+              2148,
+              155
+            ]
+          },
+          {
+            "entryId": 4,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3817,
+            "type": "STRING",
+            "positions": [
+              0,
+              158,
+              12,
+              0,
+              0,
+              3018,
+              8
+            ]
+          }
+        ],
+        "bloomFilterIndexes": [
+          {
+            "entryId": 0,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 1,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 2,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 3,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 4,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          }
+        ],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    },
+    {
+      "stripeNumber": 4,
+      "stripeInformation": {
+        "offset": 194122,
+        "indexLength": 866,
+        "dataLength": 63735,
+        "footerLength": 103,
+        "rowCount": 5000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 194122,
+          "length": 17
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 194139,
+          "length": 164
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 194303,
+          "length": 174
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 194477,
+          "length": 107
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 194584,
+          "length": 404
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 194988,
+          "length": 20029
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 215017,
+          "length": 40035
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 255052,
+          "length": 17
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 255069,
+          "length": 3496
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 258565,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 258590,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [
+          {
+            "entryId": 0,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3959,
+            "type": "STRING",
+            "positions": [
+              0,
+              0,
+              0,
+              0,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "entryId": 1,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3816,
+            "type": "STRING",
+            "positions": [
+              0,
+              38,
+              12,
+              0,
+              0,
+              495,
+              338
+            ]
+          },
+          {
+            "entryId": 2,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3883,
+            "type": "STRING",
+            "positions": [
+              0,
+              78,
+              12,
+              0,
+              0,
+              1449,
+              71
+            ]
+          },
+          {
+            "entryId": 3,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3938,
+            "type": "STRING",
+            "positions": [
+              0,
+              118,
+              12,
+              0,
+              0,
+              2207,
+              59
+            ]
+          },
+          {
+            "entryId": 4,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3863,
+            "type": "STRING",
+            "positions": [
+              0,
+              158,
+              12,
+              0,
+              0,
+              2838,
+              223
+            ]
+          }
+        ],
+        "bloomFilterIndexes": [
+          {
+            "entryId": 0,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 1,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 2,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 3,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 4,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          }
+        ],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    },
+    {
+      "stripeNumber": 5,
+      "stripeInformation": {
+        "offset": 258826,
+        "indexLength": 433,
+        "dataLength": 12940,
+        "footerLength": 95,
+        "rowCount": 1000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 258826,
+          "length": 12
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 258838,
+          "length": 38
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 258876,
+          "length": 41
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 258917,
+          "length": 41
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 258958,
+          "length": 301
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 259259,
+          "length": 4007
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 263266,
+          "length": 8007
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 271273,
+          "length": 16
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 271289,
+          "length": 752
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 272041,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 272066,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [{
+          "entryId": 0,
+          "count": 990,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 3963,
+          "type": "STRING",
+          "positions": [
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0
+          ]
+        }],
+        "bloomFilterIndexes": [{
+          "entryId": 0,
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    }
+  ],
+  "fileLength": 272842,
+  "paddingLength": 0,
+  "paddingRatio": 0
+}
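
A consistency check worth knowing for the bloomFilterIndexes entries above: loadFactor is the fraction of bits set (popCount out of bitCount), and expectedFpp is loadFactor raised to the power numHashFunctions. A standalone sketch verifying stripe 1's numbers (an illustration, not part of the patch):

    public class BloomFilterFppCheck {
      public static void main(String[] args) {
        // Values taken from the dump above: 138 of 6272 bits set, k = 4.
        double loadFactor = 0.022002551704645157;
        int numHashFunctions = 4;
        double expectedFpp = Math.pow(loadFactor, numHashFunctions);
        System.out.println(expectedFpp);  // ~2.3436470542037569E-7, as in the dump
      }
    }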


[08/50] [abbrv] hive git commit: HIVE-10576: add jar command does not work with Windows OS (Hari Sankar Sivarama Subramaniyan via Thejas Nair)

Posted by xu...@apache.org.
HIVE-10576: add jar command does not work with Windows OS (Hari Sankar Sivarama Subramaniyan via Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7276cd2a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7276cd2a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7276cd2a

Branch: refs/heads/beeline-cli
Commit: 7276cd2a563b3159e7bba0c1d74b2ec1b505913e
Parents: 92d0b81
Author: Thejas Nair <th...@hortonworks.com>
Authored: Tue May 5 12:27:57 2015 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Tue May 5 12:27:57 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/session/SessionState.java    | 27 +++++++++++++++-----
 1 file changed, 21 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7276cd2a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index b531cc9..8db78e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Shell;
 
 import com.google.common.base.Preconditions;
 
@@ -1160,7 +1161,7 @@ public class SessionState {
 
         if (getURLType(value).equals("ivy")) {
           // get the key to store in map
-          key = new URI(value).getAuthority();
+          key = createURI(value).getAuthority();
         } else {
           // for local file and hdfs, key and value are same.
           key = downloadedURLs.get(0).toString();
@@ -1201,8 +1202,22 @@ public class SessionState {
     return localized;
   }
 
+  /**
+   * @param path the resource path or URI string to convert
+   * @return URI corresponding to the path.
+   */
+  private static URI createURI(String path) throws URISyntaxException {
+    if (!Shell.WINDOWS) {
+      // If this is not a Windows shell, the path must follow the Unix convention;
+      // otherwise the call below will throw a URISyntaxException.
+      return new URI(path);
+    } else {
+      return new Path(path).toUri();
+    }
+  }
+
   private static String getURLType(String value) throws URISyntaxException {
-    URI uri = new URI(value);
+    URI uri = createURI(value);
     String scheme = uri.getScheme() == null ? null : uri.getScheme().toLowerCase();
     if (scheme == null || scheme.equals("file")) {
       return "file";
@@ -1215,13 +1230,13 @@ public class SessionState {
 
   List<URI> resolveAndDownload(ResourceType t, String value, boolean convertToUnix) throws URISyntaxException,
       IOException {
-    URI uri = new URI(value);
+    URI uri = createURI(value);
     if (getURLType(value).equals("file")) {
       return Arrays.asList(uri);
     } else if (getURLType(value).equals("ivy")) {
       return dependencyResolver.downloadDependencies(uri);
     } else if (getURLType(value).equals("hdfs")) {
-      return Arrays.asList(new URI(downloadResource(value, convertToUnix)));
+      return Arrays.asList(createURI(downloadResource(value, convertToUnix)));
     } else {
       throw new RuntimeException("Invalid url " + uri);
     }
@@ -1252,7 +1267,7 @@ public class SessionState {
         throw new RuntimeException("Couldn't create directory " + resourceDir);
       }
       try {
-        FileSystem fs = FileSystem.get(new URI(value), conf);
+        FileSystem fs = FileSystem.get(createURI(value), conf);
         fs.copyToLocalFile(new Path(value), new Path(destinationFile.getCanonicalPath()));
         value = destinationFile.getCanonicalPath();
 
@@ -1286,7 +1301,7 @@ public class SessionState {
       String key = value;
       try {
         if (getURLType(value).equals("ivy")) {
-          key = new URI(value).getAuthority();
+          key = createURI(value).getAuthority();
         }
       } catch (URISyntaxException e) {
         throw new RuntimeException("Invalid uri string " + value + ", " + e.getMessage());


[21/50] [abbrv] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
new file mode 100644
index 0000000..23a8adb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
@@ -0,0 +1,13572 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from t1 sort by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 sort by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: create table t2 stored as orc as select cast(2*key as int) key, value from t1
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t2
+POSTHOOK: query: create table t2 stored as orc as select cast(2*key as int) key, value from t1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t2
+PREHOOK: query: select * from t2 sort by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 sort by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_5
+10	val_5
+10	val_5
+16	val_8
+18	val_9
+20	val_10
+4	val_2
+8	val_4
+PREHOOK: query: create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t3
+POSTHOOK: query: create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t3
+PREHOOK: query: select * from t3 sort by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t3 sort by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+10	val_5
+10	val_5
+10	val_5
+16	val_8
+18	val_9
+2	val_2
+20	val_10
+4	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_4
+8	val_8
+9	val_9
+PREHOOK: query: create table t4 (key int, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t4
+POSTHOOK: query: create table t4 (key int, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t4
+PREHOOK: query: select * from t4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+4	val_4
+8	val_8
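
(A note on the semantics visible above: a LEFT SEMI JOIN emits each left-side row at most once per left occurrence, no matter how many right-side rows share the key; the Group By Operator on b.key in the plan performs that de-duplication before the map join. A standalone sketch of the logic, using the t1 and distinct t2 keys shown earlier; this is an illustration, not Hive code:)

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class SemiJoinSketch {
      public static void main(String[] args) {
        int[] t1Keys = {0, 0, 0, 2, 4, 5, 5, 5, 8, 9, 10};
        Set<Integer> t2Keys = new HashSet<>(Arrays.asList(0, 4, 8, 10, 16, 18, 20));
        for (int k : t1Keys) {
          if (t2Keys.contains(k)) {    // keep the left row once per left occurrence
            System.out.println(k);
          }
        }
        // Prints 0, 0, 0, 4, 8, 10: the same multiset of keys as the result above.
      }
    }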
+PREHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_5
+10	val_5
+10	val_5
+4	val_2
+8	val_4
+PREHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t4
+#### A masked pattern was here ####
+PREHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key < 15) (type: boolean)
+              Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int), key (type: int)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: int)
+                    1 _col1 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col1 (type: int)
+                outputColumnNames: _col1
+                Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+val_0
+val_0
+val_0
+val_10
+val_2
+val_4
+val_5
+val_5
+val_5
+val_8
+val_9
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((value < 'val_10') and key is not null) (type: boolean)
+              Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: key, value
+                Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int), value (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+PREHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b:t3 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b:t3 
+          TableScan
+            alias: t3
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key > 5) (type: boolean)
+              Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col1
+                Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+val_10
+val_8
+val_9
+PREHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b:t2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b:t2 
+          TableScan
+            alias: t2
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: int), _col1 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b:t1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b:t1 
+          TableScan
+            alias: t1
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key > 2) (type: boolean)
+              Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+10	val_5
+10	val_5
+10	val_5
+4	val_2
+8	val_4
+PREHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+2
+4
+4
+5
+5
+5
+8
+8
+9
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (2 * key) is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 (2 * _col0) (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 (2 * _col0) (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+8	val_8
+PREHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-6 is a root stage
+  Stage-2 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              HashTable Sink Operator
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 _col0 (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 key (type: int)
+                      2 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Left Semi Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 _col0 (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int), _col1 (type: string)
+                    sort order: ++
+                    Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col2 (type: int), _col3 (type: string)
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+10	val_10	10	val_5
+10	val_10	10	val_5
+10	val_10	10	val_5
+4	val_4	4	val_2
+8	val_8	8	val_4
+PREHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key is not null and value is not null) (type: boolean)
+              Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: key, value
+                Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int), value (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int), value (type: string)
+                      1 _col0 (type: int), _col1 (type: string)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key is not null and value is not null) (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int), value (type: string)
+                  1 _col0 (type: int), _col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-1 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+                      2 _col0 (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+                      2 _col0 (type: int)
+
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Left Semi Join 0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 _col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-6 is a root stage
+  Stage-2 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+                2 _col0 (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: int)
+                    1 key (type: int)
+                    2 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+                   Left Semi Join 1 to 2
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+                2 _col0 (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: b
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Outer Join 0 to 1
+               Left Semi Join 1 to 2
+          keys:
+            0 key (type: int)
+            1 key (type: int)
+            2 _col0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: int)
+              sort order: +
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-6 is a root stage
+  Stage-2 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: int)
+                    1 _col0 (type: int)
+                    2 key (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 key (type: int)
+                1 _col0 (type: int)
+                2 key (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Semi Join 0 to 1
+                   Left Outer Join0 to 2
+              keys:
+                0 key (type: int)
+                1 _col0 (type: int)
+                2 key (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+16
+18
+20
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-6 is a root stage
+  Stage-2 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 key (type: int)
+                1 _col0 (type: int)
+                2 key (type: int)
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: int)
+                    1 _col0 (type: int)
+                    2 key (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Semi Join 0 to 1
+                   Right Outer Join0 to 2
+              keys:
+                0 key (type: int)
+                1 _col0 (type: int)
+                2 key (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Semi Join 0 to 1
+               Outer Join 0 to 2
+          keys:
+            0 key (type: int)
+            1 _col0 (type: int)
+            2 key (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: int)
+              sort order: +
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+2
+4
+4
+5
+5
+5
+8
+8
+9
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-8 is a root stage
+  Stage-3 depends on stages: Stage-8
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-8
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 _col1 (type: string)
+                1 value (type: string)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                Map Join Operator
+                  condition map:
+                       Left Outer Join0 to 1
+                  keys:
+                    0 _col1 (type: string)
+                    1 value (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 13 Data size: 1237 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Statistics: Num rows: 13 Data size: 1237 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 13 Data size: 1237 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 13 Data size: 1237 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+16
+18
+20
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        $hdt$_1:b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        $hdt$_1:b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: value is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: value (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 _col1 (type: string)
+                      1 _col0 (type: string)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key > 100) and value is not null) (type: boolean)
+              Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                Map Join Operator
+                  condition map:
+                       Left Semi Join 0 to 1
+                  keys:
+                    0 _col1 (type: string)
+                    1 _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      

<TRUNCATED>
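
The truncated plans above all share one shape: the semi-join build side is first de-duplicated with a hash-mode Group By on the join key, loaded into an in-memory hash table by a HashTable Sink inside a Map Reduce Local Work stage, and then probed by a Map Join in the following Map Reduce stage. A minimal sketch of the kind of query and settings that produce this shape (the settings are illustrative; the exact .q file options are not visible in this excerpt):

    set hive.auto.convert.join=true;
    set hive.auto.convert.join.noconditionaltask=true;
    -- the build side (b) is grouped on its key, then broadcast as a hash table
    explain
    select a.key
    from t3 a
    left semi join t2 b on a.key = b.key;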

[07/50] [abbrv] hive git commit: HIVE-7375 : Add option in test infra to compile in other profiles (like hadoop-1) (Szehon, reviewed by Xuefu and Brock)

Posted by xu...@apache.org.
HIVE-7375 : Add option in test infra to compile in other profiles (like hadoop-1) (Szehon, reviewed by Xuefu and Brock)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/92d0b81e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/92d0b81e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/92d0b81e

Branch: refs/heads/beeline-cli
Commit: 92d0b81e99b4889d9a831244ab019ec84fc84408
Parents: bb3a665
Author: Szehon Ho <sz...@cloudera.com>
Authored: Tue May 5 12:12:39 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Tue May 5 12:12:39 2015 -0700

----------------------------------------------------------------------
 .../ptest2/src/main/resources/source-prep.vm    |  11 ++
 .../hive/ptest/execution/TestScripts.java       |  23 ++++
 .../TestScripts.testPrepGit.approved.txt        |   2 +-
 .../TestScripts.testPrepHadoop1.approved.txt    | 111 +++++++++++++++++++
 .../TestScripts.testPrepNone.approved.txt       |   2 +-
 .../TestScripts.testPrepSvn.approved.txt        |   2 +-
 6 files changed, 148 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/92d0b81e/testutils/ptest2/src/main/resources/source-prep.vm
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/source-prep.vm b/testutils/ptest2/src/main/resources/source-prep.vm
index babc245..97fb69c 100644
--- a/testutils/ptest2/src/main/resources/source-prep.vm
+++ b/testutils/ptest2/src/main/resources/source-prep.vm
@@ -85,6 +85,17 @@ cd $workingDir/
   if [[ "${buildTool}" == "maven" ]]
   then
     rm -rf $workingDir/maven/org/apache/hive
+    #if($additionalProfiles)
+    echo "Compile check on additional Profiles: $additionalProfiles"
+    export ADDITIONAL_PROFILES=$additionalProfiles
+    for i in $(echo $ADDITIONAL_PROFILES | tr "," "\n")
+      do
+        mvn clean install -DskipTests -P$i;
+        cd itests
+        mvn clean install -DskipTests -P$i;
+        cd ..
+      done
+    #end
     mvn -B clean install -DskipTests -Dmaven.repo.local=$workingDir/maven $mavenArgs $mavenBuildArgs
     mvn -B test -Dmaven.repo.local=$workingDir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
     cd itests
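
The added block splits $additionalProfiles on commas and runs a throwaway compile of the main tree and itests for each profile before the real -Dmaven.repo.local build. A standalone sketch of the expansion (the profile values are illustrative):

    export ADDITIONAL_PROFILES="hadoop-1,hadoop-2"
    for i in $(echo "$ADDITIONAL_PROFILES" | tr "," "\n")
    do
      mvn clean install -DskipTests -P"$i"   # compile check only; artifacts are discarded
      cd itests
      mvn clean install -DskipTests -P"$i"
      cd ..
    done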

http://git-wip-us.apache.org/repos/asf/hive/blob/92d0b81e/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.java
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.java b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.java
index 3e543a7..b55e979 100644
--- a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.java
+++ b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.java
@@ -151,6 +151,29 @@ public class TestScripts  {
     String actual = getTemplateResult(template, templateVariables);
     Approvals.verify(actual);
   }
+
+  @Test
+  public void testPrepHadoop1() throws Throwable {
+    Map<String, String> templateVariables = Maps.newHashMap();
+    templateVariables.put("repository", "https://svn.apache.org/repos/asf/hive/trunk");
+    templateVariables.put("repositoryName", "apache");
+    templateVariables.put("branch", "");
+    templateVariables.put("localDir", "/some/local/dir");
+    templateVariables.put("workingDir", "/some/working/dir");
+    templateVariables.put("buildTool", "maven");
+    templateVariables.put("antArgs", "-Dant=arg1");
+    templateVariables.put("buildTag", "build-1");
+    templateVariables.put("logDir", "/some/log/dir");
+    templateVariables.put("testArguments", "-Dtest=arg1");
+    templateVariables.put("clearLibraryCache", "true");
+    templateVariables.put("javaHome", "/usr/java/jdk1.7");
+    templateVariables.put("antEnvOpts", "-Dhttp.proxyHost=somehost -Dhttp.proxyPort=3128");
+    templateVariables.put("repositoryType", "svn");
+    templateVariables.put("additionalProfiles", "hadoop-1");
+    String template = readResource("source-prep.vm");
+    String actual = getTemplateResult(template, templateVariables);
+    Approvals.verify(actual);
+  }
   @Test
   public void testPrepSvn() throws Throwable {
     Map<String, String> templateVariables = Maps.newHashMap();

http://git-wip-us.apache.org/repos/asf/hive/blob/92d0b81e/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepGit.approved.txt
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepGit.approved.txt b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepGit.approved.txt
index 8dce26f..673614b 100644
--- a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepGit.approved.txt
+++ b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepGit.approved.txt
@@ -84,7 +84,7 @@ cd /some/working/dir/
   if [[ "${buildTool}" == "maven" ]]
   then
     rm -rf /some/working/dir/maven/org/apache/hive
-    mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven -X -Phadoop-2
+        mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven -X -Phadoop-2
     mvn -B test -Dmaven.repo.local=/some/working/dir/maven -Dtest=TestDummy -X -Phadoop-1
     cd itests
     mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven -X -Phadoop-2

http://git-wip-us.apache.org/repos/asf/hive/blob/92d0b81e/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepHadoop1.approved.txt
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepHadoop1.approved.txt b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepHadoop1.approved.txt
new file mode 100644
index 0000000..dbb6a6d
--- /dev/null
+++ b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepHadoop1.approved.txt
@@ -0,0 +1,111 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+set -x
+if [[ -n "/usr/java/jdk1.7" ]]
+then
+  export JAVA_HOME=/usr/java/jdk1.7
+  export PATH=$JAVA_HOME/bin/:$PATH
+fi
+export ANT_OPTS="-Xmx1g -XX:MaxPermSize=256m -Dhttp.proxyHost=somehost -Dhttp.proxyPort=3128"
+export M2_OPTS="-Xmx1g -XX:MaxPermSize=256m ${mavenEnvOpts}"
+cd /some/working/dir/
+(
+  if [[ "true" == "true" ]]
+  then
+    rm -rf ivy maven
+  fi
+  mkdir -p maven ivy
+  if [[ "svn" = "svn" ]]
+  then
+    if [[ -n "" ]]
+    then
+      echo "Illegal argument for svn: branch ''."
+      exit 1
+    fi
+    if [[ -d apache-source ]] && [[ ! -d apache-source/.svn ]]
+    then
+      rm -rf apache-source
+    fi
+    if [[ ! -d apache-source ]]
+    then
+      svn co https://svn.apache.org/repos/asf/hive/trunk apache-source
+    fi
+    cd apache-source
+    svn revert -R .
+    rm -rf $(svn status --no-ignore | egrep -v '^X|^Performing status on external' | awk '{print $2}')
+    svn update
+  elif [[ "svn" = "git" ]]
+  then
+     if [[ -z "" ]]
+    then
+      echo "Illegal argument for git: branch name is required."
+      exit 1
+    fi
+    if [[ -d apache-source ]] && [[ ! -d apache-source/.git ]]
+    then
+      rm -rf apache-source
+    fi
+    if [[ ! -d apache-source ]]
+    then
+      git clone https://svn.apache.org/repos/asf/hive/trunk apache-source
+    fi
+    cd apache-source
+    git fetch origin
+    git reset --hard HEAD && git clean -f -d
+    git checkout  || git checkout -b  origin/
+    git reset --hard origin/
+    git merge --ff-only origin/
+    git gc
+  else
+    echo "Unknown repository type 'svn'"
+    exit 1
+  fi
+  patchCommandPath=/some/working/dir/scratch/smart-apply-patch.sh
+  patchFilePath=/some/working/dir/scratch/build.patch
+  if [[ -f $patchFilePath ]]
+  then
+    chmod +x $patchCommandPath
+    $patchCommandPath $patchFilePath
+  fi
+  if [[ "maven" == "maven" ]]
+  then
+    rm -rf /some/working/dir/maven/org/apache/hive
+        echo "Compile check on additional Profiles: hadoop-1"
+    export ADDITIONAL_PROFILES=hadoop-1
+    for i in $(echo $ADDITIONAL_PROFILES | tr "," "\n")
+      do
+        mvn clean install -DskipTests -P$i;
+        cd itests
+        mvn clean install -DskipTests -P$i;
+        cd ..
+      done
+        mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven $mavenArgs $mavenBuildArgs
+    mvn -B test -Dmaven.repo.local=/some/working/dir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
+    cd itests
+    mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven $mavenArgs $mavenBuildArgs
+    mvn -B test -Dmaven.repo.local=/some/working/dir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
+  elif [[ "maven" == "ant" ]]
+  then
+    ant -Dant=arg1 -Divy.default.ivy.user.dir=/some/working/dir/ivy \
+      -Dmvn.local.repo=/some/working/dir/maven clean package test \
+      -Dtestcase=nothing
+   else
+     echo "Unknown build tool maven"
+     exit 127
+   fi
+) 2>&1 | tee /some/log/dir/source-prep.txt
+exit ${PIPESTATUS[0]}
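
The closing two lines of the generated script are a standard bash idiom: tee preserves the full source-prep log while ${PIPESTATUS[0]} makes the script exit with the subshell's status instead of tee's, which is almost always 0. A two-line illustration:

    ( exit 3 ) 2>&1 | tee /tmp/source-prep.txt
    exit ${PIPESTATUS[0]}   # exits 3; a plain 'exit $?' would report tee's 0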

http://git-wip-us.apache.org/repos/asf/hive/blob/92d0b81e/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepNone.approved.txt
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepNone.approved.txt b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepNone.approved.txt
index 98afe93..c05dae9 100644
--- a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepNone.approved.txt
+++ b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepNone.approved.txt
@@ -84,7 +84,7 @@ cd /some/working/dir/
   if [[ "ant" == "maven" ]]
   then
     rm -rf /some/working/dir/maven/org/apache/hive
-    mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven $mavenArgs $mavenBuildArgs
+        mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven $mavenArgs $mavenBuildArgs
     mvn -B test -Dmaven.repo.local=/some/working/dir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
     cd itests
     mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven $mavenArgs $mavenBuildArgs

http://git-wip-us.apache.org/repos/asf/hive/blob/92d0b81e/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepSvn.approved.txt
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepSvn.approved.txt b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepSvn.approved.txt
index d267c5f..c8a4b6e 100644
--- a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepSvn.approved.txt
+++ b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/TestScripts.testPrepSvn.approved.txt
@@ -84,7 +84,7 @@ cd /some/working/dir/
   if [[ "maven" == "maven" ]]
   then
     rm -rf /some/working/dir/maven/org/apache/hive
-    mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven $mavenArgs $mavenBuildArgs
+        mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven $mavenArgs $mavenBuildArgs
     mvn -B test -Dmaven.repo.local=/some/working/dir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
     cd itests
     mvn -B clean install -DskipTests -Dmaven.repo.local=/some/working/dir/maven $mavenArgs $mavenBuildArgs


[50/50] [abbrv] hive git commit: Merge branch 'master' into beeline-cli

Posted by xu...@apache.org.
Merge branch 'master' into beeline-cli


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/753b2b30
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/753b2b30
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/753b2b30

Branch: refs/heads/beeline-cli
Commit: 753b2b308afeb3808a3c10de5474f863d975a46f
Parents: 2ddd86d 3e713bc
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Thu May 14 21:31:01 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Thu May 14 21:31:01 2015 -0700

----------------------------------------------------------------------
 .../src/test/templates/TestAccumuloCliDriver.vm |    19 +-
 .../apache/hadoop/hive/ant/QTestGenTask.java    |    25 +
 .../java/org/apache/hive/beeline/BeeLine.java   |    10 +-
 bin/beeline.cmd                                 |    11 +-
 bin/ext/hiveserver2.cmd                         |     2 +-
 bin/ext/orcfiledump.sh                          |     9 +-
 bin/hive                                        |     2 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    27 +-
 data/files/tjoin1.txt                           |     3 +
 data/files/tjoin2.txt                           |     4 +
 dev-support/jenkins-execute-build.sh            |     2 +-
 dev-support/jenkins-execute-hms-test.sh         |     4 +-
 .../src/test/templates/TestHBaseCliDriver.vm    |    18 +-
 .../templates/TestHBaseNegativeCliDriver.vm     |    19 +-
 .../apache/hive/hcatalog/common/HCatUtil.java   |    37 +-
 .../hive/hcatalog/common/HiveClientCache.java   |    94 +-
 .../DefaultOutputCommitterContainer.java        |     6 +-
 .../mapreduce/FileOutputCommitterContainer.java |    22 +-
 .../mapreduce/FileOutputFormatContainer.java    |     8 +-
 .../hcatalog/mapreduce/HCatBaseInputFormat.java |    50 +-
 .../hcatalog/mapreduce/HCatOutputFormat.java    |     6 +-
 .../hcatalog/mapreduce/HCatRecordReader.java    |     4 +-
 .../hive/hcatalog/mapreduce/HCatSplit.java      |    21 +-
 .../hive/hcatalog/mapreduce/HCatTableInfo.java  |    12 +
 .../hcatalog/mapreduce/InitializeInput.java     |     6 +-
 .../hive/hcatalog/mapreduce/InputJobInfo.java   |     5 +
 .../hive/hcatalog/mapreduce/PartInfo.java       |   117 +-
 .../hive/hcatalog/mapreduce/Security.java       |    10 +-
 .../mapreduce/TaskCommitContextRegistry.java    |     6 +-
 .../hcatalog/common/TestHiveClientCache.java    |    37 +-
 .../hcatalog/mapreduce/HCatMapReduceTest.java   |     3 +-
 .../mapreduce/TestHCatOutputFormat.java         |     5 +-
 .../hcatalog/mapreduce/TestHCatPartitioned.java |    32 +-
 .../hcatalog/mapreduce/TestPassProperties.java  |     5 +-
 .../apache/hive/hcatalog/pig/PigHCatUtil.java   |    10 +-
 .../deployers/config/webhcat/webhcat-site.xml   |     9 +-
 .../templeton/deployers/deploy_e2e_artifacts.sh |     3 +
 .../src/test/e2e/templeton/deployers/env.sh     |     1 +
 .../streaming/AbstractRecordWriter.java         |    11 +-
 .../hive/hcatalog/streaming/HiveEndPoint.java   |     9 +-
 .../hive/hcatalog/api/HCatClientHMSImpl.java    |    17 +-
 .../hcatalog/api/repl/TestReplicationTask.java  |    14 +-
 hcatalog/webhcat/svr/pom.xml                    |    13 +-
 .../svr/src/main/config/webhcat-default.xml     |    10 +-
 .../hcatalog/templeton/CompleteDelegator.java   |     6 +-
 .../hcatalog/templeton/SecureProxySupport.java  |     9 +-
 .../hcatalog/templeton/tool/LaunchMapper.java   |     2 +-
 .../templeton/tool/TempletonControllerJob.java  |    15 +-
 .../hive/metastore/TestHiveMetaStore.java       |    30 +
 .../hive/beeline/TestBeeLineWithArgs.java       |    12 +
 .../test/resources/testconfiguration.properties |     9 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |    46 +
 .../org/apache/hive/jdbc/HiveConnection.java    |     3 +-
 metastore/dbs/derby/execute.sh                  |    37 +
 metastore/dbs/derby/prepare.sh                  |    63 +
 metastore/dbs/postgres/execute.sh               |    29 +
 metastore/dbs/postgres/prepare.sh               |    72 +
 .../upgrade/mssql/006-HIVE-9456.mssql.sql       |   323 +
 .../upgrade/mssql/hive-schema-1.2.0.mssql.sql   |   256 +-
 .../upgrade/mssql/hive-schema-1.3.0.mssql.sql   |   256 +-
 .../mssql/upgrade-1.1.0-to-1.2.0.mssql.sql      |     1 +
 .../upgrade/mysql/021-HIVE-7018.mysql.sql       |    53 -
 .../upgrade/mysql/hive-schema-1.2.0.mysql.sql   |    10 +-
 .../upgrade/mysql/hive-schema-1.3.0.mysql.sql   |    10 +-
 .../mysql/upgrade-1.1.0-to-1.2.0.mysql.sql      |     2 +-
 .../hive/metastore/AggregateStatsCache.java     |    33 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |    27 +-
 .../hive/metastore/HiveMetaStoreClient.java     |     6 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |     3 +
 .../hive/metastore/MetaStoreDirectSql.java      |    24 +-
 .../hive/metastore/RetryingMetaStoreClient.java |    89 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |    35 +-
 .../hive/metastore/TestHiveMetastoreCli.java    |    63 +
 .../hive/metastore/txn/TestTxnHandler.java      |    39 +-
 packaging/src/main/assembly/bin.xml             |     1 +
 pom.xml                                         |     5 +
 .../java/org/apache/hadoop/hive/ql/Context.java |    10 +-
 .../hive/ql/exec/CommonMergeJoinOperator.java   |    54 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |     7 +-
 .../hadoop/hive/ql/exec/HashTableLoader.java    |     4 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |   227 +-
 .../apache/hadoop/hive/ql/exec/ObjectCache.java |     7 +
 .../apache/hadoop/hive/ql/exec/Operator.java    |     4 +
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |     3 +-
 .../apache/hadoop/hive/ql/exec/Registry.java    |    29 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |    18 +
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |     1 +
 .../hadoop/hive/ql/exec/mr/HashTableLoader.java |     2 +-
 .../hadoop/hive/ql/exec/mr/ObjectCache.java     |     5 +
 .../persistence/BytesBytesMultiHashMap.java     |     1 +
 .../exec/persistence/HybridHashTableConf.java   |    86 +
 .../persistence/HybridHashTableContainer.java   |   248 +-
 .../ql/exec/persistence/KeyValueContainer.java  |    31 +-
 .../ql/exec/persistence/ObjectContainer.java    |    31 +-
 .../hive/ql/exec/spark/HashTableLoader.java     |     2 +-
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |     1 +
 .../hive/ql/exec/tez/HashTableLoader.java       |    65 +-
 .../hadoop/hive/ql/exec/tez/ObjectCache.java    |     6 +
 .../hive/ql/exec/tez/TezSessionState.java       |    13 +-
 .../ql/exec/vector/VectorColumnSetInfo.java     |     3 +-
 .../exec/vector/VectorMapJoinBaseOperator.java  |   185 +
 .../ql/exec/vector/VectorMapJoinOperator.java   |   127 +-
 .../VectorMapJoinOuterFilteredOperator.java     |   120 +
 .../mapjoin/VectorMapJoinCommonOperator.java    |    16 +-
 .../VectorMapJoinGenerateResultOperator.java    |    34 +-
 .../VectorMapJoinInnerBigOnlyLongOperator.java  |     5 +
 ...ctorMapJoinInnerBigOnlyMultiKeyOperator.java |     5 +
 ...VectorMapJoinInnerBigOnlyStringOperator.java |     5 +
 ...ectorMapJoinInnerGenerateResultOperator.java |    15 +
 .../mapjoin/VectorMapJoinInnerLongOperator.java |     4 +
 .../VectorMapJoinInnerMultiKeyOperator.java     |     4 +
 .../VectorMapJoinInnerStringOperator.java       |     4 +
 .../VectorMapJoinLeftSemiLongOperator.java      |     5 +
 .../VectorMapJoinLeftSemiMultiKeyOperator.java  |     5 +
 .../VectorMapJoinLeftSemiStringOperator.java    |     5 +
 ...ectorMapJoinOuterGenerateResultOperator.java |    31 +-
 .../mapjoin/VectorMapJoinOuterLongOperator.java |     4 +
 .../VectorMapJoinOuterMultiKeyOperator.java     |     4 +
 .../VectorMapJoinOuterStringOperator.java       |     4 +
 .../mapjoin/VectorMapJoinRowBytesContainer.java |     2 +-
 .../fast/VectorMapJoinFastBytesHashMap.java     |     4 +-
 .../VectorMapJoinFastBytesHashMultiSet.java     |     4 +-
 .../fast/VectorMapJoinFastBytesHashSet.java     |     4 +-
 .../fast/VectorMapJoinFastBytesHashTable.java   |     4 +-
 .../mapjoin/fast/VectorMapJoinFastHashMap.java  |     4 +-
 .../fast/VectorMapJoinFastHashMultiSet.java     |     4 +-
 .../mapjoin/fast/VectorMapJoinFastHashSet.java  |     4 +-
 .../fast/VectorMapJoinFastHashTable.java        |     4 +-
 .../fast/VectorMapJoinFastHashTableLoader.java  |     4 +-
 .../fast/VectorMapJoinFastLongHashMap.java      |     4 +-
 .../fast/VectorMapJoinFastLongHashMultiSet.java |     4 +-
 .../fast/VectorMapJoinFastLongHashSet.java      |     4 +-
 .../fast/VectorMapJoinFastLongHashTable.java    |     4 +-
 .../fast/VectorMapJoinFastMultiKeyHashMap.java  |     6 +-
 .../VectorMapJoinFastMultiKeyHashMultiSet.java  |     4 +-
 .../fast/VectorMapJoinFastMultiKeyHashSet.java  |     4 +-
 .../fast/VectorMapJoinFastStringHashMap.java    |     4 +-
 .../VectorMapJoinFastStringHashMultiSet.java    |     4 +-
 .../fast/VectorMapJoinFastStringHashSet.java    |     4 +-
 .../fast/VectorMapJoinFastTableContainer.java   |    23 +-
 .../hive/ql/io/orc/ColumnStatisticsImpl.java    |    33 +-
 .../hive/ql/io/orc/DateColumnStatistics.java    |     6 +-
 .../apache/hadoop/hive/ql/io/orc/FileDump.java  |    91 +-
 .../hadoop/hive/ql/io/orc/JsonFileDump.java     |   365 +
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |   222 +-
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |    85 +-
 .../ql/lockmgr/zookeeper/ZooKeeperHiveLock.java |    22 +
 .../BucketingSortingReduceSinkOptimizer.java    |    13 +
 .../optimizer/ConstantPropagateProcFactory.java |    83 +-
 .../ql/optimizer/IdentityProjectRemover.java    |    12 +
 .../ql/optimizer/LimitPushdownOptimizer.java    |     9 +-
 .../ql/optimizer/NonBlockingOpDeDupProc.java    |    11 +
 .../hadoop/hive/ql/optimizer/Optimizer.java     |    12 +-
 .../ql/optimizer/calcite/cost/HiveCost.java     |    16 +-
 .../calcite/reloperators/HiveSortExchange.java  |    37 +-
 .../rules/HiveInsertExchange4JoinRule.java      |     6 +-
 .../calcite/translator/HiveOpConverter.java     |    28 +-
 .../translator/HiveOpConverterPostProc.java     |    10 +
 .../hive/ql/optimizer/physical/Vectorizer.java  |    23 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |     1 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    19 +-
 .../org/apache/hadoop/hive/ql/parse/QB.java     |    10 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   104 +-
 .../hadoop/hive/ql/parse/WindowingSpec.java     |     7 +-
 .../apache/hadoop/hive/ql/plan/MapJoinDesc.java |     2 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |    46 +-
 .../MetaStoreAuthzAPIAuthorizerEmbedOnly.java   |     3 +-
 .../hadoop/hive/ql/session/SessionState.java    |    31 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |    38 +-
 .../hive/ql/txn/compactor/CompactorThread.java  |    12 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |    11 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java    |    42 +-
 .../org/apache/hadoop/hive/ql/udf/UDFMd5.java   |    79 +
 .../hive/ql/udf/generic/GenericUDFQuarter.java  |    85 +
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |    18 +-
 .../fast/TestVectorMapJoinFastLongHashMap.java  |    14 +-
 .../TestVectorMapJoinFastMultiKeyHashMap.java   |    14 +-
 .../hive/ql/io/orc/TestColumnStatistics.java    |    20 +-
 .../hadoop/hive/ql/io/orc/TestJsonFileDump.java |   138 +
 .../hive/ql/io/orc/TestRecordReaderImpl.java    |   170 +-
 .../hive/ql/io/sarg/TestSearchArgumentImpl.java |   104 +-
 ...tedCharsInColumnNameCreateTableNegative.java |    87 +
 .../hadoop/hive/ql/plan/TestViewEntity.java     |   108 +
 .../hive/ql/txn/compactor/TestCleaner.java      |    56 +-
 .../hive/ql/txn/compactor/TestInitiator.java    |    63 +-
 .../hive/ql/txn/compactor/TestWorker.java       |    45 +
 .../hadoop/hive/ql/udf/TestGenericUDFDate.java  |    92 -
 .../hive/ql/udf/TestGenericUDFDateAdd.java      |   145 -
 .../hive/ql/udf/TestGenericUDFDateDiff.java     |   116 -
 .../hive/ql/udf/TestGenericUDFDateSub.java      |   143 -
 .../hadoop/hive/ql/udf/TestGenericUDFUtils.java |    58 -
 .../apache/hadoop/hive/ql/udf/TestUDFMd5.java   |    57 +
 .../hive/ql/udf/generic/TestGenericUDFDate.java |    92 +
 .../ql/udf/generic/TestGenericUDFDateAdd.java   |   143 +
 .../ql/udf/generic/TestGenericUDFDateDiff.java  |   116 +
 .../ql/udf/generic/TestGenericUDFDateSub.java   |   143 +
 .../ql/udf/generic/TestGenericUDFQuarter.java   |   182 +
 .../ql/udf/generic/TestGenericUDFUtils.java     |    57 +
 .../clientpositive/auto_sortmerge_join_13.q     |     2 +
 .../test/queries/clientpositive/bucket_many.q   |    16 +
 .../test/queries/clientpositive/explainuser_2.q |     1 +
 .../extrapolate_part_stats_partial.q            |     2 +
 .../extrapolate_part_stats_partial_ndv.q        |     2 +
 ql/src/test/queries/clientpositive/fold_case.q  |    12 +
 ql/src/test/queries/clientpositive/fold_when.q  |    31 +
 .../clientpositive/hybridgrace_hashjoin_1.q     |   258 +
 .../clientpositive/hybridgrace_hashjoin_2.q     |   152 +
 .../queries/clientpositive/hybridhashjoin.q     |   250 -
 .../clientpositive/insert_overwrite_directory.q |   141 +
 .../queries/clientpositive/limit_pushdown.q     |     4 +
 .../queries/clientpositive/mapjoin_mapjoin.q    |     1 +
 ql/src/test/queries/clientpositive/mergejoin.q  |    17 +
 .../test/queries/clientpositive/tez_join_hash.q |     2 +
 .../test/queries/clientpositive/tez_smb_main.q  |     6 +
 ql/src/test/queries/clientpositive/udf_md5.q    |    13 +
 .../test/queries/clientpositive/udf_quarter.q   |   100 +
 .../clientpositive/vector_binary_join_groupby.q |    55 +
 .../clientpositive/vector_left_outer_join2.q    |    62 +
 .../clientpositive/vector_leftsemi_mapjoin.q    |   403 +
 .../clientpositive/windowing_windowspec.q       |     2 +
 ql/src/test/resources/orc-file-dump.json        |  1354 ++
 .../annotate_stats_join_pkfk.q.out              |    20 +-
 .../results/clientpositive/bucket_many.q.out    |   230 +
 .../encryption_insert_partition_static.q.out    |    14 +-
 .../test/results/clientpositive/fold_case.q.out |   301 +
 .../test/results/clientpositive/fold_when.q.out |   480 +
 .../insert_overwrite_directory.q.out            |  1813 +++
 ql/src/test/results/clientpositive/join32.q.out |    84 +-
 .../clientpositive/join32_lessSize.q.out        |   423 +-
 ql/src/test/results/clientpositive/join33.q.out |    84 +-
 .../clientpositive/join_alt_syntax.q.out        |   306 +-
 .../clientpositive/join_cond_pushdown_2.q.out   |   150 +-
 .../clientpositive/join_cond_pushdown_4.q.out   |   150 +-
 .../results/clientpositive/limit_pushdown.q.out |    88 +
 .../test/results/clientpositive/mergejoin.q.out |   844 +-
 .../ql_rewrite_gbtoidx_cbo_2.q.out              |    14 +-
 .../results/clientpositive/show_functions.q.out |     3 +
 .../results/clientpositive/spark/cbo_gby.q.out  |     4 +-
 .../clientpositive/spark/cbo_udf_udaf.q.out     |     2 +-
 ...pby_complex_types_multi_single_reducer.q.out |    38 +-
 .../results/clientpositive/spark/join32.q.out   |    88 +-
 .../clientpositive/spark/join32_lessSize.q.out  |   286 +-
 .../results/clientpositive/spark/join33.q.out   |    88 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |   210 +-
 .../spark/join_cond_pushdown_2.q.out            |    98 +-
 .../spark/join_cond_pushdown_4.q.out            |    98 +-
 .../spark/lateral_view_explode2.q.out           |     4 +-
 .../clientpositive/spark/limit_pushdown.q.out   |    94 +
 .../clientpositive/spark/union_remove_25.q.out  |     2 +-
 .../clientpositive/spark/union_top_level.q.out  |    16 +-
 .../spark/vector_cast_constant.q.java1.7.out    |    16 +-
 .../spark/vector_cast_constant.q.java1.8.out    |    16 +-
 .../spark/vectorized_timestamp_funcs.q.out      |     4 +-
 .../clientpositive/tez/auto_join29.q.out        |   500 +
 .../clientpositive/tez/explainuser_2.q.out      |  1529 +-
 .../tez/hybridgrace_hashjoin_1.q.out            |  1587 ++
 .../tez/hybridgrace_hashjoin_2.q.out            |  1417 ++
 .../clientpositive/tez/hybridhashjoin.q.out     |  1566 --
 .../clientpositive/tez/limit_pushdown.q.out     |    94 +
 .../results/clientpositive/tez/mergejoin.q.out  |   844 +-
 .../tez/vector_binary_join_groupby.q.out        |   303 +
 .../tez/vector_left_outer_join2.q.out           |   553 +
 .../tez/vector_leftsemi_mapjoin.q.out           | 13807 +++++++++++++++++
 .../test/results/clientpositive/udf_md5.q.out   |    61 +
 .../results/clientpositive/udf_quarter.q.out    |   246 +
 .../vector_binary_join_groupby.q.out            |   293 +
 .../vector_left_outer_join2.q.out               |   568 +
 .../vector_leftsemi_mapjoin.q.out               | 13572 ++++++++++++++++
 .../clientpositive/windowing_windowspec.q.out   |   108 +
 ql/src/test/templates/TestCliDriver.vm          |    18 +-
 ql/src/test/templates/TestCompareCliDriver.vm   |    21 +-
 ql/src/test/templates/TestNegativeCliDriver.vm  |    18 +-
 ql/src/test/templates/TestParseNegative.vm      |    17 +-
 .../hadoop/hive/ql/io/sarg/PredicateLeaf.java   |    19 +-
 service/pom.xml                                 |     5 +
 .../auth/LdapAuthenticationProviderImpl.java    |     2 +-
 .../thrift/EmbeddedThriftBinaryCLIService.java  |     5 +
 .../cli/thrift/ThriftBinaryCLIService.java      |     1 -
 .../apache/hive/service/server/HiveServer2.java |   106 +-
 .../service/cli/session/TestSessionHooks.java   |     3 +-
 testutils/metastore/execute-test-on-lxc.sh      |     7 +-
 .../ptest2/src/main/resources/source-prep.vm    |    11 +
 .../hive/ptest/execution/TestScripts.java       |    23 +
 .../TestScripts.testPrepGit.approved.txt        |     2 +-
 .../TestScripts.testPrepHadoop1.approved.txt    |   111 +
 .../TestScripts.testPrepNone.approved.txt       |     2 +-
 .../TestScripts.testPrepSvn.approved.txt        |     2 +-
 287 files changed, 47828 insertions(+), 6152 deletions(-)
----------------------------------------------------------------------



[24/50] [abbrv] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)

Posted by xu...@apache.org.
HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/25310407
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/25310407
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/25310407

Branch: refs/heads/beeline-cli
Commit: 2531040758e796c5cc469a893c50a8f5a388ded6
Parents: 632a309
Author: Jason Dere <jd...@hortonworks.com>
Authored: Wed May 6 16:19:59 2015 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Wed May 6 16:19:59 2015 -0700

----------------------------------------------------------------------
 data/files/tjoin1.txt                           |     3 +
 data/files/tjoin2.txt                           |     4 +
 .../test/resources/testconfiguration.properties |     2 +
 .../exec/vector/VectorMapJoinBaseOperator.java  |   185 +
 .../ql/exec/vector/VectorMapJoinOperator.java   |   129 +-
 .../VectorMapJoinOuterFilteredOperator.java     |   120 +
 .../VectorMapJoinGenerateResultOperator.java    |     5 +
 .../hive/ql/optimizer/physical/Vectorizer.java  |    23 +-
 .../clientpositive/vector_left_outer_join2.q    |    62 +
 .../clientpositive/vector_leftsemi_mapjoin.q    |   403 +
 .../tez/vector_left_outer_join2.q.out           |   553 +
 .../tez/vector_leftsemi_mapjoin.q.out           | 13807 +++++++++++++++++
 .../vector_left_outer_join2.q.out               |   568 +
 .../vector_leftsemi_mapjoin.q.out               | 13572 ++++++++++++++++
 14 files changed, 29317 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/data/files/tjoin1.txt
----------------------------------------------------------------------
diff --git a/data/files/tjoin1.txt b/data/files/tjoin1.txt
new file mode 100644
index 0000000..897e0c5
--- /dev/null
+++ b/data/files/tjoin1.txt
@@ -0,0 +1,3 @@
+0|10|15
+1|20|25
+2|\N|50
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/data/files/tjoin2.txt
----------------------------------------------------------------------
diff --git a/data/files/tjoin2.txt b/data/files/tjoin2.txt
new file mode 100644
index 0000000..24820e9
--- /dev/null
+++ b/data/files/tjoin2.txt
@@ -0,0 +1,4 @@
+0|10|BB
+1|15|DD
+2|\N|EE
+3|10|FF
\ No newline at end of file
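
In both fixture files the field delimiter is '|' and \N is LazySimpleSerDe's default text encoding for SQL NULL, so the third row of tjoin1.txt and the third row of tjoin2.txt each carry a NULL middle column. A table definition along these lines would read them back (table and column names here are illustrative, not taken from the patch):

    CREATE TABLE tjoin1_stage (rnum int, c1 int, c2 int)
    ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
    STORED AS TEXTFILE;
    LOAD DATA LOCAL INPATH 'data/files/tjoin1.txt' OVERWRITE INTO TABLE tjoin1_stage;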

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 8e9984a..134fded 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -222,6 +222,8 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   vector_interval_1.q,\
   vector_interval_2.q,\
   vector_left_outer_join.q,\
+  vector_left_outer_join2.q,\
+  vector_leftsemi_mapjoin.q,\
   vector_mapjoin_reduce.q,\
   vector_mr_diff_schema_alias.q,\
   vector_multi_insert.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
new file mode 100644
index 0000000..0baec2c
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
+import org.apache.hadoop.hive.ql.exec.persistence.ObjectContainer;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+/**
+ * The *NON-NATIVE* base vector map join operator class used by VectorMapJoinOperator and
+ * VectorMapJoinOuterFilteredOperator.
+ *
+ * It holds the variables and code shared for the output batch, the Hybrid Grace spill scratch batch, and batch flushing.
+ */
+public class VectorMapJoinBaseOperator extends MapJoinOperator implements VectorizationContextRegion {
+
+  private static final Log LOG = LogFactory.getLog(VectorMapJoinBaseOperator.class.getName());
+
+  private static final long serialVersionUID = 1L;
+
+  protected VectorizationContext vOutContext;
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  protected transient VectorizedRowBatch outputBatch;
+  protected transient VectorizedRowBatch scratchBatch;  // holds restored (from disk) big table rows
+
+  protected transient Map<ObjectInspector, VectorAssignRowSameBatch> outputVectorAssignRowMap;
+
+  protected transient VectorizedRowBatchCtx vrbCtx = null;
+
+  protected transient int tag;  // big table alias
+
+  public VectorMapJoinBaseOperator() {
+    super();
+  }
+
+  public VectorMapJoinBaseOperator (VectorizationContext vContext, OperatorDesc conf)
+    throws HiveException {
+    super();
+
+    MapJoinDesc desc = (MapJoinDesc) conf;
+    this.conf = desc;
+
+    order = desc.getTagOrder();
+    numAliases = desc.getExprs().size();
+    posBigTable = (byte) desc.getPosBigTable();
+    filterMaps = desc.getFilterMap();
+    noOuterJoin = desc.isNoOuterJoin();
+
+     // We are making a new output vectorized row batch.
+    vOutContext = new VectorizationContext(getName(), desc.getOutputColumnNames());
+  }
+
+  @Override
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+
+    Collection<Future<?>> result = super.initializeOp(hconf);
+
+    vrbCtx = new VectorizedRowBatchCtx();
+    vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector);
+
+    outputBatch = vrbCtx.createVectorizedRowBatch();
+
+    outputVectorAssignRowMap = new HashMap<ObjectInspector, VectorAssignRowSameBatch>();
+
+    return result;
+  }
+
+  /**
+   * 'forwards' the (row-mode) record into the (vectorized) output batch
+   */
+  @Override
+  protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException {
+    Object[] values = (Object[]) row;
+    VectorAssignRowSameBatch va = outputVectorAssignRowMap.get(outputOI);
+    if (va == null) {
+      va = new VectorAssignRowSameBatch();
+      va.init((StructObjectInspector) outputOI, vOutContext.getProjectedColumns());
+      va.setOneBatch(outputBatch);
+      outputVectorAssignRowMap.put(outputOI, va);
+    }
+
+    va.assignRow(outputBatch.size, values);
+
+    ++outputBatch.size;
+    if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
+      flushOutput();
+    }
+  }
+
+  private void flushOutput() throws HiveException {
+    forward(outputBatch, null);
+    outputBatch.reset();
+  }
+
+  @Override
+  public void closeOp(boolean aborted) throws HiveException {
+    super.closeOp(aborted);
+    for (MapJoinTableContainer tableContainer : mapJoinTables) {
+      if (tableContainer != null) {
+        tableContainer.dumpMetrics();
+      }
+    }
+    if (!aborted && 0 < outputBatch.size) {
+      flushOutput();
+    }
+  }
+
+  /**
+   * Re-processes spilled big table rows fed back from the parent MapJoinOperator by packing them into vectorized row batches.
+   */
+  @Override
+  protected void reProcessBigTable(int partitionId)
+      throws HiveException {
+
+    if (scratchBatch == null) {
+      // The process method was not called -- no big table rows.
+      return;
+    }
+
+    HybridHashTableContainer.HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
+    ObjectContainer bigTable = partition.getMatchfileObjContainer();
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    while (bigTable.hasNext()) {
+      Object row = bigTable.next();
+      VectorizedBatchUtil.addProjectedRowToBatchFrom(row,
+          (StructObjectInspector) inputObjInspectors[posBigTable],
+          scratchBatch.size, scratchBatch, dataOutputBuffer);
+      scratchBatch.size++;
+
+      if (scratchBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
+        process(scratchBatch, tag); // call process once we have a full batch
+        scratchBatch.reset();
+        dataOutputBuffer.reset();
+      }
+    }
+    // Process the final row batch that has fewer than DEFAULT_SIZE rows
+    if (scratchBatch.size > 0) {
+      process(scratchBatch, tag);
+      scratchBatch.reset();
+      dataOutputBuffer.reset();
+    }
+    bigTable.clear();
+  }
+
+  @Override
+  public VectorizationContext getOuputVectorizationContext() {
+    return vOutContext;
+  }
+}
\ No newline at end of file
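
The accumulate-and-flush contract the new base class centralizes (fill a reusable batch via internalForward(), forward it downstream once it reaches VectorizedRowBatch.DEFAULT_SIZE, and flush the partial tail in closeOp()) can be sketched in isolation like this; it is a generic illustration, and the class and method names are hypothetical:

    // Generic accumulate-and-flush forwarder, mirroring the pattern above.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    final class BatchForwarder<T> {
      private final int capacity;              // plays the role of VectorizedRowBatch.DEFAULT_SIZE
      private final List<T> batch;
      private final Consumer<List<T>> downstream;

      BatchForwarder(int capacity, Consumer<List<T>> downstream) {
        this.capacity = capacity;
        this.batch = new ArrayList<>(capacity);
        this.downstream = downstream;
      }

      void forwardRow(T row) {                 // mirrors internalForward()
        batch.add(row);
        if (batch.size() == capacity) {
          flush();
        }
      }

      void close() {                           // mirrors closeOp(): emit the partial tail batch
        if (!batch.isEmpty()) {
          flush();
        }
      }

      private void flush() {                   // mirrors flushOutput(): emit, then reset for reuse
        downstream.accept(new ArrayList<>(batch));
        batch.clear();
      }
    }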

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
index 0547346..15c747e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
@@ -18,10 +18,8 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
-import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Future;
@@ -31,11 +29,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
-import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor;
-import org.apache.hadoop.hive.ql.exec.persistence.ObjectContainer;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
@@ -45,36 +40,28 @@ import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.io.DataOutputBuffer;
 
 /**
  * The vectorized version of the MapJoinOperator.
  */
-public class VectorMapJoinOperator extends MapJoinOperator implements VectorizationContextRegion {
+public class VectorMapJoinOperator extends VectorMapJoinBaseOperator {
 
-  private static final Log LOG = LogFactory.getLog(
-      VectorMapJoinOperator.class.getName());
-
-   /**
-   *
-   */
   private static final long serialVersionUID = 1L;
 
-  private VectorExpression[] keyExpressions;
+  private static final Log LOG = LogFactory.getLog(
+      VectorMapJoinOperator.class.getName());
 
-  private VectorExpression[] bigTableFilterExpressions;
-  private VectorExpression[] bigTableValueExpressions;
+  protected VectorExpression[] keyExpressions;
 
-  private VectorizationContext vOutContext;
+  protected VectorExpression[] bigTableFilterExpressions;
+  protected VectorExpression[] bigTableValueExpressions;
 
   // The above members are initialized by the constructor and must not be
   // transient.
   //---------------------------------------------------------------------------
 
-  private transient VectorizedRowBatch outputBatch;
-  private transient VectorizedRowBatch scratchBatch;  // holds restored (from disk) big table rows
+
   private transient VectorExpressionWriter[] valueWriters;
-  private transient Map<ObjectInspector, VectorAssignRowSameBatch> outputVectorAssignRowMap;
 
   // These members are used as out-of-band params
   // for the inner-loop super.processOp callbacks
@@ -84,9 +71,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
   private transient VectorHashKeyWrapperBatch keyWrapperBatch;
   private transient VectorExpressionWriter[] keyOutputWriters;
 
-  private transient VectorizedRowBatchCtx vrbCtx = null;
-
-  private transient int tag;  // big table alias
   private VectorExpressionWriter[] rowWriters;  // Writer for producing row from input batch
   protected transient Object[] singleRow;
 
@@ -97,16 +81,10 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
 
   public VectorMapJoinOperator (VectorizationContext vContext, OperatorDesc conf)
     throws HiveException {
-    this();
 
-    MapJoinDesc desc = (MapJoinDesc) conf;
-    this.conf = desc;
+    super(vContext, conf);
 
-    order = desc.getTagOrder();
-    numAliases = desc.getExprs().size();
-    posBigTable = (byte) desc.getPosBigTable();
-    filterMaps = desc.getFilterMap();
-    noOuterJoin = desc.isNoOuterJoin();
+    MapJoinDesc desc = (MapJoinDesc) conf;
 
     Map<Byte, List<ExprNodeDesc>> filterExpressions = desc.getFilters();
     bigTableFilterExpressions = vContext.getVectorExpressions(filterExpressions.get(posBigTable),
@@ -118,9 +96,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     // We're only going to evaluate the big table vectorized expressions,
     Map<Byte, List<ExprNodeDesc>> exprs = desc.getExprs();
     bigTableValueExpressions = vContext.getVectorExpressions(exprs.get(posBigTable));
-
-    // We are making a new output vectorized row batch.
-    vOutContext = new VectorizationContext(getName(), desc.getOutputColumnNames());
   }
 
   @Override
@@ -144,12 +119,7 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     List<ExprNodeDesc> keyDesc = conf.getKeys().get(posBigTable);
     keyOutputWriters = VectorExpressionWriterFactory.getExpressionWriters(keyDesc);
 
-    vrbCtx = new VectorizedRowBatchCtx();
-    vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector);
-
-    outputBatch = vrbCtx.createVectorizedRowBatch();
-
-    keyWrapperBatch =VectorHashKeyWrapperBatch.compileKeyWrapperBatch(keyExpressions);
+    keyWrapperBatch = VectorHashKeyWrapperBatch.compileKeyWrapperBatch(keyExpressions);
 
     Map<Byte, List<ExprNodeDesc>> valueExpressions = conf.getExprs();
     List<ExprNodeDesc> bigTableExpressions = valueExpressions.get(posBigTable);
@@ -203,51 +173,9 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     // Filtering is handled in the input batch processing
     filterMaps[posBigTable] = null;
 
-    outputVectorAssignRowMap = new HashMap<ObjectInspector, VectorAssignRowSameBatch>();
-
     return result;
   }
 
-  /**
-   * 'forwards' the (row-mode) record into the (vectorized) output batch
-   */
-  @Override
-  protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException {
-    Object[] values = (Object[]) row;
-    VectorAssignRowSameBatch va = outputVectorAssignRowMap.get(outputOI);
-    if (va == null) {
-      va = new VectorAssignRowSameBatch();
-      va.init((StructObjectInspector) outputOI, vOutContext.getProjectedColumns());
-      va.setOneBatch(outputBatch);
-      outputVectorAssignRowMap.put(outputOI, va);
-    }
-
-    va.assignRow(outputBatch.size, values);
-
-    ++outputBatch.size;
-    if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
-      flushOutput();
-    }
-  }
-
-  private void flushOutput() throws HiveException {
-    forward(outputBatch, null);
-    outputBatch.reset();
-  }
-
-  @Override
-  public void closeOp(boolean aborted) throws HiveException {
-    super.closeOp(aborted);
-    for (MapJoinTableContainer tableContainer : mapJoinTables) {
-      if (tableContainer != null) {
-        tableContainer.dumpMetrics();
-      }
-    }
-    if (!aborted && 0 < outputBatch.size) {
-      flushOutput();
-    }
-  }
-
   @Override
   protected JoinUtil.JoinResult setMapJoinKey(ReusableGetAdaptor dest, Object row, byte alias)
       throws HiveException {
@@ -256,7 +184,7 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
 
   @Override
   public void process(Object row, int tag) throws HiveException {
-    byte alias = (byte) tag;
+
     VectorizedRowBatch inBatch = (VectorizedRowBatch) row;
 
     // Preparation for hybrid grace hash join
@@ -297,11 +225,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
   }
 
   @Override
-  public VectorizationContext getOuputVectorizationContext() {
-    return vOutContext;
-  }
-
-  @Override
   protected void spillBigTableRow(MapJoinTableContainer hybridHtContainer, Object row)
       throws HiveException {
     // Extract the actual row from row batch
@@ -310,36 +233,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     super.spillBigTableRow(hybridHtContainer, actualRow);
   }
 
-  @Override
-  protected void reProcessBigTable(int partitionId)
-      throws HiveException {
-
-    HybridHashTableContainer.HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
-    ObjectContainer bigTable = partition.getMatchfileObjContainer();
-
-    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
-    while (bigTable.hasNext()) {
-      Object row = bigTable.next();
-      VectorizedBatchUtil.addProjectedRowToBatchFrom(row,
-          (StructObjectInspector) inputObjInspectors[posBigTable],
-          scratchBatch.size, scratchBatch, dataOutputBuffer);
-      scratchBatch.size++;
-
-      if (scratchBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
-        process(scratchBatch, tag); // call process once we have a full batch
-        scratchBatch.reset();
-        dataOutputBuffer.reset();
-      }
-    }
-    // Process the row batch that has less than DEFAULT_SIZE rows
-    if (scratchBatch.size > 0) {
-      process(scratchBatch, tag);
-      scratchBatch.reset();
-      dataOutputBuffer.reset();
-    }
-    bigTable.clear();
-  }
-
   // Code borrowed from VectorReduceSinkOperator
   private Object[] getRowObject(VectorizedRowBatch vrb, int rowIndex) throws HiveException {
     int batchIndex = rowIndex;

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
new file mode 100644
index 0000000..5aecfcc
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import java.util.Collection;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+
+/**
+ * This is the *NON-NATIVE* vectorized map join operator, used only for filtered LEFT OUTER JOINs.
+ *
+ * It passes rows through one at a time so that the super MapJoinOperator can do the outer join filtering properly.
+ *
+ */
+public class VectorMapJoinOuterFilteredOperator extends VectorMapJoinBaseOperator {
+
+  private static final long serialVersionUID = 1L;
+
+  private VectorizationContext vContext;
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  private transient boolean firstBatch;
+
+  private transient VectorExtractRowDynBatch vectorExtractRowDynBatch;
+
+  protected transient Object[] singleRow;
+
+  public VectorMapJoinOuterFilteredOperator() {
+    super();
+  }
+
+  public VectorMapJoinOuterFilteredOperator(VectorizationContext vContext, OperatorDesc conf)
+      throws HiveException {
+    super(vContext, conf);
+
+    this.vContext = vContext;
+  }
+
+  @Override
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+
+    // We need an input object inspector for the row we will extract out of the
+    // vectorized row batch, not, for example, an original inspector for an ORC table.
+    inputObjInspectors[0] =
+        VectorizedBatchUtil.convertToStandardStructObjectInspector((StructObjectInspector) inputObjInspectors[0]);
+
+    // Call super VectorMapJoinBaseOperator, which in turn calls MapJoinOperator's
+    // initializeOp with the new input inspector.
+    Collection<Future<?>> result = super.initializeOp(hconf);
+
+    firstBatch = true;
+
+    return result;
+  }
+
+  @Override
+  public void process(Object data, int tag) throws HiveException {
+
+    VectorizedRowBatch batch = (VectorizedRowBatch) data;
+
+    // Preparation for hybrid grace hash join
+    this.tag = tag;
+    if (scratchBatch == null) {
+      scratchBatch = VectorizedBatchUtil.makeLike(batch);
+    }
+
+    if (firstBatch) {
+      vectorExtractRowDynBatch = new VectorExtractRowDynBatch();
+      vectorExtractRowDynBatch.init((StructObjectInspector) inputObjInspectors[0], vContext.getProjectedColumns());
+
+      singleRow = new Object[vectorExtractRowDynBatch.getCount()];
+
+      firstBatch = false;
+    }
+
+
+    vectorExtractRowDynBatch.setBatchOnEntry(batch);
+
+    // VectorizedBatchUtil.debugDisplayBatch(batch, "VectorMapJoinOuterFilteredOperator process ");
+
+    if (batch.selectedInUse) {
+      int selected[] = batch.selected;
+      for (int logical = 0 ; logical < batch.size; logical++) {
+        int batchIndex = selected[logical];
+        vectorExtractRowDynBatch.extractRow(batchIndex, singleRow);
+        super.process(singleRow, tag);
+      }
+    } else {
+      for (int batchIndex = 0 ; batchIndex < batch.size; batchIndex++) {
+        vectorExtractRowDynBatch.extractRow(batchIndex, singleRow);
+        super.process(singleRow, tag);
+      }
+    }
+
+    vectorExtractRowDynBatch.forgetBatchOnExit();
+  }
+}

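The process() loop above shows the standard way to iterate a VectorizedRowBatch: when selectedInUse is set, batch.selected maps the first batch.size logical positions to physical row indices; otherwise the rows are dense and the batch index is the row index. A minimal sketch of that iteration, with Batch as a hypothetical stand-in for VectorizedRowBatch:

    public class SelectedIteration {
      // Hypothetical stand-in for VectorizedRowBatch.
      static class Batch {
        int size;              // number of live rows
        boolean selectedInUse; // whether `selected` is the index map
        int[] selected;        // logical position -> physical row index
      }

      interface RowConsumer {
        void accept(int batchIndex);
      }

      static void forEachRow(Batch batch, RowConsumer consumer) {
        if (batch.selectedInUse) {
          for (int logical = 0; logical < batch.size; logical++) {
            consumer.accept(batch.selected[logical]); // indirect: follow the selection vector
          }
        } else {
          for (int batchIndex = 0; batchIndex < batch.size; batchIndex++) {
            consumer.accept(batchIndex);              // direct: rows are dense
          }
        }
      }
    }

Handling both branches is what lets upstream filters mark rows as deselected without compacting the batch.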
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
index 0f1c7a8..860ebb5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
@@ -523,6 +523,11 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
 
     LOG.info(CLASS_NAME + " reProcessBigTable enter...");
 
+    if (spillReplayBatch == null) {
+      // The process method was not called -- no big table rows.
+      return;
+    }
+
     HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
 
     int rowCount = 0;

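The null check added above guards against replaying a spilled partition when process() never ran: spillReplayBatch is allocated lazily on the first big-table row, so a null batch means there are no rows to replay. A minimal sketch of that lazy-allocation guard (all names here are hypothetical):

    public class ReplayGuard {
      private Object[] replayBatch; // stands in for spillReplayBatch; null until the first row

      void process(Object row) {
        if (replayBatch == null) {
          replayBatch = new Object[1024]; // lazily allocated on the first input row
        }
        // ... buffer the row, possibly spilling it to disk ...
      }

      void reProcessBigTable(int partitionId) {
        if (replayBatch == null) {
          return; // process() was never called -- no big-table rows, nothing to replay
        }
        // ... read spilled rows back and reprocess them ...
      }
    }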
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 64d7c3e..096239e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -54,6 +54,9 @@ import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiString
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterLongOperator;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterMultiKeyOperator;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterStringOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOuterFilteredOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorSMBMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
@@ -1596,7 +1599,25 @@ public class Vectorizer implements PhysicalPlanResolver {
           boolean specialize = canSpecializeMapJoin(op, desc, isTez);
 
           if (!specialize) {
-            vectorOp = OperatorFactory.getVectorOperator(desc, vContext);
+
+            Class<? extends Operator<?>> opClass = null;
+            if (op instanceof MapJoinOperator) {
+
+              // *NON-NATIVE* vector map join variants: LEFT OUTER JOIN with filters is handled specially...
+
+              List<ExprNodeDesc> bigTableFilters = desc.getFilters().get((byte) desc.getPosBigTable());
+              boolean isOuterAndFiltered = (!desc.isNoOuterJoin() && bigTableFilters.size() > 0);
+              if (!isOuterAndFiltered) {
+                opClass = VectorMapJoinOperator.class;
+              } else {
+                opClass = VectorMapJoinOuterFilteredOperator.class;
+              }
+            } else if (op instanceof SMBMapJoinOperator) {
+              opClass = VectorSMBMapJoinOperator.class;
+            }
+
+            vectorOp = OperatorFactory.getVectorOperator(opClass, op.getConf(), vContext);
+
           } else {
 
             // TEMPORARY Until Native Vector Map Join with Hybrid passes tests...

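The Vectorizer change above picks the non-native operator class from the join shape: an outer join whose big table still carries residual filter expressions cannot use the plain VectorMapJoinOperator (vectorized filtering would bypass the outer-join filter handling in MapJoinOperator), so it falls back to the row pass-through VectorMapJoinOuterFilteredOperator. A minimal sketch of that decision, with JoinDesc as a hypothetical stand-in for the MapJoinDesc fields consulted above:

    import java.util.Collections;
    import java.util.List;

    public class OpClassChoice {
      // Hypothetical stand-in for the MapJoinDesc fields read by the Vectorizer.
      static class JoinDesc {
        boolean noOuterJoin = true;
        List<String> bigTableFilters = Collections.emptyList();
      }

      static String chooseOperatorClass(JoinDesc desc) {
        // Outer join + residual big-table filters => row pass-through variant.
        boolean isOuterAndFiltered =
            !desc.noOuterJoin && !desc.bigTableFilters.isEmpty();
        return isOuterAndFiltered
            ? "VectorMapJoinOuterFilteredOperator"
            : "VectorMapJoinOperator";
      }
    }

The vector_left_outer_join2.q test that follows exercises exactly this split by running the same filtered LEFT OUTER JOIN under every combination of the vectorization and hybrid-grace settings.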
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_left_outer_join2.q b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
new file mode 100644
index 0000000..098d002
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
@@ -0,0 +1,62 @@
+set hive.fetch.task.conversion=none;
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+drop table if exists TJOIN1;
+drop table if exists TJOIN2;
+create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc;
+create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc;
+create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE ;
+create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE ;
+LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE;
+LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE;
+INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE;
+INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE;
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
new file mode 100644
index 0000000..522ab12
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
@@ -0,0 +1,403 @@
+set hive.fetch.task.conversion=none;
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+-- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10;
+
+select * from t1 sort by key;
+
+create table t2 stored as orc as select cast(2*key as int) key, value from t1;
+
+select * from t2 sort by key;
+
+create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b;
+select * from t3 sort by key, value;
+
+create table t4 (key int, value string) stored as orc;
+select * from t4;
+
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
new file mode 100644
index 0000000..929194e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
@@ -0,0 +1,553 @@
+PREHOOK: query: drop table if exists TJOIN1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists TJOIN2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1
+POSTHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1
+PREHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2
+POSTHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2
+PREHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1STAGE
+POSTHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1STAGE
+PREHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2STAGE
+POSTHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2STAGE
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin1stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin1stage
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin2stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin2stage
+PREHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1stage
+PREHOOK: Output: default@tjoin1
+POSTHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1stage
+POSTHOOK: Output: default@tjoin1
+POSTHOOK: Lineage: tjoin1.c1 SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.c2 EXPRESSION [(tjoin1stage)tjoin1stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin1.rnum SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin2stage
+PREHOOK: Output: default@tjoin2
+POSTHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin2stage
+POSTHOOK: Output: default@tjoin2
+POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin2.c2 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin2.rnum SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
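
The three rows above show why the (c2 > 15) predicate sits inside the Map
Join Operator as a "filter predicate" rather than in a separate Filter
Operator: in a LEFT OUTER JOIN, a predicate written in the ON clause only
decides whether a row pair matches; it never removes rows from the
preserved (left) side. A minimal sketch of that semantics in Java, with
hypothetical tjoin2 rows (only the tjoin1 rows mirror the output above):

    // Minimal sketch of LEFT OUTER JOIN with an ON-clause predicate. The
    // tjoin2 sample rows are hypothetical, not the test's real data.
    public class LeftOuterJoinOnClauseSketch {
        static String s(Object v) { return v == null ? "NULL" : v.toString(); }

        public static void main(String[] args) {
            Integer[][] tjoin1 = {{0, 10, 15}, {1, 20, 25}, {2, null, 50}}; // (rnum, c1, c2)
            Object[][] tjoin2 = {{15, "AA"}, {40, "CC"}};                   // hypothetical (c1, c2)

            for (Integer[] l : tjoin1) {
                boolean matched = false;
                for (Object[] r : tjoin2) {
                    // ON clause: tjoin1.c1 = tjoin2.c1 AND tjoin1.c2 > 15
                    if (l[1] != null && l[1].equals(r[0]) && l[2] != null && l[2] > 15) {
                        System.out.println(s(l[0]) + "\t" + s(l[1]) + "\t" + s(l[2]) + "\t" + s(r[1]));
                        matched = true;
                    }
                }
                // preserved side: emit the left row NULL-padded instead of dropping it
                if (!matched) {
                    System.out.println(s(l[0]) + "\t" + s(l[1]) + "\t" + s(l[2]) + "\tNULL");
                }
            }
        }
    }

With this data nothing joins, so every left row surfaces exactly once with
a NULL right side, the same shape as the result above.
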
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+0	10	15	NULL
+2	NULL	50	NULL
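
The only change from the previous plan is the HybridGraceHashJoin: true
marker. When the build side may not fit in memory, the join hash-partitions
both inputs on the join key, keeps as many build partitions in memory as it
can, and spills the rest to be joined partition by partition afterwards. A
rough, in-memory-only sketch of the grace-style partitioning idea (the
partition count and rows are illustrative, and real Hive spills to disk):

    import java.util.*;

    // Rough sketch of grace-style hash-join partitioning: partition both
    // sides by the same hash of the join key, then join each partition pair
    // independently. Purely illustrative; nothing here spills to disk.
    public class GraceHashJoinSketch {
        static List<List<int[]>> partition(List<int[]> rows, int parts) {
            List<List<int[]>> out = new ArrayList<>();
            for (int i = 0; i < parts; i++) out.add(new ArrayList<>());
            for (int[] r : rows) out.get(Math.floorMod(r[0], parts)).add(r);
            return out;
        }

        public static void main(String[] args) {
            int parts = 4; // illustrative; the real count is sized to memory
            List<int[]> build = List.of(new int[]{10, 100}, new int[]{20, 200});
            List<int[]> probe = List.of(new int[]{10, 1}, new int[]{30, 3});
            List<List<int[]>> buildParts = partition(build, parts);
            List<List<int[]>> probeParts = partition(probe, parts);
            for (int p = 0; p < parts; p++) {
                // a hybrid join keeps hot build partitions in memory and
                // reloads spilled ones before this per-partition pass
                Map<Integer, Integer> table = new HashMap<>();
                for (int[] b : buildParts.get(p)) table.put(b[0], b[1]);
                for (int[] pr : probeParts.get(p)) {
                    Integer bv = table.get(pr[0]);
                    if (bv != null)
                        System.out.println("key=" + pr[0] + " build=" + bv + " probe=" + pr[1]);
                }
            }
        }
    }
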
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
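
Both map vertices now also report Execution mode: vectorized: expressions
are evaluated over batches of column vectors in tight loops instead of row
by row. A toy sketch of the idea for the c2 > 15 predicate (batch contents
are illustrative, not Hive's actual VectorizedRowBatch machinery):

    // Toy sketch of vectorized predicate evaluation: one pass over a column
    // vector per batch, collecting the indices that qualify.
    public class VectorizedPredicateSketch {
        public static void main(String[] args) {
            long[] c2 = {15, 25, 50};              // one batch of tjoin1.c2 values
            int[] selected = new int[c2.length];   // indices passing the predicate
            int n = 0;
            for (int i = 0; i < c2.length; i++) {  // single tight loop per batch
                if (c2[i] > 15) selected[n++] = i; // the ON-clause predicate c2 > 15
            }
            for (int i = 0; i < n; i++) {
                System.out.println("row " + selected[i] + " qualifies");
            }
        }
    }
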
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+0	10	15	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+2	NULL	50	NULL
+0	10	15	BB
+0	10	15	FF
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+2	NULL	50	NULL
+1	20	25	NULL
+0	10	15	BB
+0	10	15	FF


[39/50] [abbrv] hive git commit: HIVE-10608 : Fix useless 'if' statement in RetryingMetaStoreClient (135) (Alexander Pivovarov via Szehon)

Posted by xu...@apache.org.
HIVE-10608 : Fix useless 'if' statement in RetryingMetaStoreClient (135) (Alexander Pivovarov via Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7149ab15
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7149ab15
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7149ab15

Branch: refs/heads/beeline-cli
Commit: 7149ab15787a5f26954ece2283944ab78e8694ec
Parents: 48a243e
Author: Szehon Ho <sz...@cloudera.com>
Authored: Thu May 7 11:13:59 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Thu May 7 11:13:59 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/metastore/RetryingMetaStoreClient.java | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7149ab15/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
index fb44484..e282981 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
@@ -173,8 +173,11 @@ public class RetryingMetaStoreClient implements InvocationHandler {
           throw e.getCause();
         }
       } catch (MetaException e) {
-        if (e.getMessage().matches("(?s).*(IO|TTransport)Exception.*"));
-        caughtException = e;
+        if (e.getMessage().matches("(?s).*(IO|TTransport)Exception.*")) {
+          caughtException = e;
+        } else {
+          throw e;
+        }
       }
 
       if (retriesMade >=  retryLimit) {
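
The bug was the trailing semicolon: if (cond); is a complete if statement
with an empty body, so the assignment on the next line ran unconditionally
and every MetaException was treated as retriable. The fix retries only
messages that look like transport failures and rethrows the rest. A
standalone illustration of the pitfall (hypothetical class, not the Hive
client itself):

    // Standalone illustration (hypothetical, not the Hive client) of the
    // stray-semicolon pitfall fixed above.
    public class EmptyIfPitfall {
        public static void main(String[] args) {
            String message = "plain metastore failure";

            // Buggy shape: the ';' closes the if with an empty body, so the
            // assignment below runs no matter what the regex says.
            Exception caught = null;
            if (message.matches("(?s).*(IO|TTransport)Exception.*"));
            caught = new Exception(message);
            System.out.println("buggy path retries: " + (caught != null)); // always true

            // Fixed shape: keep only transport-looking failures for retry.
            Exception e = new Exception(message);
            if (e.getMessage().matches("(?s).*(IO|TTransport)Exception.*")) {
                caught = e;                                  // transient: retry
            } else {
                System.out.println("fixed path rethrows: " + e.getMessage());
            }
        }
    }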


[14/50] [abbrv] hive git commit: HIVE-10610 : hive command fails to get hadoop version (Shwetha G S via Sushanth Sowmyan)

Posted by xu...@apache.org.
HIVE-10610 : hive command fails to get hadoop version (Shwetha G S via Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/377ba4ba
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/377ba4ba
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/377ba4ba

Branch: refs/heads/beeline-cli
Commit: 377ba4ba164d46ca91390d7d38c13e4e2fcda383
Parents: a9d70a0
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 10:58:08 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 10:58:50 2015 -0700

----------------------------------------------------------------------
 bin/hive | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/377ba4ba/bin/hive
----------------------------------------------------------------------
diff --git a/bin/hive b/bin/hive
index 50bd726..5dc93fb 100755
--- a/bin/hive
+++ b/bin/hive
@@ -201,7 +201,7 @@ fi
 
 # Make sure we're using a compatible version of Hadoop
 if [ "x$HADOOP_VERSION" == "x" ]; then
-    HADOOP_VERSION=$($HADOOP version | awk '{if (NR == 1) {print $2;}}');
+    HADOOP_VERSION=$($HADOOP version | awk -F"\t" '/Hadoop/ {print $0}' | cut -d' ' -f 2);
 fi
 
 # Save the regex to a var to workaround quoting incompatabilities
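
The old extraction assumed the version is always field 2 of the first line
of "hadoop version" output; any warning or banner printed before the
"Hadoop x.y.z" line made HADOOP_VERSION garbage. The replacement scans for
the line mentioning Hadoop and takes its second space-separated token. The
same idea as a small sketch (the sample output is invented, and startsWith
is slightly stricter than the awk /Hadoop/ match, which matches anywhere in
the line):

    // Small sketch of the parsing change above; the sample output is
    // invented, not captured from a real install.
    public class HadoopVersionParseSketch {
        static String parse(String output) {
            for (String line : output.split("\n")) {
                if (line.startsWith("Hadoop ")) {
                    return line.split("\\s+")[1];  // the version token, e.g. 2.6.0
                }
            }
            return null;                           // no version line found
        }

        public static void main(String[] args) {
            String sample = "WARNING: some JVM option notice\n"
                          + "Hadoop 2.6.0\n"
                          + "Subversion https://example.invalid/hadoop -r abc123\n";
            // Field 2 of line 1 would be "some"; scanning finds 2.6.0.
            System.out.println(parse(sample));
        }
    }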


[31/50] [abbrv] hive git commit: HIVE-10542: Full outer joins in tez produce incorrect results in certain cases (Vikram Dixit K, reviewed by Gunther Hagleitner)

Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/test/results/clientpositive/tez/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mergejoin.q.out b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
index 48cd2a1..97df12a 100644
--- a/ql/src/test/results/clientpositive/tez/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
@@ -1,7 +1,11 @@
-PREHOOK: query: explain
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
 select * from src a join src1 b on a.key = b.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
 select * from src a join src1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -347,74 +351,12 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
-2	val_2	2008-04-08	2	val_2	2008-04-08
-4	val_4	2008-04-08	4	val_4	2008-04-08
-8	val_8	2008-04-08	8	val_8	2008-04-08
-11	val_11	2008-04-08	11	val_11	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-17	val_17	2008-04-08	17	val_17	2008-04-08
-19	val_19	2008-04-08	19	val_19	2008-04-08
-20	val_20	2008-04-08	20	val_20	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-28	val_28	2008-04-08	28	val_28	2008-04-08
-33	val_33	2008-04-08	33	val_33	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-44	val_44	2008-04-08	44	val_44	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-53	val_53	2008-04-08	53	val_53	2008-04-08
-57	val_57	2008-04-08	57	val_57	2008-04-08
-64	val_64	2008-04-08	64	val_64	2008-04-08
-66	val_66	2008-04-08	66	val_66	2008-04-08
-77	val_77	2008-04-08	77	val_77	2008-04-08
-80	val_80	2008-04-08	80	val_80	2008-04-08
-82	val_82	2008-04-08	82	val_82	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-86	val_86	2008-04-08	86	val_86	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 105	val_105	2008-04-08	105	val_105	2008-04-08
+11	val_11	2008-04-08	11	val_11	2008-04-08
 114	val_114	2008-04-08	114	val_114	2008-04-08
 116	val_116	2008-04-08	116	val_116	2008-04-08
 118	val_118	2008-04-08	118	val_118	2008-04-08
@@ -456,6 +398,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
 150	val_150	2008-04-08	150	val_150	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
@@ -493,6 +439,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
+17	val_17	2008-04-08	17	val_17	2008-04-08
 170	val_170	2008-04-08	170	val_170	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
@@ -519,10 +466,13 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 189	val_189	2008-04-08	189	val_189	2008-04-08
+19	val_19	2008-04-08	19	val_19	2008-04-08
 190	val_190	2008-04-08	190	val_190	2008-04-08
 192	val_192	2008-04-08	192	val_192	2008-04-08
 194	val_194	2008-04-08	194	val_194	2008-04-08
 196	val_196	2008-04-08	196	val_196	2008-04-08
+2	val_2	2008-04-08	2	val_2	2008-04-08
+20	val_20	2008-04-08	20	val_20	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
@@ -569,6 +519,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
@@ -580,6 +534,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 257	val_257	2008-04-08	257	val_257	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
 260	val_260	2008-04-08	260	val_260	2008-04-08
 262	val_262	2008-04-08	262	val_262	2008-04-08
 266	val_266	2008-04-08	266	val_266	2008-04-08
@@ -609,6 +567,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
+28	val_28	2008-04-08	28	val_28	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
@@ -670,11 +629,21 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
+33	val_33	2008-04-08	33	val_33	2008-04-08
 332	val_332	2008-04-08	332	val_332	2008-04-08
 336	val_336	2008-04-08	336	val_336	2008-04-08
 338	val_338	2008-04-08	338	val_338	2008-04-08
 341	val_341	2008-04-08	341	val_341	2008-04-08
 345	val_345	2008-04-08	345	val_345	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
 356	val_356	2008-04-08	356	val_356	2008-04-08
 365	val_365	2008-04-08	365	val_365	2008-04-08
 367	val_367	2008-04-08	367	val_367	2008-04-08
@@ -690,6 +659,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
 374	val_374	2008-04-08	374	val_374	2008-04-08
 378	val_378	2008-04-08	378	val_378	2008-04-08
 389	val_389	2008-04-08	389	val_389	2008-04-08
@@ -704,6 +677,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
+4	val_4	2008-04-08	4	val_4	2008-04-08
 400	val_400	2008-04-08	400	val_400	2008-04-08
 402	val_402	2008-04-08	402	val_402	2008-04-08
 404	val_404	2008-04-08	404	val_404	2008-04-08
@@ -741,6 +715,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 419	val_419	2008-04-08	419	val_419	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
@@ -760,6 +738,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
+44	val_44	2008-04-08	44	val_44	2008-04-08
 444	val_444	2008-04-08	444	val_444	2008-04-08
 446	val_446	2008-04-08	446	val_446	2008-04-08
 448	val_448	2008-04-08	448	val_448	2008-04-08
@@ -818,68 +797,6 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 493	val_493	2008-04-08	493	val_493	2008-04-08
 495	val_495	2008-04-08	495	val_495	2008-04-08
 497	val_497	2008-04-08	497	val_497	2008-04-08
-PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
-#### A masked pattern was here ####
-POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
-#### A masked pattern was here ####
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-2	val_2	2008-04-08	2	val_2	2008-04-08
-4	val_4	2008-04-08	4	val_4	2008-04-08
-8	val_8	2008-04-08	8	val_8	2008-04-08
-11	val_11	2008-04-08	11	val_11	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-17	val_17	2008-04-08	17	val_17	2008-04-08
-19	val_19	2008-04-08	19	val_19	2008-04-08
-20	val_20	2008-04-08	20	val_20	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-28	val_28	2008-04-08	28	val_28	2008-04-08
-33	val_33	2008-04-08	33	val_33	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-44	val_44	2008-04-08	44	val_44	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
@@ -889,6 +806,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 64	val_64	2008-04-08	64	val_64	2008-04-08
 66	val_66	2008-04-08	66	val_66	2008-04-08
 77	val_77	2008-04-08	77	val_77	2008-04-08
+8	val_8	2008-04-08	8	val_8	2008-04-08
 80	val_80	2008-04-08	80	val_80	2008-04-08
 82	val_82	2008-04-08	82	val_82	2008-04-08
 84	val_84	2008-04-08	84	val_84	2008-04-08
@@ -904,11 +822,35 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
+PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 105	val_105	2008-04-08	105	val_105	2008-04-08
+11	val_11	2008-04-08	11	val_11	2008-04-08
 114	val_114	2008-04-08	114	val_114	2008-04-08
 116	val_116	2008-04-08	116	val_116	2008-04-08
 118	val_118	2008-04-08	118	val_118	2008-04-08
@@ -950,6 +892,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
 150	val_150	2008-04-08	150	val_150	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
@@ -987,6 +933,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
+17	val_17	2008-04-08	17	val_17	2008-04-08
 170	val_170	2008-04-08	170	val_170	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
@@ -1013,10 +960,13 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 189	val_189	2008-04-08	189	val_189	2008-04-08
+19	val_19	2008-04-08	19	val_19	2008-04-08
 190	val_190	2008-04-08	190	val_190	2008-04-08
 192	val_192	2008-04-08	192	val_192	2008-04-08
 194	val_194	2008-04-08	194	val_194	2008-04-08
 196	val_196	2008-04-08	196	val_196	2008-04-08
+2	val_2	2008-04-08	2	val_2	2008-04-08
+20	val_20	2008-04-08	20	val_20	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
@@ -1063,6 +1013,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
@@ -1074,6 +1028,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 257	val_257	2008-04-08	257	val_257	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
 260	val_260	2008-04-08	260	val_260	2008-04-08
 262	val_262	2008-04-08	262	val_262	2008-04-08
 266	val_266	2008-04-08	266	val_266	2008-04-08
@@ -1103,6 +1061,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
+28	val_28	2008-04-08	28	val_28	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
@@ -1164,11 +1123,21 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
+33	val_33	2008-04-08	33	val_33	2008-04-08
 332	val_332	2008-04-08	332	val_332	2008-04-08
 336	val_336	2008-04-08	336	val_336	2008-04-08
 338	val_338	2008-04-08	338	val_338	2008-04-08
 341	val_341	2008-04-08	341	val_341	2008-04-08
 345	val_345	2008-04-08	345	val_345	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
 356	val_356	2008-04-08	356	val_356	2008-04-08
 365	val_365	2008-04-08	365	val_365	2008-04-08
 367	val_367	2008-04-08	367	val_367	2008-04-08
@@ -1184,6 +1153,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
 374	val_374	2008-04-08	374	val_374	2008-04-08
 378	val_378	2008-04-08	378	val_378	2008-04-08
 389	val_389	2008-04-08	389	val_389	2008-04-08
@@ -1198,6 +1171,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
+4	val_4	2008-04-08	4	val_4	2008-04-08
 400	val_400	2008-04-08	400	val_400	2008-04-08
 402	val_402	2008-04-08	402	val_402	2008-04-08
 404	val_404	2008-04-08	404	val_404	2008-04-08
@@ -1235,6 +1209,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 419	val_419	2008-04-08	419	val_419	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
@@ -1254,6 +1232,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
+44	val_44	2008-04-08	44	val_44	2008-04-08
 444	val_444	2008-04-08	444	val_444	2008-04-08
 446	val_446	2008-04-08	446	val_446	2008-04-08
 448	val_448	2008-04-08	448	val_448	2008-04-08
@@ -1312,6 +1291,31 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 493	val_493	2008-04-08	493	val_493	2008-04-08
 495	val_495	2008-04-08	495	val_495	2008-04-08
 497	val_497	2008-04-08	497	val_497	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+53	val_53	2008-04-08	53	val_53	2008-04-08
+57	val_57	2008-04-08	57	val_57	2008-04-08
+64	val_64	2008-04-08	64	val_64	2008-04-08
+66	val_66	2008-04-08	66	val_66	2008-04-08
+77	val_77	2008-04-08	77	val_77	2008-04-08
+8	val_8	2008-04-08	8	val_8	2008-04-08
+80	val_80	2008-04-08	80	val_80	2008-04-08
+82	val_82	2008-04-08	82	val_82	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+86	val_86	2008-04-08	86	val_86	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
 PREHOOK: query: explain
 select count(*)
 from tab a left outer join tab_part b on a.key = b.key
@@ -2526,3 +2530,589 @@ POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 480
+PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08
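
This is the shape a FULL OUTER JOIN must produce when the two filtered
sides share no keys: every row from both inputs survives, NULL-padded on
the side with no match, which is the behavior HIVE-10542's new test cases
pin down. A compact sketch of the contract (keys mirror the 0-vs-98 case
above; the row counts are illustrative):

    // Compact sketch of FULL OUTER JOIN semantics: unmatched rows from
    // *both* sides are preserved and NULL-padded. Row counts illustrative.
    public class FullOuterJoinSketch {
        public static void main(String[] args) {
            int[] left = {0, 0, 0};     // like (select * from tab where key = 0)
            int[] right = {98, 98};     // like (select * from tab_part where key = 98)
            boolean[] rightMatched = new boolean[right.length];

            for (int l : left) {
                boolean matched = false;
                for (int i = 0; i < right.length; i++) {
                    if (l == right[i]) {
                        System.out.println(l + "\t" + right[i]);
                        matched = true;
                        rightMatched[i] = true;
                    }
                }
                if (!matched) System.out.println(l + "\tNULL");        // keep left row
            }
            for (int i = 0; i < right.length; i++) {
                if (!rightMatched[i]) System.out.println("NULL\t" + right[i]); // keep right row
            }
        }
    }
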
+PREHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+Warning: Shuffle Join MERGEJOIN[17][tables = [a, b]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	10	val_10	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	105	val_105	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	11	val_11	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	111	val_111	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	114	val_114	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	116	val_116	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	126	val_126	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	131	val_131	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	133	val_133	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	136	val_136	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	143	val_143	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	145	val_145	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	150	val_150	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	153	val_153	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	155	val_155	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	156	val_156	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	157	val_157	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	158	val_158	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	160	val_160	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	162	val_162	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	163	val_163	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	166	val_166	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	168	val_168	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	17	val_17	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	170	val_170	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	177	val_177	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	178	val_178	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	180	val_180	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	181	val_181	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	183	val_183	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	186	val_186	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	189	val_189	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	19	val_19	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	190	val_190	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	192	val_192	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	194	val_194	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	196	val_196	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	2	val_2	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	20	val_20	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	201	val_201	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	202	val_202	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	214	val_214	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	218	val_218	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	222	val_222	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	226	val_226	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	228	val_228	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	235	val_235	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	241	val_241	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	244	val_244	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	247	val_247	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	248	val_248	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	249	val_249	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	252	val_252	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	257	val_257	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	258	val_258	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	260	val_260	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	262	val_262	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	263	val_263	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	266	val_266	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	27	val_27	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	274	val_274	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	275	val_275	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	28	val_28	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	283	val_283	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	284	val_284	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	285	val_285	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	286	val_286	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	287	val_287	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	289	val_289	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	291	val_291	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	292	val_292	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	296	val_296	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	30	val_30	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	302	val_302	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	305	val_305	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	306	val_306	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	308	val_308	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	310	val_310	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	315	val_315	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	323	val_323	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	33	val_33	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	332	val_332	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	335	val_335	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	336	val_336	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	338	val_338	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	339	val_339	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	34	val_34	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	341	val_341	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	345	val_345	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	351	val_351	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	356	val_356	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	360	val_360	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	362	val_362	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	364	val_364	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	365	val_365	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	366	val_366	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	368	val_368	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	373	val_373	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	374	val_374	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	375	val_375	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	377	val_377	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	378	val_378	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	379	val_379	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	386	val_386	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	389	val_389	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	392	val_392	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	393	val_393	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	394	val_394	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	4	val_4	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	400	val_400	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	402	val_402	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	407	val_407	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	41	val_41	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	411	val_411	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	418	val_418	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	419	val_419	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	421	val_421	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	427	val_427	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	43	val_43	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	432	val_432	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	435	val_435	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	436	val_436	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	437	val_437	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	44	val_44	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	443	val_443	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	444	val_444	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	446	val_446	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	448	val_448	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	449	val_449	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	452	val_452	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	453	val_453	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	455	val_455	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	457	val_457	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	460	val_460	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	467	val_467	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	47	val_47	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	470	val_470	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	472	val_472	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	475	val_475	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	477	val_477	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	479	val_479	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	481	val_481	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	482	val_482	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	483	val_483	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	484	val_484	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	485	val_485	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	487	val_487	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	490	val_490	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	491	val_491	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	493	val_493	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	494	val_494	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	495	val_495	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	496	val_496	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	497	val_497	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	53	val_53	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	54	val_54	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	57	val_57	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	64	val_64	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	65	val_65	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	66	val_66	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	69	val_69	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	74	val_74	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	77	val_77	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	78	val_78	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	8	val_8	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	80	val_80	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	82	val_82	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	85	val_85	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	86	val_86	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	87	val_87	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	9	val_9	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	92	val_92	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	96	val_96	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08


[49/50] [abbrv] hive git commit: HIVE-9828: Semantic analyzer does not capture view parent entity for tables referred in view with union all (Prasad via Xuefu)

Posted by xu...@apache.org.
HIVE-9828: Semantic analyzer does not capture view parent entity for tables referred in view with union all (Prasad via Xuefu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3e713bcc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3e713bcc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3e713bcc

Branch: refs/heads/beeline-cli
Commit: 3e713bcc1f74c90aba1da654b63b85878ab23768
Parents: 809fcb0
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Sat May 9 02:32:13 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Sat May 9 02:32:13 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  15 +--
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |   4 +
 .../hadoop/hive/ql/plan/TestViewEntity.java     | 108 +++++++++++++++++++
 3 files changed, 121 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
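
For context: the fix tags the two branches of a UNION ALL with the fixed
suffixes "-subquery1" and "-subquery2", then strips those tags again when
the view alias chain is used to look up the parent ReadEntity. The
following self-contained sketch (illustrative only; it mirrors the
constants from the diff below but is not the actual Hive code) shows why
both branches then resolve against the same view input:

    public class SubqueryTagDemo {
      static final String SUBQUERY_TAG_1 = "-subquery1";
      static final String SUBQUERY_TAG_2 = "-subquery2";

      // Strip the union-branch tags so every branch maps back to the
      // same alias chain when resolving the parent view entity.
      static String normalize(String aliasChain) {
        return aliasChain.replace(SUBQUERY_TAG_1, "")
            .replace(SUBQUERY_TAG_2, "");
      }

      public static void main(String[] args) {
        System.out.println(normalize("v1:t-subquery1:t1")); // v1:t:t1
        System.out.println(normalize("v1:t-subquery2:t2")); // v1:t:t2
      }
    }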


http://git-wip-us.apache.org/repos/asf/hive/blob/3e713bcc/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index cbc5466..2993539 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -222,6 +222,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   public static final String DUMMY_DATABASE = "_dummy_database";
   public static final String DUMMY_TABLE = "_dummy_table";
+  public static final String SUBQUERY_TAG_1 = "-subquery1";
+  public static final String SUBQUERY_TAG_2 = "-subquery2";
+
   // Max characters when auto generating the column name with func name
   private static final int AUTOGEN_COLALIAS_PRFX_MAXLENGTH = 20;
 
@@ -429,16 +432,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       qbexpr.setOpcode(QBExpr.Opcode.UNION);
       // query 1
       assert (ast.getChild(0) != null);
-      QBExpr qbexpr1 = new QBExpr(alias + "-subquery1");
-      doPhase1QBExpr((ASTNode) ast.getChild(0), qbexpr1, id + "-subquery1",
-          alias + "-subquery1");
+      QBExpr qbexpr1 = new QBExpr(alias + SUBQUERY_TAG_1);
+      doPhase1QBExpr((ASTNode) ast.getChild(0), qbexpr1, id + SUBQUERY_TAG_1,
+          alias + SUBQUERY_TAG_1);
       qbexpr.setQBExpr1(qbexpr1);
 
       // query 2
       assert (ast.getChild(1) != null);
-      QBExpr qbexpr2 = new QBExpr(alias + "-subquery2");
-      doPhase1QBExpr((ASTNode) ast.getChild(1), qbexpr2, id + "-subquery2",
-          alias + "-subquery2");
+      QBExpr qbexpr2 = new QBExpr(alias + SUBQUERY_TAG_2);
+      doPhase1QBExpr((ASTNode) ast.getChild(1), qbexpr2, id + SUBQUERY_TAG_2,
+          alias + SUBQUERY_TAG_2);
       qbexpr.setQBExpr2(qbexpr2);
     }
       break;

http://git-wip-us.apache.org/repos/asf/hive/blob/3e713bcc/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 80e11a3..87a2548 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -975,6 +976,9 @@ public final class PlanUtils {
     // T's parent would be V1
     for (int pos = 0; pos < aliases.length; pos++) {
       currentAlias = currentAlias == null ? aliases[pos] : currentAlias + ":" + aliases[pos];
+
+      currentAlias = currentAlias.replace(SemanticAnalyzer.SUBQUERY_TAG_1, "")
+          .replace(SemanticAnalyzer.SUBQUERY_TAG_2, "");
       ReadEntity input = viewAliasToInput.get(currentAlias);
       if (input == null) {
         return currentInput;

http://git-wip-us.apache.org/repos/asf/hive/blob/3e713bcc/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java
new file mode 100644
index 0000000..17a4e06
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.plan;
+
+import static org.junit.Assert.*;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
+import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.TestReadEntityDirect.CheckInputReadEntityDirect;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestViewEntity {
+  /**
+   * Hook used in the test to capture the set of ReadEntities
+   */
+  public static class CheckInputReadEntity extends
+      AbstractSemanticAnalyzerHook {
+    public static ReadEntity[] readEntities;
+
+    @Override
+    public void postAnalyze(HiveSemanticAnalyzerHookContext context,
+        List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+      readEntities = context.getInputs().toArray(new ReadEntity[0]);
+    }
+
+  }
+
+  private static Driver driver;
+
+  @BeforeClass
+  public static void onetimeSetup() throws Exception {
+    HiveConf conf = new HiveConf(Driver.class);
+    conf.setVar(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK,
+        CheckInputReadEntity.class.getName());
+    HiveConf
+        .setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    SessionState.start(conf);
+    driver = new Driver(conf);
+    driver.init();
+  }
+
+  @AfterClass
+  public static void cleanUp() throws Exception {
+    driver.close();
+    driver.destroy();
+  }
+
+  /**
+   * Verify that the parent entities are captured correctly for union views
+   * @throws Exception
+   */
+  @Test
+  public void testUnionView() throws Exception {
+    int ret = driver.run("create table t1(id int)").getResponseCode();
+    assertEquals("Checking command success", 0, ret);
+    ret = driver.run("create table t2(id int)").getResponseCode();
+    assertEquals("Checking command success", 0, ret);
+    ret = driver.run("create view v1 as select t.id from "
+            + "(select t1.id from t1 union all select t2.id from t2) as t")
+        .getResponseCode();
+    assertEquals("Checking command success", 0, ret);
+
+    driver.compile("select * from v1");
+    // view entity
+    assertEquals("default@v1", CheckInputReadEntity.readEntities[0].getName());
+
+    // first table in union query with view as parent
+    assertEquals("default@t1", CheckInputReadEntity.readEntities[1].getName());
+    assertEquals("default@v1", CheckInputReadEntity.readEntities[1]
+        .getParents()
+        .iterator().next().getName());
+    // second table in union query with view as parent
+    assertEquals("default@t2", CheckInputReadEntity.readEntities[2].getName());
+    assertEquals("default@v1", CheckInputReadEntity.readEntities[2]
+        .getParents()
+        .iterator().next().getName());
+
+  }
+
+}


[20/50] [abbrv] hive git commit: HIVE-10611: Mini tez tests wait for 5 minutes before shutting down (Vikram Dixit K, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-10611: Mini tez tests wait for 5 minutes before shutting down (Vikram Dixit K, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/632a3090
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/632a3090
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/632a3090

Branch: refs/heads/beeline-cli
Commit: 632a30908d4680c0ee31ba04e4cfc9da3554e4f5
Parents: 18fb460
Author: vikram <vi...@hortonworks.com>
Authored: Wed May 6 14:28:23 2015 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Wed May 6 14:30:14 2015 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hive/ql/QTestUtil.java |  3 +++
 .../hadoop/hive/ql/exec/tez/TezSessionState.java       | 13 +++++++------
 2 files changed, 10 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
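
The five-minute delay appears to come from leaving the Tez session to be
torn down by its own timeout instead of closing it when the test run
ends. A minimal sketch of the pattern, with a hypothetical Session type
standing in for TezSessionState (this is not the Hive API):

    // Hedged sketch: close the long-lived session eagerly in teardown
    // rather than waiting for a timeout-driven shutdown.
    interface Session extends AutoCloseable {
      boolean isOpen();
      void close() throws Exception; // releases the app master eagerly
    }

    final class TestHarness {
      private final Session session;

      TestHarness(Session session) { this.session = session; }

      void tearDown() throws Exception {
        if (session != null && session.isOpen()) {
          session.close(); // mirrors the QTestUtil change below
        }
      }
    }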


http://git-wip-us.apache.org/repos/asf/hive/blob/632a3090/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index d1104b3..3e29d3c 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -430,6 +430,9 @@ public class QTestUtil {
       cleanUp();
     }
 
+    if (clusterType == MiniClusterType.tez) {
+      SessionState.get().getTezSession().close(false);
+    }
     setup.tearDown();
     if (sparkSession != null) {
       try {

http://git-wip-us.apache.org/repos/asf/hive/blob/632a3090/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index 89286e5..cef3303 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -249,13 +249,14 @@ public class TezSessionState {
   }
 
   /**
-   * Close a tez session. Will cleanup any tez/am related resources. After closing a session
-   * no further DAGs can be executed against it.
-   * @param keepTmpDir whether or not to remove the scratch dir at the same time.
-   * @throws IOException
-   * @throws TezException
+   * Close a tez session. Will cleanup any tez/am related resources. After closing a session no
+   * further DAGs can be executed against it.
+   * 
+   * @param keepTmpDir
+   *          whether or not to remove the scratch dir at the same time.
+   * @throws Exception
    */
-  public void close(boolean keepTmpDir) throws TezException, IOException {
+  public void close(boolean keepTmpDir) throws Exception {
     if (!isOpen()) {
       return;
     }


[05/50] [abbrv] hive git commit: HIVE-10529: Remove references to tez task context before storing operator plan in object cache (Rajesh Balamohan reviewed by Gunther Hagleitner)

Posted by xu...@apache.org.
HIVE-10529: Remove references to tez task context before storing operator plan in object cache (Rajesh Balamohan reviewed by Gunther Hagleitner)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4aff07e3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4aff07e3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4aff07e3

Branch: refs/heads/beeline-cli
Commit: 4aff07e3e8da9b6f946df605e369f1054e76823a
Parents: e2a12c9
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Tue May 5 11:04:54 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Tue May 5 11:04:54 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
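
The one-line fix below drops the operator's reference to its
HashTableLoader before the plan is stored in the object cache, so a
cached plan no longer retains the Tez task context transitively. A
generic sketch of the idiom, with hypothetical names rather than the
Hive classes:

    // Sketch: null out task-scoped fields before an object is handed to
    // a long-lived cache, so the cache cannot pin per-task resources.
    final class CacheableOperator {
      private Object taskScopedLoader; // stands in for HashTableLoader

      void closeOp(boolean abort) {
        // ... release per-task resources first ...
        taskScopedLoader = null; // break the reference before caching
      }
    }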


http://git-wip-us.apache.org/repos/asf/hive/blob/4aff07e3/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 1cfc411..b1352f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -77,7 +77,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
   private transient String cacheKey;
   private transient ObjectCache cache;
 
-  private HashTableLoader loader;
+  protected HashTableLoader loader;
   private boolean loadCalled;
 
   protected transient MapJoinTableContainer[] mapJoinTables;
@@ -528,6 +528,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
       clearAllTableContainers();
     }
 
+    this.loader = null;
     super.closeOp(abort);
   }
 


[44/50] [abbrv] hive git commit: HIVE-10530: Aggregate stats cache: bug fixes for RDBMS path (Vaibhav Gumashta reviewed by Mostafa Mokhtar, Thejas Nair)

Posted by xu...@apache.org.
HIVE-10530: Aggregate stats cache: bug fixes for RDBMS path (Vaibhav Gumashta reviewed by Mostafa Mokhtar, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4a0ccd11
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4a0ccd11
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4a0ccd11

Branch: refs/heads/beeline-cli
Commit: 4a0ccd11f56f4e47b76eae4e60668e78bedfc20b
Parents: 3633db2
Author: Vaibhav Gumashta <vg...@apache.org>
Authored: Thu May 7 13:58:34 2015 -0700
Committer: Vaibhav Gumashta <vg...@apache.org>
Committed: Thu May 7 13:58:34 2015 -0700

----------------------------------------------------------------------
 .../hive/metastore/AggregateStatsCache.java     | 33 +++++++++-----------
 .../hive/metastore/MetaStoreDirectSql.java      | 24 +++++++++-----
 .../test/queries/clientpositive/explainuser_2.q |  1 +
 .../extrapolate_part_stats_partial.q            |  2 ++
 .../extrapolate_part_stats_partial_ndv.q        |  2 ++
 .../queries/clientpositive/mapjoin_mapjoin.q    |  1 +
 6 files changed, 37 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
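
One of the bugs fixed here is a unit mismatch: the TTL was read in
seconds but compared against a System.currentTimeMillis() delta, so a
600-second TTL behaved like 600 ms and cache nodes were treated as stale
almost immediately. The diff converts the value to milliseconds once, at
read time. A hedged sketch of that fix in isolation:

    import java.util.concurrent.TimeUnit;

    // Keep the TTL in the same unit as the timestamps it is compared
    // against; convert once when the configuration is read.
    final class TtlCheck {
      private final long timeToLiveMs;

      TtlCheck(long ttl, TimeUnit unit) {
        this.timeToLiveMs = unit.toMillis(ttl); // e.g. SECONDS to ms
      }

      boolean isExpired(long lastAccessTimeMs) {
        return (System.currentTimeMillis() - lastAccessTimeMs) > timeToLiveMs;
      }
    }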


http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
index 6a85936..44106f5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
@@ -55,7 +55,7 @@ public class AggregateStatsCache {
   // Run the cleaner thread until cache is cleanUntil% occupied
   private final float cleanUntil;
   // Nodes go stale after this
-  private final long timeToLive;
+  private final long timeToLiveMs;
   // Max time when waiting for write locks on node list
   private final long maxWriterWaitTime;
   // Max time when waiting for read locks on node list
@@ -73,12 +73,12 @@ public class AggregateStatsCache {
   // To track cleaner metrics
   int numRemovedTTL = 0, numRemovedLRU = 0;
 
-  private AggregateStatsCache(int maxCacheNodes, int maxPartsPerCacheNode, long timeToLive,
+  private AggregateStatsCache(int maxCacheNodes, int maxPartsPerCacheNode, long timeToLiveMs,
       float falsePositiveProbability, float maxVariance, long maxWriterWaitTime,
       long maxReaderWaitTime, float maxFull, float cleanUntil) {
     this.maxCacheNodes = maxCacheNodes;
     this.maxPartsPerCacheNode = maxPartsPerCacheNode;
-    this.timeToLive = timeToLive;
+    this.timeToLiveMs = timeToLiveMs;
     this.falsePositiveProbability = falsePositiveProbability;
     this.maxVariance = maxVariance;
     this.maxWriterWaitTime = maxWriterWaitTime;
@@ -97,9 +97,9 @@ public class AggregateStatsCache {
       int maxPartitionsPerCacheNode =
           HiveConf
               .getIntVar(conf, HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS);
-      long timeToLive =
+      long timeToLiveMs =
           HiveConf.getTimeVar(conf, HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL,
-              TimeUnit.SECONDS);
+              TimeUnit.SECONDS)*1000;
       // False positives probability we are ready to tolerate for the underlying bloom filter
       float falsePositiveProbability =
           HiveConf.getFloatVar(conf, HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP);
@@ -120,7 +120,7 @@ public class AggregateStatsCache {
       float cleanUntil =
           HiveConf.getFloatVar(conf, HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL);
       self =
-          new AggregateStatsCache(maxCacheNodes, maxPartitionsPerCacheNode, timeToLive,
+          new AggregateStatsCache(maxCacheNodes, maxPartitionsPerCacheNode, timeToLiveMs,
               falsePositiveProbability, maxVariance, maxWriterWaitTime, maxReaderWaitTime, maxFull,
               cleanUntil);
     }
@@ -213,7 +213,7 @@ public class AggregateStatsCache {
    * @return best matched node or null
    */
   private AggrColStats findBestMatch(List<String> partNames, List<AggrColStats> candidates) {
-    // Hits, misses, shouldSkip for a node
+    // Hits, misses tracked for a candidate node
     MatchStats matchStats;
     // MatchStats for each candidate
     Map<AggrColStats, MatchStats> candidateMatchStats = new HashMap<AggrColStats, MatchStats>();
@@ -227,26 +227,23 @@ public class AggregateStatsCache {
     // Note: we're not creating a copy of the list for saving memory
     for (AggrColStats candidate : candidates) {
       // Variance check
-      if ((float) Math.abs((candidate.getNumPartsCached() - numPartsRequested)
-          / numPartsRequested) > maxVariance) {
+      if ((float) Math.abs((candidate.getNumPartsCached() - numPartsRequested) / numPartsRequested)
+          > maxVariance) {
         continue;
       }
       // TTL check
       if (isExpired(candidate)) {
         continue;
-      }
-      else {
+      } else {
         candidateMatchStats.put(candidate, new MatchStats(0, 0));
       }
     }
     // We'll count misses as we iterate
+    int maxMisses = (int) (maxVariance * numPartsRequested);
     for (String partName : partNames) {
-      for (AggrColStats candidate : candidates) {
-        matchStats = candidateMatchStats.get(candidate);
-        if (matchStats == null) {
-          continue;
-        }
+      for (Map.Entry<AggrColStats, MatchStats> entry : candidateMatchStats.entrySet()) {
+        AggrColStats candidate = entry.getKey();
+        matchStats = entry.getValue();
         if (candidate.getBloomFilter().test(partName.getBytes())) {
           ++matchStats.hits;
         } else {
@@ -464,7 +461,7 @@ public class AggregateStatsCache {
   }
 
   private boolean isExpired(AggrColStats aggrColStats) {
-    return System.currentTimeMillis() - aggrColStats.lastAccessTime > timeToLive;
+    return (System.currentTimeMillis() - aggrColStats.lastAccessTime) > timeToLiveMs;
   }
 
   /**
@@ -502,7 +499,7 @@ public class AggregateStatsCache {
 
     @Override
     public String toString() {
-      return "Database: " + dbName + ", Table: " + tblName + ", Column: " + colName;
+      return "database:" + dbName + ", table:" + tblName + ", column:" + colName;
     }
 
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 5ef3b9a..8bee978 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -1106,24 +1106,23 @@ class MetaStoreDirectSql {
     if (isAggregateStatsCacheEnabled) {
       AggrColStats colStatsAggrCached;
       List<ColumnStatisticsObj> colStatsAggrFromDB;
-      int maxPartitionsPerCacheNode = aggrStatsCache.getMaxPartsPerCacheNode();
-      float falsePositiveProbability = aggrStatsCache.getFalsePositiveProbability();
+      int maxPartsPerCacheNode = aggrStatsCache.getMaxPartsPerCacheNode();
+      float fpp = aggrStatsCache.getFalsePositiveProbability();
       int partitionsRequested = partNames.size();
-      if (partitionsRequested > maxPartitionsPerCacheNode) {
+      if (partitionsRequested > maxPartsPerCacheNode) {
         colStatsList =
             columnStatisticsObjForPartitions(dbName, tableName, partNames, colNames, partsFound,
                 useDensityFunctionForNDVEstimation);
       } else {
         colStatsList = new ArrayList<ColumnStatisticsObj>();
+        // Bloom filter for the new node that we will eventually add to the cache
+        BloomFilter bloomFilter = createPartsBloomFilter(maxPartsPerCacheNode, fpp, partNames);
         for (String colName : colNames) {
           // Check the cache first
           colStatsAggrCached = aggrStatsCache.get(dbName, tableName, colName, partNames);
           if (colStatsAggrCached != null) {
             colStatsList.add(colStatsAggrCached.getColStats());
           } else {
-            // Bloom filter for the new node that we will eventually add to the cache
-            BloomFilter bloomFilter =
-                new BloomFilter(maxPartitionsPerCacheNode, falsePositiveProbability);
             List<String> colNamesForDB = new ArrayList<String>();
             colNamesForDB.add(colName);
             // Read aggregated stats for one column
@@ -1148,6 +1147,15 @@ class MetaStoreDirectSql {
     return new AggrStats(colStatsList, partsFound);
   }
 
+  private BloomFilter createPartsBloomFilter(int maxPartsPerCacheNode, float fpp,
+      List<String> partNames) {
+    BloomFilter bloomFilter = new BloomFilter(maxPartsPerCacheNode, fpp);
+    for (String partName : partNames) {
+      bloomFilter.add(partName.getBytes());
+    }
+    return bloomFilter;
+  }
+
   private long partsFoundForPartitions(String dbName, String tableName,
       List<String> partNames, List<String> colNames) throws MetaException {
     long partsFound = 0;
@@ -1174,8 +1182,8 @@ class MetaStoreDirectSql {
   }
 
   private List<ColumnStatisticsObj> columnStatisticsObjForPartitions(String dbName,
-      String tableName, List<String> partNames, List<String> colNames, long partsFound, boolean useDensityFunctionForNDVEstimation)
-      throws MetaException {
+      String tableName, List<String> partNames, List<String> colNames, long partsFound,
+      boolean useDensityFunctionForNDVEstimation) throws MetaException {
     // TODO: all the extrapolation logic should be moved out of this class,
     // only mechanical data retrieval should remain here.
     String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/ql/src/test/queries/clientpositive/explainuser_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_2.q b/ql/src/test/queries/clientpositive/explainuser_2.q
index 8e8ac92..6e98fa0 100644
--- a/ql/src/test/queries/clientpositive/explainuser_2.q
+++ b/ql/src/test/queries/clientpositive/explainuser_2.q
@@ -1,4 +1,5 @@
 set hive.explain.user=true;
+set hive.metastore.aggregate.stats.cache.enabled=false;
 
 CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
index 8ae9a90..5c062ee 100644
--- a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
+++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
@@ -1,6 +1,8 @@
 set hive.stats.fetch.column.stats=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.metastore.aggregate.stats.cache.enabled=false;
+
 
 create table if not exists ext_loc (
   state string,

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
index b7fc4e3..5f0160a 100644
--- a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
+++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
@@ -2,6 +2,8 @@ set hive.metastore.stats.ndv.densityfunction=true;
 set hive.stats.fetch.column.stats=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.metastore.aggregate.stats.cache.enabled=false;
+
 
 drop table if exists ext_loc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q b/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
index 5bf4ab1..7f66ff2 100644
--- a/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
@@ -1,6 +1,7 @@
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;
+set hive.metastore.aggregate.stats.cache.enabled=false;
 
 -- Since the inputs are small, it should be automatically converted to mapjoin
 


[17/50] [abbrv] hive git commit: HIVE-10239: Create scripts to do metastore upgrade tests on jenkins for Derby and PostgreSQL (Naveen Gangam, reviewed by Sergio Pena)

Posted by xu...@apache.org.
HIVE-10239: Create scripts to do metastore upgrade tests on jenkins for Derby and PostgreSQL (Naveen Gangam, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/39972026
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/39972026
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/39972026

Branch: refs/heads/beeline-cli
Commit: 399720263d875897cdd31a3de2521872cc565eb7
Parents: d39c829
Author: Sergio Pena <se...@cloudera.com>
Authored: Wed May 6 14:29:23 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Wed May 6 14:29:23 2015 -0500

----------------------------------------------------------------------
 metastore/dbs/derby/execute.sh    | 37 +++++++++++++++++
 metastore/dbs/derby/prepare.sh    | 63 +++++++++++++++++++++++++++++
 metastore/dbs/postgres/execute.sh | 29 ++++++++++++++
 metastore/dbs/postgres/prepare.sh | 72 ++++++++++++++++++++++++++++++++++
 4 files changed, 201 insertions(+)
----------------------------------------------------------------------
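
The prepare.sh scripts below install and initialize a database, and
execute.sh runs an upgrade script through the native client (ij for
Derby, psql for PostgreSQL). One way to sanity-check the outcome, shown
here purely as an illustration (it assumes the metastore schema's
VERSION table, the credentials from prepare.sh, and a PostgreSQL JDBC
driver on the classpath), is to read back the recorded schema version:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class SchemaVersionCheck {
      public static void main(String[] args) throws Exception {
        // Assumed URL and credentials, matching prepare.sh below.
        String url = "jdbc:postgresql://localhost/hive_hms_testing";
        try (Connection c = DriverManager.getConnection(url, "hiveuser", "hivepw");
             Statement s = c.createStatement();
             ResultSet rs = s.executeQuery("SELECT \"SCHEMA_VERSION\" FROM \"VERSION\"")) {
          while (rs.next()) {
            System.out.println("metastore schema version: " + rs.getString(1));
          }
        }
      }
    }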


http://git-wip-us.apache.org/repos/asf/hive/blob/39972026/metastore/dbs/derby/execute.sh
----------------------------------------------------------------------
diff --git a/metastore/dbs/derby/execute.sh b/metastore/dbs/derby/execute.sh
new file mode 100644
index 0000000..d60f05b
--- /dev/null
+++ b/metastore/dbs/derby/execute.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script executes all hive metastore upgrade scripts on a specific
+# database server in order to verify that upgrade scripts are working
+# properly.
+
+cd $(dirname $1)
+
+echo "####################################################"
+echo "Executing script for Derby SQL: $1"
+echo "####################################################"
+
+export DERBY_HOME=/usr/share/javadb
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+export PATH=$PATH:$DERBY_HOME/bin:$JAVA_HOME/bin
+export CLASSPATH=$CLASSPATH:$DERBY_HOME/lib/derby.jar:$DERBY_HOME/lib/derbytools.jar:$DERBY_HOME/lib/derbyclient.jar
+
+echo "connect 'jdbc:derby:/tmp/hive_hms_testing;create=true';" > /tmp/derbyRun.sql
+echo "run '$1';" >> /tmp/derbyRun.sql
+echo "quit;" >> /tmp/derbyRun.sql
+
+ij /tmp/derbyRun.sql

http://git-wip-us.apache.org/repos/asf/hive/blob/39972026/metastore/dbs/derby/prepare.sh
----------------------------------------------------------------------
diff --git a/metastore/dbs/derby/prepare.sh b/metastore/dbs/derby/prepare.sh
new file mode 100644
index 0000000..fe4b2c3
--- /dev/null
+++ b/metastore/dbs/derby/prepare.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script prepares (installs and configures) a database server that
+# is then used to verify that the hive metastore upgrade scripts are
+# working properly.
+
+export DEBIAN_FRONTEND=noninteractive
+OS_VERSION=`lsb_release -c | cut -d":" -f2`
+
+echo "####################################################"
+echo "Begin for OS version $OS_VERSION"
+echo "####################################################"
+
+HTTPS_INFO=($(dpkg -l apt-transport-https | grep ^i | tr -s ' '))
+if [[ ${HTTPS_INFO[1]} == "apt-transport-https" ]]
+then
+  echo "apt-transport-https package installed"
+else
+  echo "apt-transport-https package not installed"
+  apt-get install -y --force-yes apt-transport-https
+fi
+
+INSTALL_INFO=($(dpkg -l \*javadb-core\* | grep ^ii | tr -s ' '))
+
+if [[ ${INSTALL_INFO[1]} == "sun-javadb-core" ]]
+then
+  echo "Derby already installed...Skipping"
+else
+  echo "Derby not installed"
+  # Cleanup existing installation + configuration.
+  apt-get purge -y --force-yes derby-tools sun-javadb-client sun-javadb-core sun-javadb-common libderby-java openjdk-7-jre openjdk-7-jre openjdk-7-jre-headless || /bin/true
+  echo "####################################################"
+  echo "Installing Derby dependencies:"
+  echo "####################################################"
+  apt-get update || /bin/true
+  apt-get install -y --force-yes -o Dpkg::Options::="--force-overwrite" sun-javadb-core sun-javadb-client derby-tools
+fi
+
+export DERBY_HOME=/usr/share/javadb
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+export PATH=$PATH:/usr/share/javadb/bin:$JAVA_HOME/bin
+export CLASSPATH=$CLASSPATH:$DERBY_HOME/lib/derby.jar:$DERBY_HOME/lib/derbytools.jar:$DERBY_HOME/lib/derbyclient.jar
+rm -rf /tmp/hive_hms_testing;
+
+echo "connect 'jdbc:derby:/tmp/hive_hms_testing;create=true';" > /tmp/derbyInit.sql
+ij /tmp/derbyInit.sql 
+
+echo "DONE!!!"
+

http://git-wip-us.apache.org/repos/asf/hive/blob/39972026/metastore/dbs/postgres/execute.sh
----------------------------------------------------------------------
diff --git a/metastore/dbs/postgres/execute.sh b/metastore/dbs/postgres/execute.sh
new file mode 100644
index 0000000..cabcae4
--- /dev/null
+++ b/metastore/dbs/postgres/execute.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script executes all hive metastore upgrade scripts on a specific
+# database server in order to verify that upgrade scripts are working
+# properly.
+
+cd $(dirname $1)
+
+echo "####################################################"
+echo "Executing script for PostgreSQL: $1"
+echo "####################################################"
+
+export PGPASSWORD=hivepw
+psql -h localhost -U hiveuser -d hive_hms_testing -f $1

http://git-wip-us.apache.org/repos/asf/hive/blob/39972026/metastore/dbs/postgres/prepare.sh
----------------------------------------------------------------------
diff --git a/metastore/dbs/postgres/prepare.sh b/metastore/dbs/postgres/prepare.sh
new file mode 100644
index 0000000..2036354
--- /dev/null
+++ b/metastore/dbs/postgres/prepare.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script prepares (installs and configures) a database server that
+# is then used to verify that the hive metastore upgrade scripts are
+# working properly.
+
+export DEBIAN_FRONTEND=noninteractive
+OS_VERSION=`lsb_release -c | cut -d":" -f2`
+echo "$OS_VERSION"
+
+echo "####################################################"
+echo "Begin for OS version $OS_VERSION"
+echo "####################################################"
+
+HTTPS_INFO=($(dpkg -l apt-transport-https | grep ^i | tr -s ' '))
+if [[ ${HTTPS_INFO[1]} == "apt-transport-https" ]]
+then
+  echo "apt-transport-https package installed"
+else
+  echo "apt-transport-https package not installed"
+  apt-get install -y --force-yes apt-transport-https
+fi
+
+INSTALL_INFO=($(dpkg -l postgresql-9.4\* | grep ^i | tr -s ' '))
+
+if [[ ${INSTALL_INFO[1]} == "postgresql-9.4" ]]
+then
+  echo "PostgreSQL already installed...Skipping"
+else
+  echo "PostgreSQL not installed"
+  # Cleanup existing installation + configuration.
+  apt-get purge -y --force-yes postgresql-9.4 || /bin/true
+  echo "####################################################"
+  echo "Installing PostgreSQL dependencies:"
+  echo "####################################################"
+  if grep -q "deb http://apt.postgresql.org/pub/repos/apt/ $OS_VERSION-pgdg main" /etc/apt/sources.list.d/postgreSQL.list
+  then
+    echo "Sources already listed"
+  else
+    echo "deb http://apt.postgresql.org/pub/repos/apt/ $OS_VERSION-pgdg main" >> /etc/apt/sources.list.d/postgreSQL.list
+  fi
+
+  wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+  apt-get update  || /bin/true
+  apt-get install -y --force-yes postgresql-9.4
+fi
+
+echo "####################################################"
+echo "Configuring PostgreSQL Environment:"
+echo "####################################################"
+echo "drop database if exists hive_hms_testing;" > /tmp/postgresInit.sql
+echo "drop user if exists hiveuser;" >> /tmp/postgresInit.sql
+echo "create user hiveuser createdb createuser password 'hivepw';" >> /tmp/postgresInit.sql
+echo "create database hive_hms_testing owner hiveuser;" >> /tmp/postgresInit.sql
+sudo -u postgres psql -f /tmp/postgresInit.sql
+
+echo "DONE!!!"
+
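
A quick hand-run sanity check (not part of the commit) that the prepared
server accepts the credentials the upgrade scripts expect:

  export PGPASSWORD=hivepw
  psql -h localhost -U hiveuser -d hive_hms_testing -c '\conninfo'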


[11/50] [abbrv] hive git commit: HIVE-10607 : Combination of ReducesinkDedup + TopN optimization yields incorrect result if there are multiple GBY in reducer (Ashutosh Chauhan via Sergey Shelukhin)

Posted by xu...@apache.org.
HIVE-10607 : Combination of ReducesinkDedup + TopN optimization yields incorrect result if there are multiple GBY in reducer (Ashutosh Chauhan via Sergey Shelukhin)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c0116739
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c0116739
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c0116739

Branch: refs/heads/beeline-cli
Commit: c0116739972bcffcc65498eb721f6b8c1b8e305d
Parents: eefb071
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Mon May 4 22:25:12 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue May 5 23:52:38 2015 -0700

----------------------------------------------------------------------
 .../ql/optimizer/LimitPushdownOptimizer.java    |  9 +-
 .../queries/clientpositive/limit_pushdown.q     |  4 +
 .../results/clientpositive/limit_pushdown.q.out | 88 ++++++++++++++++++
 .../clientpositive/spark/limit_pushdown.q.out   | 94 ++++++++++++++++++++
 .../clientpositive/tez/limit_pushdown.q.out     | 94 ++++++++++++++++++++
 5 files changed, 288 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
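
To see the guarded shape end-to-end, one can run (a sketch, not part of
the commit; assumes a local Hive CLI and the alltypesorc table from the
test suite):

  hive -e "explain select ctinyint, count(cdouble) from (select ctinyint,
    cdouble from alltypesorc group by ctinyint, cdouble) t1 group by
    ctinyint order by ctinyint limit 20"

In the resulting plan (see the limit_pushdown.q.out diffs below) the
Reduce Output Operator carries no "TopN Hash Memory Usage" entry: top-N
pushdown is now skipped whenever more than one Group By Operator follows
the ReduceSink.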


http://git-wip-us.apache.org/repos/asf/hive/blob/c0116739/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
index f80941e..e850550 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.LimitOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
@@ -86,6 +87,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
  */
 public class LimitPushdownOptimizer implements Transform {
 
+  @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
     opRules.put(new RuleRegExp("R1",
@@ -105,6 +107,7 @@ public class LimitPushdownOptimizer implements Transform {
 
   private static class TopNReducer implements NodeProcessor {
 
+    @Override
     public Object process(Node nd, Stack<Node> stack,
         NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
       ReduceSinkOperator rs = null;
@@ -122,6 +125,10 @@ public class LimitPushdownOptimizer implements Transform {
         }
       }
       if (rs != null) {
+        if (OperatorUtils.findOperators(rs, GroupByOperator.class).size() > 1){
+          // Not safe to continue for RS-GBY-GBY-LIM kind of pipelines. See HIVE-10607 for more.
+          return false;
+        }
         LimitOperator limit = (LimitOperator) nd;
         rs.getConf().setTopN(limit.getConf().getLimit());
         rs.getConf().setTopNMemoryUsage(((LimitPushdownContext) procCtx).threshold);
@@ -135,7 +142,7 @@ public class LimitPushdownOptimizer implements Transform {
 
   private static class LimitPushdownContext implements NodeProcessorCtx {
 
-    private float threshold;
+    private final float threshold;
 
     public LimitPushdownContext(HiveConf conf) throws SemanticException {
       threshold = conf.getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE);

http://git-wip-us.apache.org/repos/asf/hive/blob/c0116739/ql/src/test/queries/clientpositive/limit_pushdown.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/limit_pushdown.q b/ql/src/test/queries/clientpositive/limit_pushdown.q
index d93e246..3940564 100644
--- a/ql/src/test/queries/clientpositive/limit_pushdown.q
+++ b/ql/src/test/queries/clientpositive/limit_pushdown.q
@@ -31,6 +31,10 @@ explain
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20;
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20;
 
+explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20;
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20;
+
 -- multi distinct
 explain
 select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20;

http://git-wip-us.apache.org/repos/asf/hive/blob/c0116739/ql/src/test/results/clientpositive/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/limit_pushdown.q.out b/ql/src/test/results/clientpositive/limit_pushdown.q.out
index c7ab7b3..6ace047 100644
--- a/ql/src/test/results/clientpositive/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/limit_pushdown.q.out
@@ -504,6 +504,94 @@ POSTHOOK: Input: default@alltypesorc
 -63	19
 -64	24
 NULL	2932
+PREHOOK: query: explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: ctinyint (type: tinyint), cdouble (type: double)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: _col0 (type: tinyint), _col1 (type: double)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: tinyint), _col1 (type: double)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: tinyint)
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+          Group By Operator
+            aggregations: count(_col1)
+            keys: _col0 (type: tinyint)
+            mode: complete
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 20
+              Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-46	24
+-47	22
+-48	29
+-49	26
+-50	30
+-51	21
+-52	33
+-53	22
+-54	26
+-55	29
+-56	36
+-57	35
+-58	23
+-59	31
+-60	27
+-61	25
+-62	27
+-63	19
+-64	24
+NULL	2932
 PREHOOK: query: -- multi distinct
 explain
 select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20

http://git-wip-us.apache.org/repos/asf/hive/blob/c0116739/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
index 01106a4..40af253 100644
--- a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
@@ -540,6 +540,100 @@ POSTHOOK: Input: default@alltypesorc
 -63	19
 -64	24
 NULL	2932
+PREHOOK: query: explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cdouble (type: double)
+                    outputColumnNames: ctinyint, cdouble
+                    Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: double)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: tinyint)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 3072 Data size: 94309 Basic stats: COMPLETE Column stats: NONE
+                  Limit
+                    Number of rows: 20
+                    Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-46	24
+-47	22
+-48	29
+-49	26
+-50	30
+-51	21
+-52	33
+-53	22
+-54	26
+-55	29
+-56	36
+-57	35
+-58	23
+-59	31
+-60	27
+-61	25
+-62	27
+-63	19
+-64	24
+NULL	2932
 PREHOOK: query: -- multi distinct
 explain
 select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20

http://git-wip-us.apache.org/repos/asf/hive/blob/c0116739/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out b/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
index 952d7ff..7038b4d 100644
--- a/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
@@ -540,6 +540,100 @@ POSTHOOK: Input: default@alltypesorc
 -63	19
 -64	24
 NULL	2932
+PREHOOK: query: explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cdouble (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: _col0 (type: tinyint), _col1 (type: double)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: double)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: tinyint)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+                  Limit
+                    Number of rows: 20
+                    Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-46	24
+-47	22
+-48	29
+-49	26
+-50	30
+-51	21
+-52	33
+-53	22
+-54	26
+-55	29
+-56	36
+-57	35
+-58	23
+-59	31
+-60	27
+-61	25
+-62	27
+-63	19
+-64	24
+NULL	2932
 PREHOOK: query: -- multi distinct
 explain
 select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20


[06/50] [abbrv] hive git commit: HIVE-10587 : ExprNodeColumnDesc should be created with isPartitionColOrVirtualCol true for DP column (Chaoyu Tang, reviewed by Ashutosh, via Szehon)

Posted by xu...@apache.org.
HIVE-10587 : ExprNodeColumnDesc should be created with isPartitionColOrVirtualCol true for DP column (Chaoyu Tang, reviewed by Ashutosh, via Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bb3a665a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bb3a665a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bb3a665a

Branch: refs/heads/beeline-cli
Commit: bb3a665afa3f7b457085408e5789c462978a0b07
Parents: 4aff07e
Author: Szehon Ho <sz...@cloudera.com>
Authored: Tue May 5 11:24:48 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Tue May 5 11:25:54 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bb3a665a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index dec0e38..cbc5466 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6821,7 +6821,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       for (int i = tableFields.size() + (updating() ? 1 : 0); i < rowFields.size(); ++i) {
         TypeInfo rowFieldTypeInfo = rowFields.get(i).getType();
         ExprNodeDesc column = new ExprNodeColumnDesc(
-            rowFieldTypeInfo, rowFields.get(i).getInternalName(), "", false);
+            rowFieldTypeInfo, rowFields.get(i).getInternalName(), "", true);
         expressions.add(column);
       }
       // converted = true; // [TODO]: should we check & convert type to String and set it to true?


[38/50] [abbrv] hive git commit: HIVE-9644 : CASE comparison operator rotation optimization (Ashutosh Chauhan via Gopal V)

Posted by xu...@apache.org.
HIVE-9644 : CASE comparison operator rotation optimization (Ashutosh Chauhan via Gopal V)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/48a243ef
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/48a243ef
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/48a243ef

Branch: refs/heads/beeline-cli
Commit: 48a243efdf91a7f5334b28810b30fc8d82925d51
Parents: d434f64
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Sat May 2 13:17:47 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu May 7 10:27:35 2015 -0700

----------------------------------------------------------------------
 .../optimizer/ConstantPropagateProcFactory.java |  83 +++-
 ql/src/test/queries/clientpositive/fold_case.q  |  12 +
 ql/src/test/queries/clientpositive/fold_when.q  |  31 ++
 .../test/results/clientpositive/fold_case.q.out | 301 ++++++++++++
 .../test/results/clientpositive/fold_when.q.out | 480 +++++++++++++++++++
 .../ql_rewrite_gbtoidx_cbo_2.q.out              |  14 +-
 6 files changed, 912 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
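
A one-liner to observe the rotation (a sketch; assumes a local Hive CLI
and the src test table):

  hive -e "explain select count(1) from src where (case key when '238' then true else false end)"

As fold_case.q.out below shows, the Filter Operator predicate becomes the
rotated comparison (key = '238') rather than a per-row CASE evaluation.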


http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index e9436e5..f536ef6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.SelectOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
@@ -65,11 +66,15 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCase;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNot;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
@@ -79,9 +84,11 @@ import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantBooleanObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.BooleanWritable;
 
 import com.google.common.collect.ImmutableSet;
 
@@ -199,10 +206,11 @@ public final class ConstantPropagateProcFactory {
    * @param op processing operator
    * @param propagate if true, assignment expressions will be added to constants.
    * @return fold expression
+   * @throws UDFArgumentException
    */
   private static ExprNodeDesc foldExpr(ExprNodeDesc desc, Map<ColumnInfo, ExprNodeDesc> constants,
       ConstantPropagateProcCtx cppCtx, Operator<? extends Serializable> op, int tag,
-      boolean propagate) {
+      boolean propagate) throws UDFArgumentException {
     if (desc instanceof ExprNodeGenericFuncDesc) {
       ExprNodeGenericFuncDesc funcDesc = (ExprNodeGenericFuncDesc) desc;
 
@@ -356,7 +364,7 @@ public final class ConstantPropagateProcFactory {
     return (expr instanceof ExprNodeColumnDesc) ? (ExprNodeColumnDesc)expr : null;
   }
 
-  private static ExprNodeDesc shortcutFunction(GenericUDF udf, List<ExprNodeDesc> newExprs) {
+  private static ExprNodeDesc shortcutFunction(GenericUDF udf, List<ExprNodeDesc> newExprs) throws UDFArgumentException {
     if (udf instanceof GenericUDFOPAnd) {
       for (int i = 0; i < 2; i++) {
         ExprNodeDesc childExpr = newExprs.get(i);
@@ -407,6 +415,77 @@ public final class ConstantPropagateProcFactory {
       }
     }
 
+    if (udf instanceof GenericUDFWhen) {
+      if (!(newExprs.size() == 2 || newExprs.size() == 3)) {
+        // In general, WHEN can have an unlimited number of branches;
+        // we currently handle only 1 or 2 branches.
+        return null;
+      }
+      ExprNodeDesc thenExpr = newExprs.get(1);
+      if (thenExpr instanceof ExprNodeNullDesc && (newExprs.size() == 2 || newExprs.get(2) instanceof ExprNodeNullDesc)) {
+        return thenExpr;
+      }
+      ExprNodeDesc elseExpr = newExprs.size() == 3 ? newExprs.get(2) :
+        new ExprNodeConstantDesc(newExprs.get(1).getTypeInfo(), null);
+
+      ExprNodeDesc whenExpr = newExprs.get(0);
+      if (whenExpr instanceof ExprNodeConstantDesc) {
+        Boolean whenVal = (Boolean)((ExprNodeConstantDesc) whenExpr).getValue();
+        return (whenVal == null || Boolean.FALSE.equals(whenVal)) ? elseExpr : thenExpr;
+      }
+
+      if (thenExpr instanceof ExprNodeConstantDesc && elseExpr instanceof ExprNodeConstantDesc) {
+        ExprNodeConstantDesc constThen = (ExprNodeConstantDesc) thenExpr;
+        ExprNodeConstantDesc constElse = (ExprNodeConstantDesc) elseExpr;
+        Object thenVal = constThen.getValue();
+        Object elseVal = constElse.getValue();
+        if (thenVal == null) {
+          return elseVal == null ? thenExpr : null;
+        } else if(thenVal.equals(elseVal)){
+          return thenExpr;
+        } else if (thenVal instanceof Boolean && elseVal instanceof Boolean) {
+          return Boolean.TRUE.equals(thenVal) ? newExprs.get(0) :
+            ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPNot(), newExprs.subList(0, 1));
+        } else {
+          return null;
+        }
+      }
+    }
+    if (udf instanceof GenericUDFCase) {
+      // HIVE-9644: attempt to fold an expression like
+      //   where (case ss_sold_date when '1998-01-01' then 1=1 else null=1 end)
+      // into the equivalent: where ss_sold_date = '1998-01-01'
+      if (!(newExprs.size() == 3 || newExprs.size() == 4)) {
+        // In general, CASE can have an unlimited number of branches;
+        // we currently handle only 1 or 2 branches.
+        return null;
+      }
+      ExprNodeDesc thenExpr = newExprs.get(2);
+      if (thenExpr instanceof ExprNodeNullDesc && (newExprs.size() == 3 || newExprs.get(3) instanceof ExprNodeNullDesc)) {
+        return thenExpr;
+      }
+
+      ExprNodeDesc elseExpr = newExprs.size() == 4 ? newExprs.get(3) :
+        new ExprNodeConstantDesc(newExprs.get(2).getTypeInfo(),null);
+
+      if (thenExpr instanceof ExprNodeConstantDesc && elseExpr instanceof ExprNodeConstantDesc) {
+        ExprNodeConstantDesc constThen = (ExprNodeConstantDesc) thenExpr;
+        ExprNodeConstantDesc constElse = (ExprNodeConstantDesc) elseExpr;
+        Object thenVal = constThen.getValue();
+        Object elseVal = constElse.getValue();
+        if (thenVal == null) {
+          return elseVal == null ? thenExpr : null;
+        } else if(thenVal.equals(elseVal)){
+          return thenExpr;
+        } else if (thenVal instanceof Boolean && elseVal instanceof Boolean) {
+          return Boolean.TRUE.equals(thenVal) ? ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPEqual(), newExprs.subList(0, 2)) :
+            ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPNotEqual(), newExprs.subList(0, 2));
+        } else {
+          return null;
+        }
+      }
+    }
+
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/queries/clientpositive/fold_case.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/fold_case.q b/ql/src/test/queries/clientpositive/fold_case.q
new file mode 100644
index 0000000..3f9e3a3
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/fold_case.q
@@ -0,0 +1,12 @@
+explain
+select count(1) from src where (case key when '238' then true else false end);
+explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=1 end);
+explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=31 end);
+explain 
+select count(1) from src where (case key when '238' then true else 1=1 end);
+explain
+select count(1) from src where (case key when '238' then 1=1 else 1=null end);
+explain 
+select count(1) from src where (case key when '238' then null else 1=1 end);

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/queries/clientpositive/fold_when.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/fold_when.q b/ql/src/test/queries/clientpositive/fold_when.q
new file mode 100644
index 0000000..e827a5c
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/fold_when.q
@@ -0,0 +1,31 @@
+explain
+select key from src where ((case when (key = '238') then null     end) = 1);
+explain
+select key from src where ((case when (key = '238') then null else null end) = 1);
+explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 1);
+explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 2);
+explain
+select key from src where ((case when (key = '238') then 1 else null end) = 1);
+explain
+select key from src where ((case when (key = '238') then 1=1 else null=1 end));
+explain
+select key from src where ((case when (key = '238') then 1=1 else 2=2 end));
+explain
+select key from src where ((case when (key = '238') then 1=3 else 2=1 end));
+explain
+select key from src where ((case when (key = '238') then 1=1 else 2=1 end));
+explain
+select key from src where ((case when (key = '238') then 1=3 else 1=1 end));
+explain
+select key from src where ((case when ('23' = '23') then 1 else 1 end) = 1);
+explain
+select key from src where ((case when ('2' = '238') then 1 else 2 end) = 2);
+explain
+select key from src where ((case when (true=null) then 1 else 1 end) = 1);
+explain
+select key from src where ((case when (key = (case when (key = '238') then '11' else '11'  end)) then false else true end));
+explain
+select key from src where ((case when (key = (case when (key = '238') then '12' else '11'  end)) then 2=2   else true end));
+
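
These q files run through the usual qtest driver (assuming the standard
itests setup; add -Dtest.output.overwrite=true when regenerating the
.q.out goldens):

  cd itests/qtest
  mvn test -Dtest=TestCliDriver -Dqfile=fold_case.q,fold_when.q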

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/results/clientpositive/fold_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fold_case.q.out b/ql/src/test/results/clientpositive/fold_case.q.out
new file mode 100644
index 0000000..de6c43e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/fold_case.q.out
@@ -0,0 +1,301 @@
+PREHOOK: query: explain
+select count(1) from src where (case key when '238' then true else false end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(1) from src where (case key when '238' then true else false end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = '238') (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=1 end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=1 end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key <> '238') (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=31 end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=31 end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+              Group By Operator
+                aggregations: count(1)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain 
+select count(1) from src where (case key when '238' then true else 1=1 end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(1) from src where (case key when '238' then true else 1=1 end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                aggregations: count(1)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select count(1) from src where (case key when '238' then 1=1 else 1=null end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(1) from src where (case key when '238' then 1=1 else 1=null end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: CASE (key) WHEN ('238') THEN (true) ELSE (null) END (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain 
+select count(1) from src where (case key when '238' then null else 1=1 end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(1) from src where (case key when '238' then null else 1=1 end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: CASE (key) WHEN ('238') THEN (null) ELSE (true) END (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/results/clientpositive/fold_when.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fold_when.q.out b/ql/src/test/results/clientpositive/fold_when.q.out
new file mode 100644
index 0000000..37803e0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/fold_when.q.out
@@ -0,0 +1,480 @@
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then null     end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then null     end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then null else null end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then null else null end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 2)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 2)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else null end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else null end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (CASE WHEN ((key = '238')) THEN (1) ELSE (null) END = 1) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else null=1 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else null=1 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: CASE WHEN ((key = '238')) THEN (true) ELSE (null) END (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else 2=2 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else 2=2 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=3 else 2=1 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=3 else 2=1 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else 2=1 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else 2=1 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = '238') (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: '238' (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=3 else 1=1 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=3 else 1=1 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (not (key = '238')) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when ('23' = '23') then 1 else 1 end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when ('23' = '23') then 1 else 1 end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when ('2' = '238') then 1 else 2 end) = 2)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when ('2' = '238') then 1 else 2 end) = 2)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (true=null) then 1 else 1 end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (true=null) then 1 else 1 end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = (case when (key = '238') then '11' else '11'  end)) then false else true end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = (case when (key = '238') then '11' else '11'  end)) then false else true end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (not (key = '11')) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = (case when (key = '238') then '12' else '11'  end)) then 2=2   else true end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = (case when (key = '238') then '12' else '11'  end)) then 2=2   else true end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
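
The plans above pin down Hive's constant folding of CASE predicates in the WHERE clause, and the expected rewrites can be read straight off the golden output. A hedged recap in SQL against the standard 500-row src test table (key string, value string), assuming the same optimizer settings as the test:

-- 'THEN 1=1 ELSE 2=1' is true exactly when the WHEN condition holds,
-- so the whole CASE collapses to that condition:
EXPLAIN SELECT key FROM src WHERE CASE WHEN key = '238' THEN 1=1 ELSE 2=1 END;
-- expected Filter predicate: (key = '238')

-- both branches true: the filter folds away and the query becomes a bare fetch
EXPLAIN SELECT key FROM src WHERE CASE WHEN key = '238' THEN 1=1 ELSE 2=2 END;

-- both branches false: the predicate folds to 'false' and statistics drop to zero rows
EXPLAIN SELECT key FROM src WHERE CASE WHEN key = '238' THEN 1=3 ELSE 2=1 END;

-- THEN false, ELSE true: the CASE collapses to the negated condition
EXPLAIN SELECT key FROM src WHERE CASE WHEN key = '238' THEN 1=3 ELSE 1=1 END;
-- expected Filter predicate: (not (key = '238'))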

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
index 6340a75..381e58f 100644
--- a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
+++ b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
@@ -3814,22 +3814,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: lineitem_ix
-            Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 0 Data size: 12099 Basic stats: PARTIAL Column stats: COMPLETE
             Select Operator
-              expressions: CASE (l_orderkey) WHEN (null) THEN (1) ELSE (1) END (type: int)
+              expressions: 1 (type: int)
               outputColumnNames: _col0
-              Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 0 Data size: 12099 Basic stats: PARTIAL Column stats: COMPLETE
               Group By Operator
                 aggregations: count(_col0)
                 keys: _col0 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -3837,10 +3837,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
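
The gbtoidx hunk shows the same folding inside an aggregation input: CASE l_orderkey WHEN null THEN 1 ELSE 1 END evaluates to 1 on every row, both because a WHEN null comparison never matches and because the two branches are identical anyway, so the planner now emits the literal. A hedged reduction of what the hunk asserts, assuming the lineitem_ix table from the test (the test's real query also groups on the expression, which this sketch omits):

-- the old plan computed the CASE per row; the new plan substitutes the constant
SELECT count(CASE l_orderkey WHEN null THEN 1 ELSE 1 END) FROM lineitem_ix;
-- planned as if written: SELECT count(1) FROM lineitem_ix;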


[15/50] [abbrv] hive git commit: HIVE-9456 : Make Hive support unicode with MSSQL as Metastore backend (Xiaobing Zhou via Sushanth Sowmyan, reviewed by Thejas Nair)

Posted by xu...@apache.org.
HIVE-9456 : Make Hive support unicode with MSSQL as Metastore backend (Xiaobing Zhou via Sushanth Sowmyan, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8c93f2ba
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8c93f2ba
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8c93f2ba

Branch: refs/heads/beeline-cli
Commit: 8c93f2ba6390ecaf683c4f30d1383d9ce0fdc3b4
Parents: 377ba4b
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 11:14:01 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 11:14:55 2015 -0700

----------------------------------------------------------------------
 .../upgrade/mssql/hive-schema-1.2.0.mssql.sql   | 256 +++++++++----------
 .../upgrade/mssql/hive-schema-1.3.0.mssql.sql   | 256 +++++++++----------
 .../mssql/upgrade-1.1.0-to-1.2.0.mssql.sql      |   1 +
 3 files changed, 257 insertions(+), 256 deletions(-)
----------------------------------------------------------------------
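
Background for the change set: SQL Server's varchar type is bound to a single-byte collation code page, so metastore strings containing characters outside that code page (non-Latin database, table, or column names, say) get mangled on insert, whereas nvarchar stores UTF-16 and round-trips arbitrary Unicode. A minimal T-SQL illustration of the difference, using a throwaway demo table of my own rather than anything from the schema below:

CREATE TABLE UNICODE_DEMO (
    V  varchar(128),   -- code-page bound: characters outside the collation become '?'
    NV nvarchar(128)   -- UTF-16: round-trips any Unicode string
);
INSERT INTO UNICODE_DEMO VALUES (N'таблица', N'таблица');
SELECT V, NV FROM UNICODE_DEMO;
-- on a default Latin1 collation, V typically reads back as '???????' while NV is intact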


http://git-wip-us.apache.org/repos/asf/hive/blob/8c93f2ba/metastore/scripts/upgrade/mssql/hive-schema-1.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-1.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-1.2.0.mssql.sql
index e78dcf1..0bbd647 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-1.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-1.2.0.mssql.sql
@@ -49,7 +49,7 @@
 CREATE TABLE MASTER_KEYS
 (
     KEY_ID int NOT NULL,
-    MASTER_KEY varchar(767) NULL
+    MASTER_KEY nvarchar(767) NULL
 );
 
 ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
@@ -60,8 +60,8 @@ CREATE TABLE IDXS
     INDEX_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     DEFERRED_REBUILD bit NOT NULL,
-    INDEX_HANDLER_CLASS varchar(4000) NULL,
-    INDEX_NAME varchar(128) NULL,
+    INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+    INDEX_NAME nvarchar(128) NULL,
     INDEX_TBL_ID bigint NULL,
     LAST_ACCESS_TIME int NOT NULL,
     ORIG_TBL_ID bigint NULL,
@@ -75,11 +75,11 @@ CREATE TABLE PART_COL_STATS
 (
     CS_ID bigint NOT NULL,
     AVG_COL_LEN float NULL,
-    "COLUMN_NAME" varchar(128) NOT NULL,
-    COLUMN_TYPE varchar(128) NOT NULL,
-    DB_NAME varchar(128) NOT NULL,
-    BIG_DECIMAL_HIGH_VALUE varchar(255) NULL,
-    BIG_DECIMAL_LOW_VALUE varchar(255) NULL,
+    "COLUMN_NAME" nvarchar(128) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
     DOUBLE_HIGH_VALUE float NULL,
     DOUBLE_LOW_VALUE float NULL,
     LAST_ANALYZED bigint NOT NULL,
@@ -91,8 +91,8 @@ CREATE TABLE PART_COL_STATS
     NUM_NULLS bigint NOT NULL,
     NUM_TRUES bigint NULL,
     PART_ID bigint NULL,
-    PARTITION_NAME varchar(767) NOT NULL,
-    "TABLE_NAME" varchar(128) NOT NULL
+    PARTITION_NAME nvarchar(767) NOT NULL,
+    "TABLE_NAME" nvarchar(128) NOT NULL
 );
 
 ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -105,12 +105,12 @@ CREATE TABLE PART_PRIVS
     PART_GRANT_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
     PART_ID bigint NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    PART_PRIV varchar(128) NULL
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_PRIV nvarchar(128) NULL
 );
 
 ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
@@ -128,8 +128,8 @@ CREATE TABLE ROLES
 (
     ROLE_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
-    OWNER_NAME varchar(128) NULL,
-    ROLE_NAME varchar(128) NULL
+    OWNER_NAME nvarchar(128) NULL,
+    ROLE_NAME nvarchar(128) NULL
 );
 
 ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
@@ -159,8 +159,8 @@ ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
 CREATE TABLE VERSION
 (
     VER_ID bigint NOT NULL,
-    SCHEMA_VERSION varchar(127) NOT NULL,
-    VERSION_COMMENT varchar(255) NOT NULL
+    SCHEMA_VERSION nvarchar(127) NOT NULL,
+    VERSION_COMMENT nvarchar(255) NOT NULL
 );
 
 ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
@@ -171,11 +171,11 @@ CREATE TABLE GLOBAL_PRIVS
     USER_GRANT_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    USER_PRIV varchar(128) NULL
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    USER_PRIV nvarchar(128) NULL
 );
 
 ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
@@ -184,15 +184,15 @@ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_
 CREATE TABLE PART_COL_PRIVS
 (
     PART_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" varchar(128) NULL,
+    "COLUMN_NAME" nvarchar(128) NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
     PART_ID bigint NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    PART_COL_PRIV varchar(128) NULL
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_COL_PRIV nvarchar(128) NULL
 );
 
 ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
@@ -204,11 +204,11 @@ CREATE TABLE DB_PRIVS
     CREATE_TIME int NOT NULL,
     DB_ID bigint NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    DB_PRIV varchar(128) NULL
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    DB_PRIV nvarchar(128) NULL
 );
 
 ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
@@ -218,11 +218,11 @@ CREATE TABLE TAB_COL_STATS
 (
     CS_ID bigint NOT NULL,
     AVG_COL_LEN float NULL,
-    "COLUMN_NAME" varchar(128) NOT NULL,
-    COLUMN_TYPE varchar(128) NOT NULL,
-    DB_NAME varchar(128) NOT NULL,
-    BIG_DECIMAL_HIGH_VALUE varchar(255) NULL,
-    BIG_DECIMAL_LOW_VALUE varchar(255) NULL,
+    "COLUMN_NAME" nvarchar(128) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
     DOUBLE_HIGH_VALUE float NULL,
     DOUBLE_LOW_VALUE float NULL,
     LAST_ANALYZED bigint NOT NULL,
@@ -234,7 +234,7 @@ CREATE TABLE TAB_COL_STATS
     NUM_NULLS bigint NOT NULL,
     NUM_TRUES bigint NULL,
     TBL_ID bigint NULL,
-    "TABLE_NAME" varchar(128) NOT NULL
+    "TABLE_NAME" nvarchar(128) NOT NULL
 );
 
 ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -243,9 +243,9 @@ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
 CREATE TABLE TYPES
 (
     TYPES_ID bigint NOT NULL,
-    TYPE_NAME varchar(128) NULL,
-    TYPE1 varchar(767) NULL,
-    TYPE2 varchar(767) NULL
+    TYPE_NAME nvarchar(128) NULL,
+    TYPE1 nvarchar(767) NULL,
+    TYPE2 nvarchar(767) NULL
 );
 
 ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
@@ -256,11 +256,11 @@ CREATE TABLE TBL_PRIVS
     TBL_GRANT_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    TBL_PRIV varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_PRIV nvarchar(128) NULL,
     TBL_ID bigint NULL
 );
 
@@ -270,11 +270,11 @@ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
 CREATE TABLE DBS
 (
     DB_ID bigint NOT NULL,
-    "DESC" varchar(4000) NULL,
-    DB_LOCATION_URI varchar(4000) NOT NULL,
-    "NAME" varchar(128) NULL,
-    OWNER_NAME varchar(128) NULL,
-    OWNER_TYPE varchar(10) NULL
+    "DESC" nvarchar(4000) NULL,
+    DB_LOCATION_URI nvarchar(4000) NOT NULL,
+    "NAME" nvarchar(128) NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
 );
 
 ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
@@ -283,14 +283,14 @@ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
 CREATE TABLE TBL_COL_PRIVS
 (
     TBL_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" varchar(128) NULL,
+    "COLUMN_NAME" nvarchar(128) NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    TBL_COL_PRIV varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_COL_PRIV nvarchar(128) NULL,
     TBL_ID bigint NULL
 );
 
@@ -299,8 +299,8 @@ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUM
 -- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
 CREATE TABLE DELEGATION_TOKENS
 (
-    TOKEN_IDENT varchar(767) NOT NULL,
-    TOKEN varchar(767) NULL
+    TOKEN_IDENT nvarchar(767) NOT NULL,
+    TOKEN nvarchar(767) NULL
 );
 
 ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
@@ -309,8 +309,8 @@ ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (T
 CREATE TABLE SERDES
 (
     SERDE_ID bigint NOT NULL,
-    "NAME" varchar(128) NULL,
-    SLIB varchar(4000) NULL
+    "NAME" nvarchar(128) NULL,
+    SLIB nvarchar(4000) NULL
 );
 
 ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
@@ -319,13 +319,13 @@ ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
 CREATE TABLE FUNCS
 (
     FUNC_ID bigint NOT NULL,
-    CLASS_NAME varchar(4000) NULL,
+    CLASS_NAME nvarchar(4000) NULL,
     CREATE_TIME int NOT NULL,
     DB_ID bigint NULL,
-    FUNC_NAME varchar(128) NULL,
+    FUNC_NAME nvarchar(128) NULL,
     FUNC_TYPE int NOT NULL,
-    OWNER_NAME varchar(128) NULL,
-    OWNER_TYPE varchar(10) NULL
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
 );
 
 ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
@@ -336,10 +336,10 @@ CREATE TABLE ROLE_MAP
     ROLE_GRANT_ID bigint NOT NULL,
     ADD_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
     ROLE_ID bigint NULL
 );
 
@@ -352,11 +352,11 @@ CREATE TABLE TBLS
     CREATE_TIME int NOT NULL,
     DB_ID bigint NULL,
     LAST_ACCESS_TIME int NOT NULL,
-    OWNER varchar(767) NULL,
+    OWNER nvarchar(767) NULL,
     RETENTION int NOT NULL,
     SD_ID bigint NULL,
-    TBL_NAME varchar(128) NULL,
-    TBL_TYPE varchar(128) NULL,
+    TBL_NAME nvarchar(128) NULL,
+    TBL_TYPE nvarchar(128) NULL,
     VIEW_EXPANDED_TEXT text NULL,
     VIEW_ORIGINAL_TEXT text NULL
 );
@@ -368,12 +368,12 @@ CREATE TABLE SDS
 (
     SD_ID bigint NOT NULL,
     CD_ID bigint NULL,
-    INPUT_FORMAT varchar(4000) NULL,
+    INPUT_FORMAT nvarchar(4000) NULL,
     IS_COMPRESSED bit NOT NULL,
     IS_STOREDASSUBDIRECTORIES bit NOT NULL,
     LOCATION nvarchar(4000) NULL,
     NUM_BUCKETS int NOT NULL,
-    OUTPUT_FORMAT varchar(4000) NULL,
+    OUTPUT_FORMAT nvarchar(4000) NULL,
     SERDE_ID bigint NULL
 );
 
@@ -383,11 +383,11 @@ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
 CREATE TABLE PARTITION_EVENTS
 (
     PART_NAME_ID bigint NOT NULL,
-    DB_NAME varchar(128) NULL,
+    DB_NAME nvarchar(128) NULL,
     EVENT_TIME bigint NOT NULL,
     EVENT_TYPE int NOT NULL,
-    PARTITION_NAME varchar(767) NULL,
-    TBL_NAME varchar(128) NULL
+    PARTITION_NAME nvarchar(767) NULL,
+    TBL_NAME nvarchar(128) NULL
 );
 
 ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
@@ -396,7 +396,7 @@ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PAR
 CREATE TABLE SORT_COLS
 (
     SD_ID bigint NOT NULL,
-    "COLUMN_NAME" varchar(128) NULL,
+    "COLUMN_NAME" nvarchar(128) NULL,
     "ORDER" int NOT NULL,
     INTEGER_IDX int NOT NULL
 );
@@ -407,7 +407,7 @@ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX
 CREATE TABLE SKEWED_COL_NAMES
 (
     SD_ID bigint NOT NULL,
-    SKEWED_COL_NAME varchar(255) NULL,
+    SKEWED_COL_NAME nvarchar(255) NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -418,7 +418,7 @@ CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
 (
     SD_ID bigint NOT NULL,
     STRING_LIST_ID_KID bigint NOT NULL,
-    LOCATION varchar(4000) NULL
+    LOCATION nvarchar(4000) NULL
 );
 
 ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
@@ -427,7 +427,7 @@ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK
 CREATE TABLE SKEWED_STRING_LIST_VALUES
 (
     STRING_LIST_ID bigint NOT NULL,
-    STRING_LIST_VALUE varchar(255) NULL,
+    STRING_LIST_VALUE nvarchar(255) NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -447,9 +447,9 @@ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY
 CREATE TABLE PARTITION_KEYS
 (
     TBL_ID bigint NOT NULL,
-    PKEY_COMMENT varchar(4000) NULL,
-    PKEY_NAME varchar(128) NOT NULL,
-    PKEY_TYPE varchar(767) NOT NULL,
+    PKEY_COMMENT nvarchar(4000) NULL,
+    PKEY_NAME nvarchar(128) NOT NULL,
+    PKEY_TYPE nvarchar(767) NOT NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -469,8 +469,8 @@ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID
 CREATE TABLE SD_PARAMS
 (
     SD_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
@@ -480,7 +480,7 @@ CREATE TABLE FUNC_RU
 (
     FUNC_ID bigint NOT NULL,
     RESOURCE_TYPE int NOT NULL,
-    RESOURCE_URI varchar(4000) NULL,
+    RESOURCE_URI nvarchar(4000) NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -490,9 +490,9 @@ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
 CREATE TABLE TYPE_FIELDS
 (
     TYPE_NAME bigint NOT NULL,
-    COMMENT varchar(256) NULL,
-    FIELD_NAME varchar(128) NOT NULL,
-    FIELD_TYPE varchar(767) NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    FIELD_NAME nvarchar(128) NOT NULL,
+    FIELD_TYPE nvarchar(767) NOT NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -502,7 +502,7 @@ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIE
 CREATE TABLE BUCKETING_COLS
 (
     SD_ID bigint NOT NULL,
-    BUCKET_COL_NAME varchar(255) NULL,
+    BUCKET_COL_NAME nvarchar(255) NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -512,8 +512,8 @@ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,I
 CREATE TABLE DATABASE_PARAMS
 (
     DB_ID bigint NOT NULL,
-    PARAM_KEY varchar(180) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(180) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
@@ -522,8 +522,8 @@ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID
 CREATE TABLE INDEX_PARAMS
 (
     INDEX_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
@@ -532,9 +532,9 @@ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PA
 CREATE TABLE COLUMNS_V2
 (
     CD_ID bigint NOT NULL,
-    COMMENT varchar(256) NULL,
-    "COLUMN_NAME" varchar(128) NOT NULL,
-    TYPE_NAME varchar(4000) NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    "COLUMN_NAME" nvarchar(128) NOT NULL,
+    TYPE_NAME nvarchar(4000) NOT NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -544,8 +544,8 @@ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME
 CREATE TABLE SERDE_PARAMS
 (
     SERDE_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
@@ -554,8 +554,8 @@ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PA
 CREATE TABLE PARTITION_PARAMS
 (
     PART_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
@@ -564,8 +564,8 @@ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PAR
 CREATE TABLE TABLE_PARAMS
 (
     TBL_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
@@ -575,9 +575,9 @@ CREATE TABLE NOTIFICATION_LOG
     NL_ID bigint NOT NULL,
     EVENT_ID bigint NOT NULL,
     EVENT_TIME int NOT NULL,
-    EVENT_TYPE varchar(32) NOT NULL,
-    DB_NAME varchar(128) NULL,
-    TBL_NAME varchar(128) NULL,
+    EVENT_TYPE nvarchar(32) NOT NULL,
+    DB_NAME nvarchar(128) NULL,
+    TBL_NAME nvarchar(128) NULL,
     MESSAGE text NULL
 );
 
@@ -858,14 +858,14 @@ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
 -- -----------------------------------------------------------------------------------------------------------------------------------------------
 CREATE TABLE COMPACTION_QUEUE(
 	CQ_ID bigint NOT NULL,
-	CQ_DATABASE varchar(128) NOT NULL,
-	CQ_TABLE varchar(128) NOT NULL,
-	CQ_PARTITION varchar(767) NULL,
+	CQ_DATABASE nvarchar(128) NOT NULL,
+	CQ_TABLE nvarchar(128) NOT NULL,
+	CQ_PARTITION nvarchar(767) NULL,
 	CQ_STATE char(1) NOT NULL,
 	CQ_TYPE char(1) NOT NULL,
-	CQ_WORKER_ID varchar(128) NULL,
+	CQ_WORKER_ID nvarchar(128) NULL,
 	CQ_START bigint NULL,
-	CQ_RUN_AS varchar(128) NULL,
+	CQ_RUN_AS nvarchar(128) NULL,
 PRIMARY KEY CLUSTERED 
 (
 	CQ_ID ASC
@@ -874,24 +874,24 @@ PRIMARY KEY CLUSTERED
 
 CREATE TABLE COMPLETED_TXN_COMPONENTS(
 	CTC_TXNID bigint NULL,
-	CTC_DATABASE varchar(128) NOT NULL,
-	CTC_TABLE varchar(128) NULL,
-	CTC_PARTITION varchar(767) NULL
+	CTC_DATABASE nvarchar(128) NOT NULL,
+	CTC_TABLE nvarchar(128) NULL,
+	CTC_PARTITION nvarchar(767) NULL
 );
 
 CREATE TABLE HIVE_LOCKS(
 	HL_LOCK_EXT_ID bigint NOT NULL,
 	HL_LOCK_INT_ID bigint NOT NULL,
 	HL_TXNID bigint NULL,
-	HL_DB varchar(128) NOT NULL,
-	HL_TABLE varchar(128) NULL,
-	HL_PARTITION varchar(767) NULL,
+	HL_DB nvarchar(128) NOT NULL,
+	HL_TABLE nvarchar(128) NULL,
+	HL_PARTITION nvarchar(767) NULL,
 	HL_LOCK_STATE char(1) NOT NULL,
 	HL_LOCK_TYPE char(1) NOT NULL,
 	HL_LAST_HEARTBEAT bigint NOT NULL,
 	HL_ACQUIRED_AT bigint NULL,
-	HL_USER varchar(128) NOT NULL,
-	HL_HOST varchar(128) NOT NULL,
+	HL_USER nvarchar(128) NOT NULL,
+	HL_HOST nvarchar(128) NOT NULL,
 PRIMARY KEY CLUSTERED 
 (
 	HL_LOCK_EXT_ID ASC,
@@ -922,8 +922,8 @@ CREATE TABLE TXNS(
 	TXN_STATE char(1) NOT NULL,
 	TXN_STARTED bigint NOT NULL,
 	TXN_LAST_HEARTBEAT bigint NOT NULL,
-	TXN_USER varchar(128) NOT NULL,
-	TXN_HOST varchar(128) NOT NULL,
+	TXN_USER nvarchar(128) NOT NULL,
+	TXN_HOST nvarchar(128) NOT NULL,
 PRIMARY KEY CLUSTERED 
 (
 	TXN_ID ASC
@@ -932,9 +932,9 @@ PRIMARY KEY CLUSTERED
 
 CREATE TABLE TXN_COMPONENTS(
 	TC_TXNID bigint NULL,
-	TC_DATABASE varchar(128) NOT NULL,
-	TC_TABLE varchar(128) NULL,
-	TC_PARTITION varchar(767) NULL
+	TC_DATABASE nvarchar(128) NOT NULL,
+	TC_TABLE nvarchar(128) NULL,
+	TC_PARTITION nvarchar(767) NULL
 );
 
 ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);

http://git-wip-us.apache.org/repos/asf/hive/blob/8c93f2ba/metastore/scripts/upgrade/mssql/hive-schema-1.3.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-1.3.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-1.3.0.mssql.sql
index 723a185..7165edd 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-1.3.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-1.3.0.mssql.sql
@@ -49,7 +49,7 @@
 CREATE TABLE MASTER_KEYS
 (
     KEY_ID int NOT NULL,
-    MASTER_KEY varchar(767) NULL
+    MASTER_KEY nvarchar(767) NULL
 );
 
 ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
@@ -60,8 +60,8 @@ CREATE TABLE IDXS
     INDEX_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     DEFERRED_REBUILD bit NOT NULL,
-    INDEX_HANDLER_CLASS varchar(4000) NULL,
-    INDEX_NAME varchar(128) NULL,
+    INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+    INDEX_NAME nvarchar(128) NULL,
     INDEX_TBL_ID bigint NULL,
     LAST_ACCESS_TIME int NOT NULL,
     ORIG_TBL_ID bigint NULL,
@@ -75,11 +75,11 @@ CREATE TABLE PART_COL_STATS
 (
     CS_ID bigint NOT NULL,
     AVG_COL_LEN float NULL,
-    "COLUMN_NAME" varchar(128) NOT NULL,
-    COLUMN_TYPE varchar(128) NOT NULL,
-    DB_NAME varchar(128) NOT NULL,
-    BIG_DECIMAL_HIGH_VALUE varchar(255) NULL,
-    BIG_DECIMAL_LOW_VALUE varchar(255) NULL,
+    "COLUMN_NAME" nvarchar(128) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
     DOUBLE_HIGH_VALUE float NULL,
     DOUBLE_LOW_VALUE float NULL,
     LAST_ANALYZED bigint NOT NULL,
@@ -91,8 +91,8 @@ CREATE TABLE PART_COL_STATS
     NUM_NULLS bigint NOT NULL,
     NUM_TRUES bigint NULL,
     PART_ID bigint NULL,
-    PARTITION_NAME varchar(767) NOT NULL,
-    "TABLE_NAME" varchar(128) NOT NULL
+    PARTITION_NAME nvarchar(767) NOT NULL,
+    "TABLE_NAME" nvarchar(128) NOT NULL
 );
 
 ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -105,12 +105,12 @@ CREATE TABLE PART_PRIVS
     PART_GRANT_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
     PART_ID bigint NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    PART_PRIV varchar(128) NULL
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_PRIV nvarchar(128) NULL
 );
 
 ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
@@ -128,8 +128,8 @@ CREATE TABLE ROLES
 (
     ROLE_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
-    OWNER_NAME varchar(128) NULL,
-    ROLE_NAME varchar(128) NULL
+    OWNER_NAME nvarchar(128) NULL,
+    ROLE_NAME nvarchar(128) NULL
 );
 
 ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
@@ -159,8 +159,8 @@ ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
 CREATE TABLE VERSION
 (
     VER_ID bigint NOT NULL,
-    SCHEMA_VERSION varchar(127) NOT NULL,
-    VERSION_COMMENT varchar(255) NOT NULL
+    SCHEMA_VERSION nvarchar(127) NOT NULL,
+    VERSION_COMMENT nvarchar(255) NOT NULL
 );
 
 ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
@@ -171,11 +171,11 @@ CREATE TABLE GLOBAL_PRIVS
     USER_GRANT_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    USER_PRIV varchar(128) NULL
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    USER_PRIV nvarchar(128) NULL
 );
 
 ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
@@ -184,15 +184,15 @@ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_
 CREATE TABLE PART_COL_PRIVS
 (
     PART_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" varchar(128) NULL,
+    "COLUMN_NAME" nvarchar(128) NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
     PART_ID bigint NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    PART_COL_PRIV varchar(128) NULL
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_COL_PRIV nvarchar(128) NULL
 );
 
 ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
@@ -204,11 +204,11 @@ CREATE TABLE DB_PRIVS
     CREATE_TIME int NOT NULL,
     DB_ID bigint NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    DB_PRIV varchar(128) NULL
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    DB_PRIV nvarchar(128) NULL
 );
 
 ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
@@ -218,11 +218,11 @@ CREATE TABLE TAB_COL_STATS
 (
     CS_ID bigint NOT NULL,
     AVG_COL_LEN float NULL,
-    "COLUMN_NAME" varchar(128) NOT NULL,
-    COLUMN_TYPE varchar(128) NOT NULL,
-    DB_NAME varchar(128) NOT NULL,
-    BIG_DECIMAL_HIGH_VALUE varchar(255) NULL,
-    BIG_DECIMAL_LOW_VALUE varchar(255) NULL,
+    "COLUMN_NAME" nvarchar(128) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
     DOUBLE_HIGH_VALUE float NULL,
     DOUBLE_LOW_VALUE float NULL,
     LAST_ANALYZED bigint NOT NULL,
@@ -234,7 +234,7 @@ CREATE TABLE TAB_COL_STATS
     NUM_NULLS bigint NOT NULL,
     NUM_TRUES bigint NULL,
     TBL_ID bigint NULL,
-    "TABLE_NAME" varchar(128) NOT NULL
+    "TABLE_NAME" nvarchar(128) NOT NULL
 );
 
 ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -243,9 +243,9 @@ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
 CREATE TABLE TYPES
 (
     TYPES_ID bigint NOT NULL,
-    TYPE_NAME varchar(128) NULL,
-    TYPE1 varchar(767) NULL,
-    TYPE2 varchar(767) NULL
+    TYPE_NAME nvarchar(128) NULL,
+    TYPE1 nvarchar(767) NULL,
+    TYPE2 nvarchar(767) NULL
 );
 
 ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
@@ -256,11 +256,11 @@ CREATE TABLE TBL_PRIVS
     TBL_GRANT_ID bigint NOT NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    TBL_PRIV varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_PRIV nvarchar(128) NULL,
     TBL_ID bigint NULL
 );
 
@@ -270,11 +270,11 @@ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
 CREATE TABLE DBS
 (
     DB_ID bigint NOT NULL,
-    "DESC" varchar(4000) NULL,
-    DB_LOCATION_URI varchar(4000) NOT NULL,
-    "NAME" varchar(128) NULL,
-    OWNER_NAME varchar(128) NULL,
-    OWNER_TYPE varchar(10) NULL
+    "DESC" nvarchar(4000) NULL,
+    DB_LOCATION_URI nvarchar(4000) NOT NULL,
+    "NAME" nvarchar(128) NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
 );
 
 ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
@@ -283,14 +283,14 @@ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
 CREATE TABLE TBL_COL_PRIVS
 (
     TBL_COLUMN_GRANT_ID bigint NOT NULL,
-    "COLUMN_NAME" varchar(128) NULL,
+    "COLUMN_NAME" nvarchar(128) NULL,
     CREATE_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
-    TBL_COL_PRIV varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_COL_PRIV nvarchar(128) NULL,
     TBL_ID bigint NULL
 );
 
@@ -299,8 +299,8 @@ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUM
 -- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
 CREATE TABLE DELEGATION_TOKENS
 (
-    TOKEN_IDENT varchar(767) NOT NULL,
-    TOKEN varchar(767) NULL
+    TOKEN_IDENT nvarchar(767) NOT NULL,
+    TOKEN nvarchar(767) NULL
 );
 
 ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
@@ -309,8 +309,8 @@ ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (T
 CREATE TABLE SERDES
 (
     SERDE_ID bigint NOT NULL,
-    "NAME" varchar(128) NULL,
-    SLIB varchar(4000) NULL
+    "NAME" nvarchar(128) NULL,
+    SLIB nvarchar(4000) NULL
 );
 
 ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
@@ -319,13 +319,13 @@ ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
 CREATE TABLE FUNCS
 (
     FUNC_ID bigint NOT NULL,
-    CLASS_NAME varchar(4000) NULL,
+    CLASS_NAME nvarchar(4000) NULL,
     CREATE_TIME int NOT NULL,
     DB_ID bigint NULL,
-    FUNC_NAME varchar(128) NULL,
+    FUNC_NAME nvarchar(128) NULL,
     FUNC_TYPE int NOT NULL,
-    OWNER_NAME varchar(128) NULL,
-    OWNER_TYPE varchar(10) NULL
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
 );
 
 ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
@@ -336,10 +336,10 @@ CREATE TABLE ROLE_MAP
     ROLE_GRANT_ID bigint NOT NULL,
     ADD_TIME int NOT NULL,
     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
-    GRANTOR varchar(128) NULL,
-    GRANTOR_TYPE varchar(128) NULL,
-    PRINCIPAL_NAME varchar(128) NULL,
-    PRINCIPAL_TYPE varchar(128) NULL,
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
     ROLE_ID bigint NULL
 );
 
@@ -352,11 +352,11 @@ CREATE TABLE TBLS
     CREATE_TIME int NOT NULL,
     DB_ID bigint NULL,
     LAST_ACCESS_TIME int NOT NULL,
-    OWNER varchar(767) NULL,
+    OWNER nvarchar(767) NULL,
     RETENTION int NOT NULL,
     SD_ID bigint NULL,
-    TBL_NAME varchar(128) NULL,
-    TBL_TYPE varchar(128) NULL,
+    TBL_NAME nvarchar(128) NULL,
+    TBL_TYPE nvarchar(128) NULL,
     VIEW_EXPANDED_TEXT text NULL,
     VIEW_ORIGINAL_TEXT text NULL
 );
@@ -368,12 +368,12 @@ CREATE TABLE SDS
 (
     SD_ID bigint NOT NULL,
     CD_ID bigint NULL,
-    INPUT_FORMAT varchar(4000) NULL,
+    INPUT_FORMAT nvarchar(4000) NULL,
     IS_COMPRESSED bit NOT NULL,
     IS_STOREDASSUBDIRECTORIES bit NOT NULL,
     LOCATION nvarchar(4000) NULL,
     NUM_BUCKETS int NOT NULL,
-    OUTPUT_FORMAT varchar(4000) NULL,
+    OUTPUT_FORMAT nvarchar(4000) NULL,
     SERDE_ID bigint NULL
 );
 
@@ -383,11 +383,11 @@ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
 CREATE TABLE PARTITION_EVENTS
 (
     PART_NAME_ID bigint NOT NULL,
-    DB_NAME varchar(128) NULL,
+    DB_NAME nvarchar(128) NULL,
     EVENT_TIME bigint NOT NULL,
     EVENT_TYPE int NOT NULL,
-    PARTITION_NAME varchar(767) NULL,
-    TBL_NAME varchar(128) NULL
+    PARTITION_NAME nvarchar(767) NULL,
+    TBL_NAME nvarchar(128) NULL
 );
 
 ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
@@ -396,7 +396,7 @@ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PAR
 CREATE TABLE SORT_COLS
 (
     SD_ID bigint NOT NULL,
-    "COLUMN_NAME" varchar(128) NULL,
+    "COLUMN_NAME" nvarchar(128) NULL,
     "ORDER" int NOT NULL,
     INTEGER_IDX int NOT NULL
 );
@@ -407,7 +407,7 @@ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX
 CREATE TABLE SKEWED_COL_NAMES
 (
     SD_ID bigint NOT NULL,
-    SKEWED_COL_NAME varchar(255) NULL,
+    SKEWED_COL_NAME nvarchar(255) NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -418,7 +418,7 @@ CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
 (
     SD_ID bigint NOT NULL,
     STRING_LIST_ID_KID bigint NOT NULL,
-    LOCATION varchar(4000) NULL
+    LOCATION nvarchar(4000) NULL
 );
 
 ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
@@ -427,7 +427,7 @@ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK
 CREATE TABLE SKEWED_STRING_LIST_VALUES
 (
     STRING_LIST_ID bigint NOT NULL,
-    STRING_LIST_VALUE varchar(255) NULL,
+    STRING_LIST_VALUE nvarchar(255) NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -447,9 +447,9 @@ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY
 CREATE TABLE PARTITION_KEYS
 (
     TBL_ID bigint NOT NULL,
-    PKEY_COMMENT varchar(4000) NULL,
-    PKEY_NAME varchar(128) NOT NULL,
-    PKEY_TYPE varchar(767) NOT NULL,
+    PKEY_COMMENT nvarchar(4000) NULL,
+    PKEY_NAME nvarchar(128) NOT NULL,
+    PKEY_TYPE nvarchar(767) NOT NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -469,8 +469,8 @@ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID
 CREATE TABLE SD_PARAMS
 (
     SD_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
@@ -480,7 +480,7 @@ CREATE TABLE FUNC_RU
 (
     FUNC_ID bigint NOT NULL,
     RESOURCE_TYPE int NOT NULL,
-    RESOURCE_URI varchar(4000) NULL,
+    RESOURCE_URI nvarchar(4000) NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -490,9 +490,9 @@ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
 CREATE TABLE TYPE_FIELDS
 (
     TYPE_NAME bigint NOT NULL,
-    COMMENT varchar(256) NULL,
-    FIELD_NAME varchar(128) NOT NULL,
-    FIELD_TYPE varchar(767) NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    FIELD_NAME nvarchar(128) NOT NULL,
+    FIELD_TYPE nvarchar(767) NOT NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -502,7 +502,7 @@ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIE
 CREATE TABLE BUCKETING_COLS
 (
     SD_ID bigint NOT NULL,
-    BUCKET_COL_NAME varchar(255) NULL,
+    BUCKET_COL_NAME nvarchar(255) NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -512,8 +512,8 @@ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,I
 CREATE TABLE DATABASE_PARAMS
 (
     DB_ID bigint NOT NULL,
-    PARAM_KEY varchar(180) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(180) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
@@ -522,8 +522,8 @@ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID
 CREATE TABLE INDEX_PARAMS
 (
     INDEX_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
@@ -532,9 +532,9 @@ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PA
 CREATE TABLE COLUMNS_V2
 (
     CD_ID bigint NOT NULL,
-    COMMENT varchar(256) NULL,
-    "COLUMN_NAME" varchar(128) NOT NULL,
-    TYPE_NAME varchar(4000) NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    "COLUMN_NAME" nvarchar(128) NOT NULL,
+    TYPE_NAME nvarchar(4000) NOT NULL,
     INTEGER_IDX int NOT NULL
 );
 
@@ -544,8 +544,8 @@ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME
 CREATE TABLE SERDE_PARAMS
 (
     SERDE_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
@@ -554,8 +554,8 @@ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PA
 CREATE TABLE PARTITION_PARAMS
 (
     PART_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
@@ -564,8 +564,8 @@ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PAR
 CREATE TABLE TABLE_PARAMS
 (
     TBL_ID bigint NOT NULL,
-    PARAM_KEY varchar(256) NOT NULL,
-    PARAM_VALUE varchar(4000) NULL
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
 );
 
 ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
@@ -575,9 +575,9 @@ CREATE TABLE NOTIFICATION_LOG
     NL_ID bigint NOT NULL,
     EVENT_ID bigint NOT NULL,
     EVENT_TIME int NOT NULL,
-    EVENT_TYPE varchar(32) NOT NULL,
-    DB_NAME varchar(128) NULL,
-    TBL_NAME varchar(128) NULL,
+    EVENT_TYPE nvarchar(32) NOT NULL,
+    DB_NAME nvarchar(128) NULL,
+    TBL_NAME nvarchar(128) NULL,
     MESSAGE text NULL
 );
 
@@ -858,14 +858,14 @@ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
 -- -----------------------------------------------------------------------------------------------------------------------------------------------
 CREATE TABLE COMPACTION_QUEUE(
 	CQ_ID bigint NOT NULL,
-	CQ_DATABASE varchar(128) NOT NULL,
-	CQ_TABLE varchar(128) NOT NULL,
-	CQ_PARTITION varchar(767) NULL,
+	CQ_DATABASE nvarchar(128) NOT NULL,
+	CQ_TABLE nvarchar(128) NOT NULL,
+	CQ_PARTITION nvarchar(767) NULL,
 	CQ_STATE char(1) NOT NULL,
 	CQ_TYPE char(1) NOT NULL,
-	CQ_WORKER_ID varchar(128) NULL,
+	CQ_WORKER_ID nvarchar(128) NULL,
 	CQ_START bigint NULL,
-	CQ_RUN_AS varchar(128) NULL,
+	CQ_RUN_AS nvarchar(128) NULL,
 PRIMARY KEY CLUSTERED 
 (
 	CQ_ID ASC
@@ -874,24 +874,24 @@ PRIMARY KEY CLUSTERED
 
 CREATE TABLE COMPLETED_TXN_COMPONENTS(
 	CTC_TXNID bigint NULL,
-	CTC_DATABASE varchar(128) NOT NULL,
-	CTC_TABLE varchar(128) NULL,
-	CTC_PARTITION varchar(767) NULL
+	CTC_DATABASE nvarchar(128) NOT NULL,
+	CTC_TABLE nvarchar(128) NULL,
+	CTC_PARTITION nvarchar(767) NULL
 );
 
 CREATE TABLE HIVE_LOCKS(
 	HL_LOCK_EXT_ID bigint NOT NULL,
 	HL_LOCK_INT_ID bigint NOT NULL,
 	HL_TXNID bigint NULL,
-	HL_DB varchar(128) NOT NULL,
-	HL_TABLE varchar(128) NULL,
-	HL_PARTITION varchar(767) NULL,
+	HL_DB nvarchar(128) NOT NULL,
+	HL_TABLE nvarchar(128) NULL,
+	HL_PARTITION nvarchar(767) NULL,
 	HL_LOCK_STATE char(1) NOT NULL,
 	HL_LOCK_TYPE char(1) NOT NULL,
 	HL_LAST_HEARTBEAT bigint NOT NULL,
 	HL_ACQUIRED_AT bigint NULL,
-	HL_USER varchar(128) NOT NULL,
-	HL_HOST varchar(128) NOT NULL,
+	HL_USER nvarchar(128) NOT NULL,
+	HL_HOST nvarchar(128) NOT NULL,
 PRIMARY KEY CLUSTERED 
 (
 	HL_LOCK_EXT_ID ASC,
@@ -922,8 +922,8 @@ CREATE TABLE TXNS(
 	TXN_STATE char(1) NOT NULL,
 	TXN_STARTED bigint NOT NULL,
 	TXN_LAST_HEARTBEAT bigint NOT NULL,
-	TXN_USER varchar(128) NOT NULL,
-	TXN_HOST varchar(128) NOT NULL,
+	TXN_USER nvarchar(128) NOT NULL,
+	TXN_HOST nvarchar(128) NOT NULL,
 PRIMARY KEY CLUSTERED 
 (
 	TXN_ID ASC
@@ -932,9 +932,9 @@ PRIMARY KEY CLUSTERED
 
 CREATE TABLE TXN_COMPONENTS(
 	TC_TXNID bigint NULL,
-	TC_DATABASE varchar(128) NOT NULL,
-	TC_TABLE varchar(128) NULL,
-	TC_PARTITION varchar(767) NULL
+	TC_DATABASE nvarchar(128) NOT NULL,
+	TC_TABLE nvarchar(128) NULL,
+	TC_PARTITION nvarchar(767) NULL
 );
 
 ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);

http://git-wip-us.apache.org/repos/asf/hive/blob/8c93f2ba/metastore/scripts/upgrade/mssql/upgrade-1.1.0-to-1.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade-1.1.0-to-1.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-1.1.0-to-1.2.0.mssql.sql
index 9e3bffb..9ff7d5c 100644
--- a/metastore/scripts/upgrade/mssql/upgrade-1.1.0-to-1.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/upgrade-1.1.0-to-1.2.0.mssql.sql
@@ -1,5 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 1.1.0 to 1.2.0' AS MESSAGE;
 
+:r 006-HIVE-9456.mssql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='1.2.0', VERSION_COMMENT='Hive release version 1.2.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 1.1.0 to 1.2.0' AS MESSAGE;
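
One operational note on the added :r line: that is a sqlcmd include directive, not T-SQL, so the upgrade script has to be run through sqlcmd (clients without sqlcmd mode will reject it), and the included 006-HIVE-9456.mssql.sql is, as far as I know, resolved relative to sqlcmd's working directory. A hedged invocation with placeholder connection details:

cd metastore/scripts/upgrade/mssql
sqlcmd -S <server> -d <metastore_db> -i upgrade-1.1.0-to-1.2.0.mssql.sql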


[04/50] [abbrv] hive git commit: HIVE-9508: MetaStore client socket connection should have a lifetime (Thiruvel Thirumoolan reviewed by Vaibhav Gumashta)

Posted by xu...@apache.org.
HIVE-9508: MetaStore client socket connection should have a lifetime (Thiruvel Thirumoolan reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e2a12c9a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e2a12c9a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e2a12c9a

Branch: refs/heads/beeline-cli
Commit: e2a12c9a630d037f3e3aaf42acc873eac86bc9f3
Parents: 652febc
Author: Vaibhav Gumashta <vg...@apache.org>
Authored: Tue May 5 10:44:16 2015 -0700
Committer: Vaibhav Gumashta <vg...@apache.org>
Committed: Tue May 5 10:44:16 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  6 ++++
 .../hive/metastore/TestHiveMetaStore.java       | 30 ++++++++++++++++++++
 .../hive/metastore/RetryingMetaStoreClient.java | 27 +++++++++++++++---
 3 files changed, 59 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e2a12c9a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 5d4dbea..69fda45 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -119,6 +119,7 @@ public class HiveConf extends Configuration {
       HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
       HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
       HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
+      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME,
       HiveConf.ConfVars.METASTOREPWD,
       HiveConf.ConfVars.METASTORECONNECTURLHOOK,
       HiveConf.ConfVars.METASTORECONNECTURLKEY,
@@ -398,6 +399,11 @@ public class HiveConf extends Configuration {
     METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
         new TimeValidator(TimeUnit.SECONDS),
         "MetaStore Client socket timeout in seconds"),
+    METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
+        "reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
+        "has an infinite lifetime."),
     METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
         "password to use against metastore database"),
     METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",

http://git-wip-us.apache.org/repos/asf/hive/blob/e2a12c9a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 130fd67..dffeb34 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -2867,4 +2867,34 @@ public abstract class TestHiveMetaStore extends TestCase {
         ownerName, ownerType, createTime, functionType, resources);
     client.createFunction(func);
   }
+
+  public void testRetriableClientWithConnLifetime() throws Exception {
+
+    HiveConf conf = new HiveConf(hiveConf);
+    conf.setLong(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME.name(), 60);
+    long timeout = 65 * 1000; // Let's use a timeout longer than the socket lifetime to force a reconnect
+
+    // Test a normal retriable client
+    IMetaStoreClient client = RetryingMetaStoreClient.getProxy(conf, getHookLoader(), HiveMetaStoreClient.class.getName());
+    client.getAllDatabases();
+    client.close();
+
+    // Connect after the lifetime, there should not be any failures
+    client = RetryingMetaStoreClient.getProxy(conf, getHookLoader(), HiveMetaStoreClient.class.getName());
+    Thread.sleep(timeout);
+    client.getAllDatabases();
+    client.close();
+  }
+
+  private HiveMetaHookLoader getHookLoader() {
+    HiveMetaHookLoader hookLoader = new HiveMetaHookLoader() {
+      @Override
+      public HiveMetaHook getHook(
+          org.apache.hadoop.hive.metastore.api.Table tbl)
+          throws MetaException {
+        return null;
+      }
+    };
+    return hookLoader;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/e2a12c9a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
index 77da6f7..1b6487a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
@@ -54,9 +54,9 @@ public class RetryingMetaStoreClient implements InvocationHandler {
   private final int retryLimit;
   private final long retryDelaySeconds;
   private final Map<String, Long> metaCallTimeMap;
-
-
-
+  private final long connectionLifeTimeInMillis;
+  private long lastConnectionTime;
+  private boolean localMetaStore;
 
   protected RetryingMetaStoreClient(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
       Map<String, Long> metaCallTimeMap, Class<? extends IMetaStoreClient> msClientClass) throws MetaException {
@@ -64,6 +64,11 @@ public class RetryingMetaStoreClient implements InvocationHandler {
     this.retryDelaySeconds = hiveConf.getTimeVar(
         HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
     this.metaCallTimeMap = metaCallTimeMap;
+    this.connectionLifeTimeInMillis =
+        hiveConf.getTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME, TimeUnit.SECONDS) * 1000;
+    this.lastConnectionTime = System.currentTimeMillis();
+    String msUri = hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS);
+    localMetaStore = (msUri == null) || msUri.trim().isEmpty();
 
     reloginExpiringKeytabUser();
     this.base = MetaStoreUtils.newInstance(msClientClass, new Class[] {
@@ -104,8 +109,9 @@ public class RetryingMetaStoreClient implements InvocationHandler {
     while (true) {
       try {
         reloginExpiringKeytabUser();
-        if(retriesMade > 0){
+        if (retriesMade > 0 || hasConnectionLifeTimeReached(method)) {
           base.reconnect();
+          lastConnectionTime = System.currentTimeMillis();
         }
         if (metaCallTimeMap == null) {
           ret = method.invoke(base, args);
@@ -171,6 +177,19 @@ public class RetryingMetaStoreClient implements InvocationHandler {
     return methodSb.toString();
   }
 
+  private boolean hasConnectionLifeTimeReached(Method method) {
+    if (connectionLifeTimeInMillis <= 0 || localMetaStore ||
+        method.getName().equalsIgnoreCase("close")) {
+      return false;
+    }
+    boolean shouldReconnect =
+        (System.currentTimeMillis() - lastConnectionTime) >= connectionLifeTimeInMillis;
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Reconnection status for Method: " + method.getName() + " is " + shouldReconnect);
+    }
+    return shouldReconnect;
+  }
+
   /**
    * Relogin if login user is logged in using keytab
    * Relogin is actually done by ugi code only if sufficient time has passed


[40/50] [abbrv] hive git commit: HIVE-10620 : ZooKeeperHiveLock overrides equals() method but not hashCode() (Chaoyu Tang, reviewed by Ashutosh)

Posted by xu...@apache.org.
HIVE-10620 : ZooKeeperHiveLock overrides equals() method but not hashCode() (Chaoyu Tang, reviewed by Ashutosh)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/61763335
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/61763335
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/61763335

Branch: refs/heads/beeline-cli
Commit: 61763335b6a184e4a0ce6dde8a6e3a5d93af4857
Parents: 7149ab1
Author: Szehon Ho <sz...@cloudera.com>
Authored: Thu May 7 11:18:13 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Thu May 7 11:18:13 2015 -0700

----------------------------------------------------------------------
 .../ql/lockmgr/zookeeper/ZooKeeperHiveLock.java | 22 ++++++++++++++++++++
 1 file changed, 22 insertions(+)
----------------------------------------------------------------------
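
The contract being restored here is java.lang.Object's: objects that compare
equal must return the same hashCode(), otherwise hash-based collections such
as HashSet and HashMap can fail to find an object "equal" to one they already
hold. A standalone illustration (Point stands in for ZooKeeperHiveLock; this
is not Hive code):

import java.util.HashSet;
import java.util.Set;

class Point {
  final int x, y;
  Point(int x, int y) { this.x = x; this.y = y; }

  @Override public boolean equals(Object o) {
    return o instanceof Point && ((Point) o).x == x && ((Point) o).y == y;
  }
  // hashCode() deliberately not overridden: it falls back to identity hash.
}

public class ContractDemo {
  public static void main(String[] args) {
    Set<Point> points = new HashSet<>();
    points.add(new Point(1, 2));
    // Prints false: the equal point hashes to a different bucket, so
    // contains() never even calls equals() on the stored element.
    System.out.println(points.contains(new Point(1, 2)));
  }
}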


http://git-wip-us.apache.org/repos/asf/hive/blob/61763335/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java
index 8e35007..463a339 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.lockmgr.zookeeper;
 
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
@@ -71,4 +72,25 @@ public class ZooKeeperHiveLock extends HiveLock {
       obj.equals(zLock.getHiveLockObject()) &&
       mode == zLock.getHiveLockMode();
   }
+  
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+    boolean pathPresent = path != null;
+    builder.append(pathPresent);
+    if (pathPresent) {
+      builder.append(path.toCharArray());
+    }
+    boolean lockObjectPresent = obj != null;
+    builder.append(lockObjectPresent);
+    if (lockObjectPresent) {
+      builder.append(obj.hashCode());
+    }
+    boolean modePresent = mode != null;
+    builder.append(modePresent);
+    if (modePresent) {
+      builder.append(mode);
+    }
+    return builder.toHashCode();
+  }
 }


[02/50] [abbrv] hive git commit: HIVE-10140 : Window boundary is not compared correctly (Aihua Xu via Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-10140 : Window boundary is not compared correctly (Aihua Xu via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cccaa550
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cccaa550
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cccaa550

Branch: refs/heads/beeline-cli
Commit: cccaa5509e2f9948d6dd667b4d8fd6135469c806
Parents: f895b27
Author: Aihua Xu <ai...@gmail.com>
Authored: Thu Apr 30 09:42:00 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue May 5 10:03:40 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/WindowingSpec.java     |   7 +-
 .../clientpositive/windowing_windowspec.q       |   2 +
 .../clientpositive/windowing_windowspec.q.out   | 108 +++++++++++++++++++
 3 files changed, 115 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
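
The bug: boundary amounts were always compared as "amt - other.amt", but for
PRECEDING boundaries a larger amount is an earlier position, so the valid
window "rows between 10 preceding and 2 preceding" compared as start > end.
A reduced sketch of the corrected ordering (simplified, not the actual
WindowingSpec classes; both boundaries are assumed to share a direction, as
the real compareTo has already ordered differing directions):

enum Direction { PRECEDING, FOLLOWING }

class Boundary implements Comparable<Boundary> {
  final Direction direction;
  final int amt;

  Boundary(Direction direction, int amt) {
    this.direction = direction;
    this.amt = amt;
  }

  @Override public int compareTo(Boundary other) {
    // For PRECEDING, 10 sorts before 2; for FOLLOWING, 2 sorts before 10.
    return direction == Direction.PRECEDING ? other.amt - amt : amt - other.amt;
  }
}

public class BoundaryDemo {
  public static void main(String[] args) {
    Boundary start = new Boundary(Direction.PRECEDING, 10);
    Boundary end = new Boundary(Direction.PRECEDING, 2);
    System.out.println(start.compareTo(end));  // negative: start <= end, valid
  }
}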


http://git-wip-us.apache.org/repos/asf/hive/blob/cccaa550/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
index 4fbb8b7..6dfa214 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
@@ -614,8 +614,10 @@ public class WindowingSpec {
       if (c != 0) {
         return c;
       }
+
       RangeBoundarySpec rb = (RangeBoundarySpec) other;
-      return amt - rb.amt;
+      // Valid range is "range/rows between 10 preceding and 2 preceding" for preceding case
+      return this.direction == Direction.PRECEDING ? rb.amt - amt : amt - rb.amt;
     }
 
   }
@@ -713,7 +715,8 @@ public class WindowingSpec {
         return c;
       }
       ValueBoundarySpec vb = (ValueBoundarySpec) other;
-      return amt - vb.amt;
+      // Valid range is "range/rows between 10 preceding and 2 preceding" for preceding case
+      return this.direction == Direction.PRECEDING ? vb.amt - amt : amt - vb.amt;
     }
 
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/cccaa550/ql/src/test/queries/clientpositive/windowing_windowspec.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/windowing_windowspec.q b/ql/src/test/queries/clientpositive/windowing_windowspec.q
index 63f97b7..202eb74 100644
--- a/ql/src/test/queries/clientpositive/windowing_windowspec.q
+++ b/ql/src/test/queries/clientpositive/windowing_windowspec.q
@@ -31,6 +31,8 @@ select s, sum(i) over(partition by ts order by s) from over10k limit 100;
 
 select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k limit 100;
 
+select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100;
+
 select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k limit 7;
 
 select s, i, round((avg(d) over  w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i) limit 7;

http://git-wip-us.apache.org/repos/asf/hive/blob/cccaa550/ql/src/test/results/clientpositive/windowing_windowspec.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_windowspec.q.out b/ql/src/test/results/clientpositive/windowing_windowspec.q.out
index 8d78c22..66b0b52 100644
--- a/ql/src/test/results/clientpositive/windowing_windowspec.q.out
+++ b/ql/src/test/results/clientpositive/windowing_windowspec.q.out
@@ -800,6 +800,114 @@ POSTHOOK: Input: default@over10k
 71.68	722.6499947607517
 79.46	802.1099938452244
 80.02	882.1299904882908
+PREHOOK: query: select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+3.17	14.0600004196167
+10.89	28.600000381469727
+14.54	43.38000011444092
+14.78	58.0600004196167
+17.85	67.78000068664551
+20.61	81.9300012588501
+28.69	96.3700008392334
+29.22	109.69000053405762
+31.17	127.42999839782715
+38.35	137.3499984741211
+38.61	147.60999870300293
+39.48	156.97999954223633
+40.54	160.22999954223633
+41.6	167.70000076293945
+46.08	182.5800018310547
+54.36	198.97999954223633
+56.94	222.3400001525879
+64.96	249.7799949645996
+73.52	273.99999618530273
+78.58	298.4700012207031
+81.41	318.2200012207031
+84.71	332.1300048828125
+87.43	344.9100036621094
+91.36	356.45999908447266
+92.96	366.79000091552734
+95.04	279.36000061035156
+0.83	2.8199999928474426
+1.99	6.550000011920929
+3.73	15.409999668598175
+8.86	25.199999570846558
+10.62	34.52999925613403
+11.32	43.6299991607666
+12.83	49.46999931335449
+14.7	53.80999946594238
+14.96	60.06999969482422
+17.58	66.34000015258789
+19.1	72.65000057220459
+21.01	84.64000129699707
+26.95	94.29000091552734
+27.23	104.26000022888184
+29.07	112.95999908447266
+29.71	117.8499984741211
+31.84	122.55999946594238
+31.94	128.80999946594238
+35.32	136.42000007629395
+37.32	143.07999992370605
+38.5	153.22000122070312
+42.08	162.20000076293945
+44.3	169.54000091552734
+44.66	177.88000106811523
+46.84	184.68999862670898
+48.89	190.02999877929688
+49.64	195.64999771118164
+50.28	200.89999771118164
+52.09	205.2699966430664
+53.26	209.71999740600586
+54.09	215.88999938964844
+56.45	220.55999755859375
+56.76	228.70999908447266
+61.41	236.5
+61.88	243.07999801635742
+63.03	250.87000274658203
+64.55	258.08000564575195
+68.62	272.3300018310547
+76.13	288.3500061035156
+79.05	304.2300033569336
+80.43	317.02000427246094
+81.41	323.74000549316406
+82.85	328.67000579833984
+83.98	332.4500045776367
+84.21	336.59000396728516
+85.55	341.67000579833984
+87.93	346.62000274658203
+88.93	356.6800003051758
+94.27	370.57999420166016
+99.45	282.6499938964844
+0.36	0.8400000035762787
+0.48	1.6300000250339508
+0.79	2.9000000059604645
+1.27	7.020000010728836
+4.48	15.540000021457672
+9.0	38.02000045776367
+23.27	61.87999963760376
+25.13	82.73999977111816
+25.34	99.64999961853027
+25.91	105.38999938964844
+29.01	110.72999954223633
+30.47	123.34000015258789
+37.95	136.72999954223633
+39.3	153.6299991607666
+45.91	175.5999984741211
+52.44	191.74999618530273
+54.1	209.14999771118164
+56.7	222.0099983215332
+58.77	231.6599998474121
+62.09	245.7599983215332
+68.2	260.73999786376953
+71.68	281.4299964904785
+79.46	299.35999298095703
+80.02	312.4499969482422
 PREHOOK: query: select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k limit 7
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over10k


[13/50] [abbrv] hive git commit: HIVE-10539 : set default value of hive.repl.task.factory (Thejas Nair via Sushanth Sowmyan)

Posted by xu...@apache.org.
HIVE-10539 : set default value of hive.repl.task.factory (Thejas Nair via Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a9d70a03
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a9d70a03
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a9d70a03

Branch: refs/heads/beeline-cli
Commit: a9d70a03953aeedbe70b62fbc5820bc18754db8c
Parents: 45307c1
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 02:54:43 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 02:55:35 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/conf/HiveConf.java    |  3 ++-
 .../hive/hcatalog/api/repl/TestReplicationTask.java   | 14 ++++++--------
 2 files changed, 8 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
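
Behaviorally, the patch means an unset hive.repl.task.factory no longer makes
ReplicationTask.create() fail; the EXIM factory is used instead. The lookup
reduces to a default-on-empty string, sketched here (illustrative only; the
real code resolves the name through HiveConf and instantiates the factory
reflectively):

public class ReplFactoryDefault {
  // The new default shipped by the patch; before it, the default was "".
  static final String DEFAULT_FACTORY =
      "org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory";

  static String effectiveFactory(String configured) {
    return (configured == null || configured.trim().isEmpty())
        ? DEFAULT_FACTORY : configured;
  }

  public static void main(String[] args) {
    System.out.println(effectiveFactory(""));            // EXIM default
    System.out.println(effectiveFactory("my.Factory"));  // explicit override
  }
}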


http://git-wip-us.apache.org/repos/asf/hive/blob/a9d70a03/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 69fda45..db17f0f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1657,7 +1657,8 @@ public class HiveConf extends Configuration {
         "imported on to tables that are the target of replication. If this parameter is\n" +
         "set, regular imports will check if the destination table(if it exists) has a " +
         "'repl.last.id' set on it. If so, it will fail."),
-    HIVE_REPL_TASK_FACTORY("hive.repl.task.factory","",
+    HIVE_REPL_TASK_FACTORY("hive.repl.task.factory",
+        "org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory",
         "Parameter that can be used to override which ReplicationTaskFactory will be\n" +
         "used to instantiate ReplicationTask events. Override for third party repl plugins"),
     HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false, ""),

http://git-wip-us.apache.org/repos/asf/hive/blob/a9d70a03/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/TestReplicationTask.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/TestReplicationTask.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/TestReplicationTask.java
index ea7698e..9d62eaa 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/TestReplicationTask.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/TestReplicationTask.java
@@ -19,11 +19,14 @@
 package org.apache.hive.hcatalog.api.repl;
 
 import junit.framework.TestCase;
+
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hive.hcatalog.api.HCatClient;
 import org.apache.hive.hcatalog.api.HCatNotificationEvent;
+import org.apache.hive.hcatalog.api.repl.exim.CreateTableReplicationTask;
+import org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory;
 import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.common.HCatException;
 import org.apache.hive.hcatalog.messaging.MessageFactory;
@@ -75,17 +78,12 @@ public class TestReplicationTask extends TestCase{
     event.setTableName(t.getTableName());
 
     ReplicationTask.resetFactory(null);
-    Exception caught = null;
-    try {
-      ReplicationTask rtask = ReplicationTask.create(HCatClient.create(new HiveConf()),new HCatNotificationEvent(event));
-    } catch (Exception e){
-      caught = e;
-    }
-    assertNotNull("By default, without a ReplicationTaskFactory instantiated, replication tasks should fail.",caught);
+    ReplicationTask rtask = ReplicationTask.create(HCatClient.create(new HiveConf()),new HCatNotificationEvent(event));
+    assertTrue("Provided factory instantiation should yield CreateTableReplicationTask", rtask instanceof CreateTableReplicationTask);
 
     ReplicationTask.resetFactory(NoopFactory.class);
 
-    ReplicationTask rtask = ReplicationTask.create(HCatClient.create(new HiveConf()),new HCatNotificationEvent(event));
+    rtask = ReplicationTask.create(HCatClient.create(new HiveConf()),new HCatNotificationEvent(event));
     assertTrue("Provided factory instantiation should yield NoopReplicationTask", rtask instanceof NoopReplicationTask);
 
     ReplicationTask.resetFactory(null);


[30/50] [abbrv] hive git commit: HIVE-10521 - TxnHandler.timeOutTxns only times out some of the expired transactions (Alan Gates via Eugene Koifman)

Posted by xu...@apache.org.
HIVE-10521 - TxnHandler.timeOutTxns only times out some of the expired transactions (Alan Gates via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4b444082
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4b444082
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4b444082

Branch: refs/heads/beeline-cli
Commit: 4b444082fcae9eb8ea60ec160723a0337ead1852
Parents: 80fb891
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Wed May 6 19:36:48 2015 -0700
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Wed May 6 19:36:48 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 35 ++++++++++++------
 .../hive/metastore/txn/TestTxnHandler.java      | 39 +++++++++++++++-----
 2 files changed, 53 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
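
Before this patch, timeOutTxns() read at most 20 timed-out transaction ids
and aborted only those, so a large backlog was never fully cleared in one
pass. The fix drains the result set in bounded batches until it is empty.
The batching shape, reduced to a sketch (not the actual TxnHandler; the real
code also commits and handles retries):

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

public class BatchAbort {
  static final int BATCH_SIZE = 100;  // TIMED_OUT_TXN_ABORT_BATCH_SIZE

  interface Aborter {
    void abort(List<Long> txnIds) throws SQLException;
  }

  static void abortAllTimedOut(ResultSet rs, Aborter aborter) throws SQLException {
    List<Long> batch = new ArrayList<>(BATCH_SIZE);
    do {
      batch.clear();
      // Keep each generated DELETE statement bounded in size.
      for (int i = 0; i < BATCH_SIZE && rs.next(); i++) {
        batch.add(rs.getLong(1));
      }
      if (!batch.isEmpty()) {
        aborter.abort(batch);
      }
    } while (!batch.isEmpty());  // loop until every timed-out txn is aborted
  }
}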


http://git-wip-us.apache.org/repos/asf/hive/blob/4b444082/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 704c3ed..7c3b55c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -75,6 +75,7 @@ public class TxnHandler {
   static final protected char LOCK_SEMI_SHARED = 'w';
 
   static final private int ALLOWED_REPEATED_DEADLOCKS = 10;
+  static final private int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 100;
   static final private Log LOG = LogFactory.getLog(TxnHandler.class.getName());
 
   static private DataSource connPool;
@@ -130,7 +131,8 @@ public class TxnHandler {
     timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
     deadlockCnt = 0;
     buildJumpTable();
-    retryInterval = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
+    retryInterval = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HMSHANDLERINTERVAL,
+        TimeUnit.MILLISECONDS);
     retryLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HMSHANDLERATTEMPTS);
     deadlockRetryInterval = retryInterval / 10;
 
@@ -334,9 +336,7 @@ public class TxnHandler {
       Connection dbConn = null;
       try {
         dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
-        List<Long> txnids = new ArrayList<Long>(1);
-        txnids.add(txnid);
-        if (abortTxns(dbConn, txnids) != 1) {
+        if (abortTxns(dbConn, Collections.singletonList(txnid)) != 1) {
           LOG.debug("Going to rollback");
           dbConn.rollback();
           throw new NoSuchTxnException("No such transaction: " + txnid);
@@ -1321,8 +1321,6 @@ public class TxnHandler {
       LOG.debug("Going to execute update <" + buf.toString() + ">");
       updateCnt = stmt.executeUpdate(buf.toString());
 
-      LOG.debug("Going to commit");
-      dbConn.commit();
     } finally {
       closeStmt(stmt);
     }
@@ -1818,10 +1816,10 @@ public class TxnHandler {
     }
   }
 
-  // Abort timed out transactions.  This calls abortTxn(), which does a commit,
+  // Abort timed out transactions.  This does a commit,
   // and thus should be done before any calls to heartbeat that will leave
   // open transactions on the underlying database.
-  private void timeOutTxns(Connection dbConn) throws SQLException, MetaException {
+  private void timeOutTxns(Connection dbConn) throws SQLException, MetaException, RetryException {
     long now = getDbTime(dbConn);
     Statement stmt = null;
     try {
@@ -1834,10 +1832,23 @@ public class TxnHandler {
       List<Long> deadTxns = new ArrayList<Long>();
       // Limit the number of timed out transactions we do in one pass to keep from generating a
       // huge delete statement
-      for (int i = 0; i < 20 && rs.next(); i++) deadTxns.add(rs.getLong(1));
-      // We don't care whether all of the transactions get deleted or not,
-      // if some didn't it most likely means someone else deleted them in the interim
-      if (deadTxns.size() > 0) abortTxns(dbConn, deadTxns);
+      do {
+        deadTxns.clear();
+        for (int i = 0; i <  TIMED_OUT_TXN_ABORT_BATCH_SIZE && rs.next(); i++) {
+          deadTxns.add(rs.getLong(1));
+        }
+        // We don't care whether all of the transactions get deleted or not,
+        // if some didn't it most likely means someone else deleted them in the interim
+        if (deadTxns.size() > 0) abortTxns(dbConn, deadTxns);
+      } while (deadTxns.size() > 0);
+      LOG.debug("Going to commit");
+      dbConn.commit();
+    } catch (SQLException e) {
+      LOG.debug("Going to rollback");
+      rollbackDBConn(dbConn);
+      checkRetryable(dbConn, e, "abortTxn");
+      throw new MetaException("Unable to update transaction database "
+        + StringUtils.stringifyException(e));
     } finally {
       closeStmt(stmt);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/4b444082/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index d4266e1..f478184 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -937,16 +937,16 @@ public class TestTxnHandler {
   @Test
   public void testLockTimeout() throws Exception {
     long timeout = txnHandler.setTimeout(1);
-    LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
-    comp.setTablename("mytable");
-    comp.setPartitionname("mypartition");
-    List<LockComponent> components = new ArrayList<LockComponent>(1);
-    components.add(comp);
-    LockRequest req = new LockRequest(components, "me", "localhost");
-    LockResponse res = txnHandler.lock(req);
-    assertTrue(res.getState() == LockState.ACQUIRED);
-    Thread.currentThread().sleep(10);
     try {
+      LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
+      comp.setTablename("mytable");
+      comp.setPartitionname("mypartition");
+      List<LockComponent> components = new ArrayList<LockComponent>(1);
+      components.add(comp);
+      LockRequest req = new LockRequest(components, "me", "localhost");
+      LockResponse res = txnHandler.lock(req);
+      assertTrue(res.getState() == LockState.ACQUIRED);
+      Thread.currentThread().sleep(10);
       txnHandler.checkLock(new CheckLockRequest(res.getLockid()));
       fail("Told there was a lock, when it should have timed out.");
     } catch (NoSuchLockException e) {
@@ -956,6 +956,27 @@ public class TestTxnHandler {
   }
 
   @Test
+  public void testRecoverManyTimeouts() throws Exception {
+    long timeout = txnHandler.setTimeout(1);
+    try {
+      txnHandler.openTxns(new OpenTxnRequest(503, "me", "localhost"));
+      Thread.currentThread().sleep(10);
+      txnHandler.getOpenTxns();
+      GetOpenTxnsInfoResponse rsp = txnHandler.getOpenTxnsInfo();
+      int numAborted = 0;
+      for (TxnInfo txnInfo : rsp.getOpen_txns()) {
+        assertEquals(TxnState.ABORTED, txnInfo.getState());
+        numAborted++;
+      }
+      assertEquals(503, numAborted);
+    } finally {
+      txnHandler.setTimeout(timeout);
+    }
+
+
+  }
+
+  @Test
   public void testHeartbeatNoLock() throws Exception {
     HeartbeatRequest h = new HeartbeatRequest();
     h.setLockid(29389839L);


[25/50] [abbrv] hive git commit: HIVE-10484: Vectorization : RuntimeException "Big Table Retained Mapping duplicate column" (Matt McCline reviewed by Vikram Dixit)

Posted by xu...@apache.org.
HIVE-10484: Vectorization : RuntimeException "Big Table Retained Mapping duplicate column" (Matt McCline reviewed by Vikram Dixit)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bd8d59e4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bd8d59e4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bd8d59e4

Branch: refs/heads/beeline-cli
Commit: bd8d59e40ae87fc23c030fd1dda19a4ddfe3cb5b
Parents: 2531040
Author: vikram <vi...@hortonworks.com>
Authored: Wed May 6 17:27:52 2015 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Wed May 6 17:27:52 2015 -0700

----------------------------------------------------------------------
 .../ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java     | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
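
The one-line guard below makes the mapping idempotent: when a query projects
the same big-table column more than once, the retained-column mapping is
recorded only for the first use instead of throwing. The idea in isolation
(simplified; the real mapping tracks input column, output column, and type):

import java.util.HashMap;
import java.util.Map;

public class RetainedMapping {
  private final Map<Integer, String> outputToType = new HashMap<>();

  public boolean containsOutputColumn(int outputColumn) {
    return outputToType.containsKey(outputColumn);
  }

  public void add(int outputColumn, String typeName) {
    // Tolerate repeated use of a big table column: the first mapping wins,
    // later duplicates are ignored rather than treated as an error.
    if (!containsOutputColumn(outputColumn)) {
      outputToType.put(outputColumn, typeName);
    }
  }
}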


http://git-wip-us.apache.org/repos/asf/hive/blob/bd8d59e4/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
index b215f70..a9082eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
@@ -321,7 +321,10 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
       projectionMapping.add(nextOutputColumn, batchColumnIndex, typeName);
 
       // Collect columns we copy from the big table batch to the overflow batch.
-      bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeName);
+      if (!bigTableRetainedMapping.containsOutputColumn(batchColumnIndex)) {
+        // Tolerate repeated use of a big table column.
+        bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeName);
+      }
 
       nextOutputColumn++;
     }


[33/50] [abbrv] hive git commit: HIVE-8696 : HCatClientHMSImpl doesn't use a Retrying-HiveMetastoreClient (Thiruvel Thirumoolan via Sushanth Sowmyan)

Posted by xu...@apache.org.
HIVE-8696 : HCatClientHMSImpl doesn't use a Retrying-HiveMetastoreClient (Thiruvel Thirumoolan via Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e0044e07
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e0044e07
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e0044e07

Branch: refs/heads/beeline-cli
Commit: e0044e0723d8e831ad0f29c6615b5f70e2ee0658
Parents: 26ec033
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu May 7 01:12:11 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Thu May 7 01:13:05 2015 -0700

----------------------------------------------------------------------
 .../hive/hcatalog/common/HiveClientCache.java   |  9 +++-
 .../hcatalog/mapreduce/TestPassProperties.java  |  5 +-
 .../hive/metastore/RetryingMetaStoreClient.java | 53 ++++++++++++++++----
 3 files changed, 55 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
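
RetryingMetaStoreClient is a java.lang.reflect.Proxy around the real client;
the patch generalizes its constructor plumbing so any IMetaStoreClient
implementation, including CacheableHiveMetaStoreClient(conf, timeout), can be
built behind the retrying proxy. The proxy-plus-retry mechanism itself, as a
self-contained sketch (simplified: no reconnect, relogin, or retry delay):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

public class RetryProxy implements InvocationHandler {
  private final Object target;
  private final int retryLimit;  // assumed >= 0

  private RetryProxy(Object target, int retryLimit) {
    this.target = target;
    this.retryLimit = retryLimit;
  }

  @SuppressWarnings("unchecked")
  public static <T> T wrap(T target, Class<T> iface, int retryLimit) {
    return (T) Proxy.newProxyInstance(iface.getClassLoader(),
        new Class<?>[] {iface}, new RetryProxy(target, retryLimit));
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    Throwable last = null;
    for (int attempt = 0; attempt <= retryLimit; attempt++) {
      try {
        return method.invoke(target, args);
      } catch (Exception e) {
        // Unwrap InvocationTargetException so callers see the real cause.
        last = e.getCause() != null ? e.getCause() : e;
      }
    }
    throw last;
  }
}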


http://git-wip-us.apache.org/repos/asf/hive/blob/e0044e07/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
index 578b6ea..0966581 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
@@ -35,6 +35,7 @@ import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
@@ -81,7 +82,7 @@ class HiveClientCache {
   }
 
   public static IMetaStoreClient getNonCachedHiveMetastoreClient(HiveConf hiveConf) throws MetaException {
-    return new HiveMetaStoreClient(hiveConf);
+    return RetryingMetaStoreClient.getProxy(hiveConf);
   }
 
   public HiveClientCache(HiveConf hiveConf) {
@@ -226,7 +227,11 @@ class HiveClientCache {
       return hiveCache.get(cacheKey, new Callable<ICacheableMetaStoreClient>() {
         @Override
         public ICacheableMetaStoreClient call() throws MetaException {
-          return new CacheableHiveMetaStoreClient(cacheKey.getHiveConf(), timeout);
+          return
+              (ICacheableMetaStoreClient) RetryingMetaStoreClient.getProxy(cacheKey.getHiveConf(),
+                  new Class<?>[]{HiveConf.class, Integer.class},
+                  new Object[]{cacheKey.getHiveConf(), timeout},
+                  CacheableHiveMetaStoreClient.class.getName());
         }
       });
     } catch (ExecutionException e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/e0044e07/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
index 735ab5f..8673b48 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
@@ -109,8 +110,10 @@ public class TestPassProperties {
       new FileOutputCommitterContainer(job, null).cleanupJob(job);
     } catch (Exception e) {
       caughtException = true;
-      assertTrue(e.getCause().getMessage().contains(
+      assertTrue(((InvocationTargetException)e.getCause().getCause().getCause()).getTargetException().getMessage().contains(
           "Could not connect to meta store using any of the URIs provided"));
+      assertTrue(e.getCause().getMessage().contains(
+          "Unable to instantiate org.apache.hive.hcatalog.common.HiveClientCache$CacheableHiveMetaStoreClient"));
     }
     assertTrue(caughtException);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/e0044e07/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
index 1b6487a..fb44484 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
@@ -60,6 +60,18 @@ public class RetryingMetaStoreClient implements InvocationHandler {
 
   protected RetryingMetaStoreClient(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
       Map<String, Long> metaCallTimeMap, Class<? extends IMetaStoreClient> msClientClass) throws MetaException {
+
+    this(hiveConf,
+        new Class[] {HiveConf.class, HiveMetaHookLoader.class},
+        new Object[] {hiveConf, hookLoader},
+        metaCallTimeMap,
+        msClientClass);
+  }
+
+  protected RetryingMetaStoreClient(HiveConf hiveConf, Class<?>[] constructorArgTypes,
+      Object[] constructorArgs, Map<String, Long> metaCallTimeMap, Class<? extends IMetaStoreClient> msClientClass)
+      throws MetaException {
+
     this.retryLimit = hiveConf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES);
     this.retryDelaySeconds = hiveConf.getTimeVar(
         HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
@@ -71,12 +83,14 @@ public class RetryingMetaStoreClient implements InvocationHandler {
     localMetaStore = (msUri == null) || msUri.trim().isEmpty();
 
     reloginExpiringKeytabUser();
-    this.base = MetaStoreUtils.newInstance(msClientClass, new Class[] {
-        HiveConf.class, HiveMetaHookLoader.class}, new Object[] {hiveConf, hookLoader});
+    this.base = (IMetaStoreClient) MetaStoreUtils.newInstance(msClientClass, constructorArgTypes, constructorArgs);
   }
 
-  public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) throws MetaException {
-    return getProxy(hiveConf, hookLoader, null, HiveMetaStoreClient.class.getName());
+  public static IMetaStoreClient getProxy(HiveConf hiveConf) throws MetaException {
+
+    return getProxy(hiveConf, new Class[]{HiveConf.class}, new Object[]{hiveConf}, null,
+        HiveMetaStoreClient.class.getName()
+    );
   }
 
   public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
@@ -84,19 +98,40 @@ public class RetryingMetaStoreClient implements InvocationHandler {
     return getProxy(hiveConf, hookLoader, null, mscClassName);
   }
 
+  public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
+      Map<String, Long> metaCallTimeMap, String mscClassName) throws MetaException {
+
+    return getProxy(hiveConf,
+        new Class[] {HiveConf.class, HiveMetaHookLoader.class},
+        new Object[] {hiveConf, hookLoader},
+        metaCallTimeMap,
+        mscClassName
+    );
+  }
+
   /**
    * This constructor is meant for Hive internal use only.
    * Please use getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) for external purpose.
    */
-  public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
-      Map<String, Long> metaCallTimeMap, String mscClassName) throws MetaException {
+  public static IMetaStoreClient getProxy(HiveConf hiveConf, Class<?>[] constructorArgTypes,
+      Object[] constructorArgs, String mscClassName) throws MetaException {
+    return getProxy(hiveConf, constructorArgTypes, constructorArgs, null, mscClassName);
+  }
+
+  /**
+   * This constructor is meant for Hive internal use only.
+   * Please use getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) for external purpose.
+   */
+  public static IMetaStoreClient getProxy(HiveConf hiveConf, Class<?>[] constructorArgTypes,
+      Object[] constructorArgs, Map<String, Long> metaCallTimeMap,
+      String mscClassName) throws MetaException {
 
     Class<? extends IMetaStoreClient> baseClass = (Class<? extends IMetaStoreClient>) MetaStoreUtils
         .getClass(mscClassName);
 
-    RetryingMetaStoreClient handler = new RetryingMetaStoreClient(hiveConf, hookLoader,
-        metaCallTimeMap, baseClass);
-
+    RetryingMetaStoreClient handler =
+        new RetryingMetaStoreClient(hiveConf, constructorArgTypes, constructorArgs,
+            metaCallTimeMap, baseClass);
     return (IMetaStoreClient) Proxy.newProxyInstance(
         RetryingMetaStoreClient.class.getClassLoader(), baseClass.getInterfaces(), handler);
   }


[42/50] [abbrv] hive git commit: schemaTool upgrade from 0.14.0 to 1.3.0 causes failure (Hari Subramaniyan via Sushanth Sowmyan, Thejas Nair) (addendum fix)

Posted by xu...@apache.org.
schemaTool upgrade from 0.14.0 to 1.3.0 causes failure (Hari Subramaniyan via Sushanth Sowmyan, Thejas Nair) (addendum fix)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0af1d6e7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0af1d6e7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0af1d6e7

Branch: refs/heads/beeline-cli
Commit: 0af1d6e7d36fbd8071a35692f51d72eb86b0e9da
Parents: 42f88ca
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu May 7 12:07:17 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Thu May 7 12:07:17 2015 -0700

----------------------------------------------------------------------
 metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql | 2 +-
 metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0af1d6e7/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
index 07dce8f..a205ac3 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
@@ -216,7 +216,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
   KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`)
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
   CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
   CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)

http://git-wip-us.apache.org/repos/asf/hive/blob/0af1d6e7/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
index 19ae264..bc63f0d 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
@@ -216,7 +216,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
   KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`)
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
   CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
   CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)


[12/50] [abbrv] hive git commit: HIVE-9582 : HCatalog should use IMetaStoreClient interface (Thiruvel Thirumoolan, reviewed by Sushanth Sowmyan, Thejas Nair)

Posted by xu...@apache.org.
HIVE-9582 : HCatalog should use IMetaStoreClient interface (Thiruvel Thirumoolan, reviewed by Sushanth Sowmyan, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/45307c10
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/45307c10
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/45307c10

Branch: refs/heads/beeline-cli
Commit: 45307c10e472e7dd42b28310f9adf7afe27bf6d7
Parents: c011673
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 02:32:06 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 02:34:23 2015 -0700

----------------------------------------------------------------------
 .../apache/hive/hcatalog/common/HCatUtil.java   | 37 ++++++---
 .../hive/hcatalog/common/HiveClientCache.java   | 85 +++++++++++++-------
 .../DefaultOutputCommitterContainer.java        |  6 +-
 .../mapreduce/FileOutputCommitterContainer.java | 14 ++--
 .../mapreduce/FileOutputFormatContainer.java    |  8 +-
 .../hcatalog/mapreduce/HCatOutputFormat.java    |  6 +-
 .../hcatalog/mapreduce/InitializeInput.java     |  6 +-
 .../hive/hcatalog/mapreduce/Security.java       | 10 +--
 .../hcatalog/common/TestHiveClientCache.java    | 37 +++++----
 .../hcatalog/mapreduce/HCatMapReduceTest.java   |  2 +-
 .../hcatalog/mapreduce/TestPassProperties.java  |  2 +-
 .../apache/hive/hcatalog/pig/PigHCatUtil.java   | 10 +--
 .../streaming/AbstractRecordWriter.java         | 11 ++-
 .../hive/hcatalog/streaming/HiveEndPoint.java   |  9 ++-
 .../hive/hcatalog/api/HCatClientHMSImpl.java    | 17 ++--
 .../hcatalog/templeton/CompleteDelegator.java   |  6 +-
 .../hcatalog/templeton/SecureProxySupport.java  |  9 ++-
 .../templeton/tool/TempletonControllerJob.java  |  7 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |  3 +
 19 files changed, 173 insertions(+), 112 deletions(-)
----------------------------------------------------------------------
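
The point of this refactor is programming to the interface: once HCatalog
holds an IMetaStoreClient instead of the concrete HiveMetaStoreClient, the
same call sites can receive a plain client, the retrying dynamic proxy, or a
cached client without change. A toy illustration of the substitution
(invented names, not Hive classes):

interface MetaClient {  // stands in for IMetaStoreClient
  String getTable(String db, String table) throws Exception;
}

class DirectClient implements MetaClient {
  public String getTable(String db, String table) {
    return db + "." + table;
  }
}

public class InterfaceDemo {
  // Accepting the interface means the concrete implementation can later be
  // swapped for a caching or retrying wrapper with no caller changes.
  static String describe(MetaClient client) throws Exception {
    return client.getTable("default", "over10k");
  }

  public static void main(String[] args) throws Exception {
    System.out.println(describe(new DirectClient()));
  }
}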


http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
index 63909b8..3ee30ed 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
@@ -38,9 +38,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -175,7 +175,7 @@ public class HCatUtil {
     }
   }
 
-  public static Table getTable(HiveMetaStoreClient client, String dbName, String tableName)
+  public static Table getTable(IMetaStoreClient client, String dbName, String tableName)
     throws NoSuchObjectException, TException, MetaException {
     return new Table(client.getTable(dbName, tableName));
   }
@@ -538,17 +538,17 @@ public class HCatUtil {
    * @throws MetaException When HiveMetaStoreClient couldn't be created
    * @throws IOException
    */
-  public static HiveMetaStoreClient getHiveClient(HiveConf hiveConf)
-    throws MetaException, IOException {
+  public static IMetaStoreClient getHiveMetastoreClient(HiveConf hiveConf)
+      throws MetaException, IOException {
 
     if (hiveConf.getBoolean(HCatConstants.HCAT_HIVE_CLIENT_DISABLE_CACHE, false)){
       // If cache is disabled, don't use it.
-      return HiveClientCache.getNonCachedHiveClient(hiveConf);
+      return HiveClientCache.getNonCachedHiveMetastoreClient(hiveConf);
     }
 
     // Singleton behaviour: create the cache instance if required.
     if (hiveClientCache == null) {
-      synchronized (HiveMetaStoreClient.class) {
+      synchronized (IMetaStoreClient.class) {
         if (hiveClientCache == null) {
           hiveClientCache = new HiveClientCache(hiveConf);
         }
@@ -561,11 +561,30 @@ public class HCatUtil {
     }
   }
 
-  private static HiveMetaStoreClient getNonCachedHiveClient(HiveConf hiveConf) throws MetaException{
-    return new HiveMetaStoreClient(hiveConf);
+  /**
+   * Get or create a hive client depending on whether it exists in the cache or not.
+   * @Deprecated : use {@link #getHiveMetastoreClient(HiveConf)} instead.
+   * This was deprecated in Hive 1.2, slated for removal in two versions
+   * (i.e. 1.2 & 1.3(projected) will have it, but it will be removed after that)
+   * @param hiveConf The hive configuration
+   * @return the client
+   * @throws MetaException When HiveMetaStoreClient couldn't be created
+   * @throws IOException
+   */
+  @Deprecated
+  public static HiveMetaStoreClient getHiveClient(HiveConf hiveConf) throws MetaException, IOException {
+    IMetaStoreClient imsc = getHiveMetastoreClient(hiveConf);
+    // Try piggybacking on the function that returns IMSC. Current implementation of the IMSC cache
+    // has CacheableMetaStoreClients, which are HMSC, so we can return them as-is. If not, it's okay
+    // for us to ignore the caching aspect and return a vanilla HMSC.
+    if (imsc instanceof HiveMetaStoreClient){
+      return (HiveMetaStoreClient)imsc;
+    } else {
+      return new HiveMetaStoreClient(hiveConf);
+    }
   }
 
-  public static void closeHiveClientQuietly(HiveMetaStoreClient client) {
+  public static void closeHiveClientQuietly(IMetaStoreClient client) {
     try {
       if (client != null)
         client.close();

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
index a001252..578b6ea 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
@@ -34,6 +34,7 @@ import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
@@ -54,7 +55,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 class HiveClientCache {
   public final static int DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS = 2 * 60;
 
-  final private Cache<HiveClientCacheKey, CacheableHiveMetaStoreClient> hiveCache;
+  final private Cache<HiveClientCacheKey, ICacheableMetaStoreClient> hiveCache;
   private static final Logger LOG = LoggerFactory.getLogger(HiveClientCache.class);
   private final int timeout;
   // This lock is used to make sure removalListener won't close a client that is being contemplated for returning by get()
@@ -79,7 +80,7 @@ class HiveClientCache {
     return threadId.get();
   }
 
-  public static HiveMetaStoreClient getNonCachedHiveClient(HiveConf hiveConf) throws MetaException {
+  public static IMetaStoreClient getNonCachedHiveMetastoreClient(HiveConf hiveConf) throws MetaException {
     return new HiveMetaStoreClient(hiveConf);
   }
 
@@ -92,11 +93,11 @@ class HiveClientCache {
    */
   public HiveClientCache(final int timeout) {
     this.timeout = timeout;
-    RemovalListener<HiveClientCacheKey, CacheableHiveMetaStoreClient> removalListener =
-      new RemovalListener<HiveClientCacheKey, CacheableHiveMetaStoreClient>() {
+    RemovalListener<HiveClientCacheKey, ICacheableMetaStoreClient> removalListener =
+      new RemovalListener<HiveClientCacheKey, ICacheableMetaStoreClient>() {
         @Override
-        public void onRemoval(RemovalNotification<HiveClientCacheKey, CacheableHiveMetaStoreClient> notification) {
-          CacheableHiveMetaStoreClient hiveMetaStoreClient = notification.getValue();
+        public void onRemoval(RemovalNotification<HiveClientCacheKey, ICacheableMetaStoreClient> notification) {
+          ICacheableMetaStoreClient hiveMetaStoreClient = notification.getValue();
           if (hiveMetaStoreClient != null) {
             synchronized (CACHE_TEARDOWN_LOCK) {
               hiveMetaStoreClient.setExpiredFromCache();
@@ -169,8 +170,8 @@ class HiveClientCache {
    */
   void closeAllClientsQuietly() {
     try {
-      ConcurrentMap<HiveClientCacheKey, CacheableHiveMetaStoreClient> elements = hiveCache.asMap();
-      for (CacheableHiveMetaStoreClient cacheableHiveMetaStoreClient : elements.values()) {
+      ConcurrentMap<HiveClientCacheKey, ICacheableMetaStoreClient> elements = hiveCache.asMap();
+      for (ICacheableMetaStoreClient cacheableHiveMetaStoreClient : elements.values()) {
         cacheableHiveMetaStoreClient.tearDown();
       }
     } catch (Exception e) {
@@ -191,24 +192,24 @@ class HiveClientCache {
    * @throws IOException
    * @throws LoginException
    */
-  public HiveMetaStoreClient get(final HiveConf hiveConf) throws MetaException, IOException, LoginException {
+  public ICacheableMetaStoreClient get(final HiveConf hiveConf) throws MetaException, IOException, LoginException {
     final HiveClientCacheKey cacheKey = HiveClientCacheKey.fromHiveConf(hiveConf, getThreadId());
-    CacheableHiveMetaStoreClient hiveMetaStoreClient = null;
+    ICacheableMetaStoreClient cacheableHiveMetaStoreClient = null;
     // the hmsc is not shared across threads. So the only way it could get closed while we are doing healthcheck
     // is if removalListener closes it. The synchronization takes care that removalListener won't do it
     synchronized (CACHE_TEARDOWN_LOCK) {
-      hiveMetaStoreClient = getOrCreate(cacheKey);
-      hiveMetaStoreClient.acquire();
+      cacheableHiveMetaStoreClient = getOrCreate(cacheKey);
+      cacheableHiveMetaStoreClient.acquire();
     }
-    if (!hiveMetaStoreClient.isOpen()) {
+    if (!cacheableHiveMetaStoreClient.isOpen()) {
       synchronized (CACHE_TEARDOWN_LOCK) {
         hiveCache.invalidate(cacheKey);
-        hiveMetaStoreClient.close();
-        hiveMetaStoreClient = getOrCreate(cacheKey);
-        hiveMetaStoreClient.acquire();
+        cacheableHiveMetaStoreClient.close();
+        cacheableHiveMetaStoreClient = getOrCreate(cacheKey);
+        cacheableHiveMetaStoreClient.acquire();
       }
     }
-    return hiveMetaStoreClient;
+    return cacheableHiveMetaStoreClient;
   }
 
   /**
@@ -219,11 +220,12 @@ class HiveClientCache {
    * @throws MetaException
    * @throws LoginException
    */
-  private CacheableHiveMetaStoreClient getOrCreate(final HiveClientCacheKey cacheKey) throws IOException, MetaException, LoginException {
+  private ICacheableMetaStoreClient getOrCreate(final HiveClientCacheKey cacheKey)
+      throws IOException, MetaException, LoginException {
     try {
-      return hiveCache.get(cacheKey, new Callable<CacheableHiveMetaStoreClient>() {
+      return hiveCache.get(cacheKey, new Callable<ICacheableMetaStoreClient>() {
         @Override
-        public CacheableHiveMetaStoreClient call() throws MetaException {
+        public ICacheableMetaStoreClient call() throws MetaException {
           return new CacheableHiveMetaStoreClient(cacheKey.getHiveConf(), timeout);
         }
       });
@@ -289,28 +291,48 @@ class HiveClientCache {
     }
   }
 
+  public interface ICacheableMetaStoreClient extends IMetaStoreClient {
+
+    void acquire();
+
+    void release();
+
+    void setExpiredFromCache();
+
+    AtomicInteger getUsers();
+
+    boolean isClosed();
+
+    boolean isOpen();
+
+    void tearDownIfUnused();
+
+    void tearDown();
+  }
+
   /**
    * Add # of current users on HiveMetaStoreClient, so that the client can be cleaned when no one is using it.
    */
-  public static class CacheableHiveMetaStoreClient extends HiveMetaStoreClient {
+  static class CacheableHiveMetaStoreClient extends HiveMetaStoreClient implements ICacheableMetaStoreClient {
+
     private final AtomicInteger users = new AtomicInteger(0);
     private volatile boolean expiredFromCache = false;
     private boolean isClosed = false;
     private final long expiryTime;
     private static final int EXPIRY_TIME_EXTENSION_IN_MILLIS = 60 * 1000;
 
-    public CacheableHiveMetaStoreClient(final HiveConf conf, final int timeout) throws MetaException {
+    CacheableHiveMetaStoreClient(final HiveConf conf, final Integer timeout) throws MetaException {
       super(conf);
       // Extend the expiry time with some extra time on top of the guava expiry time to make sure
       // that items that have been closed() have definitely expired and will never be returned by guava.
       this.expiryTime = System.currentTimeMillis() + timeout * 1000 + EXPIRY_TIME_EXTENSION_IN_MILLIS;
     }
 
-    private void acquire() {
+    public void acquire() {
       users.incrementAndGet();
     }
 
-    private void release() {
+    public void release() {
       users.decrementAndGet();
     }
 
@@ -322,15 +344,22 @@ class HiveClientCache {
       return isClosed;
     }
 
+    /*
+     * Used only for debugging or testing purposes.
+     */
+    public AtomicInteger getUsers() {
+      return users;
+    }
+
     /**
      * Make a call to the Hive metastore and see if the client is still usable. Some calls where the user provides
      * invalid data render the client unusable for future use (example: creating a table with a very long table name)
      * @return
      */
-    protected boolean isOpen() {
+    public boolean isOpen() {
       try {
         // Look for an unlikely database name and see if either MetaException or TException is thrown
-        this.getDatabases("NonExistentDatabaseUsedForHealthCheck");
+        super.getDatabases("NonExistentDatabaseUsedForHealthCheck");
       } catch (TException e) {
         return false;
       }
@@ -354,7 +383,7 @@ class HiveClientCache {
      *  1. There are no active users
      *  2. It has expired from the cache
      */
-    private void tearDownIfUnused() {
+    public void tearDownIfUnused() {
       if (users.get() == 0 && expiredFromCache) {
         this.tearDown();
       }
@@ -363,7 +392,7 @@ class HiveClientCache {
     /**
      * Close if not closed already
      */
-    protected synchronized void tearDown() {
+    public synchronized void tearDown() {
       try {
         if (!isClosed) {
           super.close();

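For orientation before the remaining call-site changes: a minimal sketch of the acquire/release lifecycle the new interface formalizes (illustration only, not part of the commit; the timeout value and database pattern are made up, and close() is assumed to release the cached reference, as the tests later in this commit suggest):

    // Obtain a client from the cache; get() has already called acquire() on it.
    HiveClientCache cache = new HiveClientCache(120);
    IMetaStoreClient client = cache.get(hiveConf);
    try {
      client.getDatabases("default");   // any ordinary metastore call
    } finally {
      // close() drops this user's reference; the underlying client is only
      // torn down once it has expired from the cache and has no active users.
      client.close();
    }
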
http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DefaultOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
index cead40d..90c2d71 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
@@ -22,7 +22,7 @@ package org.apache.hive.hcatalog.mapreduce;
 import java.io.IOException;
 
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
@@ -90,10 +90,10 @@ class DefaultOutputCommitterContainer extends OutputCommitterContainer {
     getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));
 
     //Cancel HCat and JobTracker tokens
-    HiveMetaStoreClient client = null;
+    IMetaStoreClient client = null;
     try {
       HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
-      client = HCatUtil.getHiveClient(hiveConf);
+      client = HCatUtil.getHiveMetastoreClient(hiveConf);
       String tokenStrForm = client.getTokenStrForm();
       if (tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
         client.cancelDelegationToken(tokenStrForm);

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 8146d85..367f4ea 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -43,7 +44,6 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -466,7 +466,7 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
    * @throws org.apache.hadoop.hive.metastore.api.MetaException the meta exception
    * @throws org.apache.thrift.TException the t exception
    */
-  private void updateTableSchema(HiveMetaStoreClient client, Table table,
+  private void updateTableSchema(IMetaStoreClient client, Table table,
                    HCatSchema partitionSchema) throws IOException, InvalidOperationException, MetaException, TException {
 
 
@@ -775,12 +775,12 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
       return;
     }
 
-    HiveMetaStoreClient client = null;
+    IMetaStoreClient client = null;
     HCatTableInfo tableInfo = jobInfo.getTableInfo();
     List<Partition> partitionsAdded = new ArrayList<Partition>();
     try {
       HiveConf hiveConf = HCatUtil.getHiveConf(conf);
-      client = HCatUtil.getHiveClient(hiveConf);
+      client = HCatUtil.getHiveMetastoreClient(hiveConf);
       StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(),table.getParameters());
 
       FileStatus tblStat = fs.getFileStatus(tblPath);
@@ -952,7 +952,7 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
           // metastore
           for (Partition p : partitionsAdded) {
             client.dropPartition(tableInfo.getDatabaseName(),
-                tableInfo.getTableName(), p.getValues());
+                tableInfo.getTableName(), p.getValues(), true);
           }
         } catch (Exception te) {
           // Keep cause as the original exception
@@ -990,11 +990,11 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
 
   private void cancelDelegationTokens(JobContext context) throws IOException{
     LOG.info("Cancelling delegation token for the job.");
-    HiveMetaStoreClient client = null;
+    IMetaStoreClient client = null;
     try {
       HiveConf hiveConf = HCatUtil
           .getHiveConf(context.getConfiguration());
-      client = HCatUtil.getHiveClient(hiveConf);
+      client = HCatUtil.getHiveMetastoreClient(hiveConf);
       // cancel the deleg. tokens that were acquired for this job now that
       // we are done - we should cancel if the tokens were acquired by
       // HCatOutputFormat and not if they were supplied by Oozie.

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
index 1cd5306..001b59b 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
@@ -22,9 +22,9 @@ package org.apache.hive.hcatalog.mapreduce;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -113,10 +113,10 @@ class FileOutputFormatContainer extends OutputFormatContainer {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
     OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context.getConfiguration());
-    HiveMetaStoreClient client = null;
+    IMetaStoreClient client = null;
     try {
       HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
-      client = HCatUtil.getHiveClient(hiveConf);
+      client = HCatUtil.getHiveMetastoreClient(hiveConf);
       handleDuplicatePublish(context,
         jobInfo,
         client,
@@ -163,7 +163,7 @@ class FileOutputFormatContainer extends OutputFormatContainer {
    * @throws org.apache.thrift.TException
    */
   private static void handleDuplicatePublish(JobContext context, OutputJobInfo outputInfo,
-      HiveMetaStoreClient client, Table table)
+      IMetaStoreClient client, Table table)
       throws IOException, MetaException, TException, NoSuchObjectException {
 
     /*

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
index 6947398..f9e71f0 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -82,12 +82,12 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
   @SuppressWarnings("unchecked")
   public static void setOutput(Configuration conf, Credentials credentials,
                  OutputJobInfo outputJobInfo) throws IOException {
-    HiveMetaStoreClient client = null;
+    IMetaStoreClient client = null;
 
     try {
 
       HiveConf hiveConf = HCatUtil.getHiveConf(conf);
-      client = HCatUtil.getHiveClient(hiveConf);
+      client = HCatUtil.getHiveMetastoreClient(hiveConf);
       Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(),
         outputJobInfo.getTableName());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InitializeInput.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InitializeInput.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InitializeInput.java
index 1980ef5..2f07be1 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InitializeInput.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InitializeInput.java
@@ -27,8 +27,8 @@ import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.ql.metadata.Table;
@@ -93,7 +93,7 @@ class InitializeInput {
    */
   private static InputJobInfo getInputJobInfo(
     Configuration conf, InputJobInfo inputJobInfo, String locationFilter) throws Exception {
-    HiveMetaStoreClient client = null;
+    IMetaStoreClient client = null;
     HiveConf hiveConf = null;
     try {
       if (conf != null) {
@@ -101,7 +101,7 @@ class InitializeInput {
       } else {
         hiveConf = new HiveConf(HCatInputFormat.class);
       }
-      client = HCatUtil.getHiveClient(hiveConf);
+      client = HCatUtil.getHiveMetastoreClient(hiveConf);
       Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(),
         inputJobInfo.getTableName());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/Security.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/Security.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/Security.java
index 39ef86e..9b62195 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/Security.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/Security.java
@@ -26,7 +26,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
@@ -103,7 +103,7 @@ final class Security {
   void handleSecurity(
     Credentials credentials,
     OutputJobInfo outputJobInfo,
-    HiveMetaStoreClient client,
+    IMetaStoreClient client,
     Configuration conf,
     boolean harRequested)
     throws IOException, MetaException, TException, Exception {
@@ -136,7 +136,7 @@ final class Security {
         // hcat normally in OutputCommitter.commitJob()
         // when the JobTracker in Hadoop MapReduce starts supporting renewal of
         // arbitrary tokens, the renewer should be the principal of the JobTracker
-        hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName()), tokenSignature);
+        hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName(), ugi.getUserName()), tokenSignature);
 
         if (harRequested) {
           TokenSelector<? extends TokenIdentifier> jtTokenSelector =
@@ -165,7 +165,7 @@ final class Security {
   void handleSecurity(
     Job job,
     OutputJobInfo outputJobInfo,
-    HiveMetaStoreClient client,
+    IMetaStoreClient client,
     Configuration conf,
     boolean harRequested)
     throws IOException, MetaException, TException, Exception {
@@ -175,7 +175,7 @@ final class Security {
   // we should cancel hcat token if it was acquired by hcat
   // and not if it was supplied (ie Oozie). In the latter
   // case the HCAT_KEY_TOKEN_SIGNATURE property in the conf will not be set
-  void cancelToken(HiveMetaStoreClient client, JobContext context) throws IOException, MetaException {
+  void cancelToken(IMetaStoreClient client, JobContext context) throws IOException, MetaException {
     String tokenStrForm = client.getTokenStrForm();
     if (tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
       try {

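The getDelegationToken calls above also move from the one-argument HiveMetaStoreClient form to the two-argument IMetaStoreClient form; a hedged sketch of the difference (ugi as in the surrounding code):

    // Old concrete-class form: only the renewer was passed.
    //   String token = client.getDelegationToken(ugi.getUserName());
    // IMetaStoreClient form: token owner first, then renewer.
    String token = client.getDelegationToken(ugi.getUserName(), ugi.getUserName());
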
http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java
index 63a5548..b2c9c7a 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java
@@ -20,7 +20,7 @@ package org.apache.hive.hcatalog.common;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -76,29 +76,28 @@ public class TestHiveClientCache {
 
   @Test
   public void testCacheHit() throws IOException, MetaException, LoginException {
-
     HiveClientCache cache = new HiveClientCache(1000);
-    HiveMetaStoreClient client = cache.get(hiveConf);
+    HiveClientCache.ICacheableMetaStoreClient client = cache.get(hiveConf);
     assertNotNull(client);
     client.close(); // close shouldn't matter
 
     // Setting an unimportant configuration value should still return the same client
     hiveConf.setIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS, 10);
-    HiveMetaStoreClient client2 = cache.get(hiveConf);
+    HiveClientCache.ICacheableMetaStoreClient client2 = cache.get(hiveConf);
     assertNotNull(client2);
-    assertEquals(client, client2);
+    assertEquals(client.getUsers(), client2.getUsers());
     client2.close();
   }
 
   @Test
   public void testCacheMiss() throws IOException, MetaException, LoginException {
     HiveClientCache cache = new HiveClientCache(1000);
-    HiveMetaStoreClient client = cache.get(hiveConf);
+    IMetaStoreClient client = cache.get(hiveConf);
     assertNotNull(client);
 
     // Set a different URI, as it is one of the criteria deciding whether to return the same client or not
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string equivalence; even spaces make them different
-    HiveMetaStoreClient client2 = cache.get(hiveConf);
+    IMetaStoreClient client2 = cache.get(hiveConf);
     assertNotNull(client2);
     assertNotSame(client, client2);
   }
@@ -110,11 +109,11 @@ public class TestHiveClientCache {
   @Test
   public void testCacheExpiry() throws IOException, MetaException, LoginException, InterruptedException {
     HiveClientCache cache = new HiveClientCache(1);
-    HiveClientCache.CacheableHiveMetaStoreClient client = (HiveClientCache.CacheableHiveMetaStoreClient) cache.get(hiveConf);
+    HiveClientCache.ICacheableMetaStoreClient client = cache.get(hiveConf);
     assertNotNull(client);
 
     Thread.sleep(2500);
-    HiveMetaStoreClient client2 = cache.get(hiveConf);
+    HiveClientCache.ICacheableMetaStoreClient client2 = cache.get(hiveConf);
     client.close();
     assertTrue(client.isClosed()); // close() after *expiry time* and *a cache access* should have torn down the client
 
@@ -132,21 +131,21 @@ public class TestHiveClientCache {
   public void testMultipleThreadAccess() throws ExecutionException, InterruptedException {
     final HiveClientCache cache = new HiveClientCache(1000);
 
-    class GetHiveClient implements Callable<HiveMetaStoreClient> {
+    class GetHiveClient implements Callable<IMetaStoreClient> {
       @Override
-      public HiveMetaStoreClient call() throws IOException, MetaException, LoginException {
+      public IMetaStoreClient call() throws IOException, MetaException, LoginException {
         return cache.get(hiveConf);
       }
     }
 
     ExecutorService executor = Executors.newFixedThreadPool(2);
 
-    Callable<HiveMetaStoreClient> worker1 = new GetHiveClient();
-    Callable<HiveMetaStoreClient> worker2 = new GetHiveClient();
-    Future<HiveMetaStoreClient> clientFuture1 = executor.submit(worker1);
-    Future<HiveMetaStoreClient> clientFuture2 = executor.submit(worker2);
-    HiveMetaStoreClient client1 = clientFuture1.get();
-    HiveMetaStoreClient client2 = clientFuture2.get();
+    Callable<IMetaStoreClient> worker1 = new GetHiveClient();
+    Callable<IMetaStoreClient> worker2 = new GetHiveClient();
+    Future<IMetaStoreClient> clientFuture1 = executor.submit(worker1);
+    Future<IMetaStoreClient> clientFuture2 = executor.submit(worker2);
+    IMetaStoreClient client1 = clientFuture1.get();
+    IMetaStoreClient client2 = clientFuture2.get();
     assertNotNull(client1);
     assertNotNull(client2);
     assertNotSame(client1, client2);
@@ -155,9 +154,9 @@ public class TestHiveClientCache {
   @Test
   public void testCloseAllClients() throws IOException, MetaException, LoginException {
     final HiveClientCache cache = new HiveClientCache(1000);
-    HiveClientCache.CacheableHiveMetaStoreClient client1 = (HiveClientCache.CacheableHiveMetaStoreClient) cache.get(hiveConf);
+    HiveClientCache.ICacheableMetaStoreClient client1 = cache.get(hiveConf);
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string equivalence; even spaces make them different
-    HiveClientCache.CacheableHiveMetaStoreClient client2 = (HiveClientCache.CacheableHiveMetaStoreClient) cache.get(hiveConf);
+    HiveClientCache.ICacheableMetaStoreClient client2 = cache.get(hiveConf);
     cache.closeAllClientsQuietly();
     assertTrue(client1.isClosed());
     assertTrue(client2.isClosed());

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
index c98d947..f437079 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
@@ -147,7 +147,7 @@ public abstract class HCatMapReduceTest extends HCatBaseTest {
     // Hack to initialize cache with 0 expiry time causing it to return a new hive client every time
    // Otherwise the cache doesn't play well with the second test method when the client gets closed() in the
     // tearDown() of the previous test
-    HCatUtil.getHiveClient(hiveConf);
+    HCatUtil.getHiveMetastoreClient(hiveConf);
 
     MapCreate.writeCount = 0;
     MapRead.readCount = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
index f8a0af1..735ab5f 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
@@ -109,7 +109,7 @@ public class TestPassProperties {
       new FileOutputCommitterContainer(job, null).cleanupJob(job);
     } catch (Exception e) {
       caughtException = true;
-      assertTrue(e.getMessage().contains(
+      assertTrue(e.getCause().getMessage().contains(
           "Could not connect to meta store using any of the URIs provided"));
     }
     assertTrue(caughtException);

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
----------------------------------------------------------------------
diff --git a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
index 48a40b1..337f4fb 100644
--- a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
+++ b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
@@ -24,7 +24,6 @@ import java.sql.Date;
 import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Calendar;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -36,7 +35,7 @@ import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.Table;
@@ -63,7 +62,6 @@ import org.apache.pig.impl.logicalLayer.schema.Schema;
 import org.apache.pig.impl.util.UDFContext;
 import org.apache.pig.impl.util.Utils;
 import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -141,7 +139,7 @@ class PigHCatUtil {
     return job.getConfiguration().get(HCatConstants.HCAT_METASTORE_PRINCIPAL);
   }
 
-  private static HiveMetaStoreClient getHiveMetaClient(String serverUri,
+  private static IMetaStoreClient getHiveMetaClient(String serverUri,
                              String serverKerberosPrincipal,
                              Class<?> clazz,
                              Job job) throws Exception {
@@ -163,7 +161,7 @@ class PigHCatUtil {
     }
 
     try {
-      return HCatUtil.getHiveClient(hiveConf);
+      return HCatUtil.getHiveMetastoreClient(hiveConf);
     } catch (Exception e) {
       throw new Exception("Could not instantiate a HiveMetaStoreClient connecting to server uri:[" + serverUri + "]", e);
     }
@@ -203,7 +201,7 @@ class PigHCatUtil {
     String dbName = dbTablePair.first;
     String tableName = dbTablePair.second;
     Table table = null;
-    HiveMetaStoreClient client = null;
+    IMetaStoreClient client = null;
     try {
       client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class, job);
       table = HCatUtil.getTable(client, dbName, tableName);

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
index 1c85ab5..ed46bca 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
@@ -24,7 +24,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.thrift.TException;
 
 import java.io.IOException;
@@ -46,7 +47,7 @@ abstract class AbstractRecordWriter implements RecordWriter {
   final HiveEndPoint endPoint;
   final Table tbl;
 
-  final HiveMetaStoreClient msClient;
+  final IMetaStoreClient msClient;
   RecordUpdater updater = null;
 
   private final int totalBuckets;
@@ -62,7 +63,7 @@ abstract class AbstractRecordWriter implements RecordWriter {
     this.conf = conf!=null ? conf
                 : HiveEndPoint.createHiveConf(DelimitedInputWriter.class, endPoint.metaStoreUri);
     try {
-      msClient = new HiveMetaStoreClient(this.conf);
+      msClient = HCatUtil.getHiveMetastoreClient(this.conf);
       this.tbl = msClient.getTable(endPoint.database, endPoint.table);
       this.partitionPath = getPathForEndPoint(msClient, endPoint);
       this.totalBuckets = tbl.getSd().getNumBuckets();
@@ -80,6 +81,8 @@ abstract class AbstractRecordWriter implements RecordWriter {
       throw new StreamingException(e.getMessage(), e);
     } catch (ClassNotFoundException e) {
       throw new StreamingException(e.getMessage(), e);
+    } catch (IOException e) {
+      throw new StreamingException(e.getMessage(), e);
     }
   }
 
@@ -147,7 +150,7 @@ abstract class AbstractRecordWriter implements RecordWriter {
     }
   }
 
-  private Path getPathForEndPoint(HiveMetaStoreClient msClient, HiveEndPoint endPoint)
+  private Path getPathForEndPoint(IMetaStoreClient msClient, HiveEndPoint endPoint)
           throws StreamingException {
     try {
       String location;

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index a08f2f9..3c25486 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -22,7 +22,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.LockComponentBuilder;
 import org.apache.hadoop.hive.metastore.LockRequestBuilder;
@@ -40,6 +39,7 @@ import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hive.hcatalog.common.HCatUtil;
 
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.thrift.TException;
@@ -445,10 +445,13 @@ public class HiveEndPoint {
         conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,true);
       }
       try {
-        return new HiveMetaStoreClient(conf);
+        return HCatUtil.getHiveMetastoreClient(conf);
       } catch (MetaException e) {
         throw new ConnectionError("Error connecting to Hive Metastore URI: "
-                + endPoint.metaStoreUri, e);
+                + endPoint.metaStoreUri + ". " + e.getMessage(), e);
+      } catch (IOException e) {
+        throw new ConnectionError("Error connecting to Hive Metastore URI: "
+            + endPoint.metaStoreUri + ". " + e.getMessage(), e);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
index 3b2cd38..3a69581 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
@@ -85,7 +84,7 @@ import javax.annotation.Nullable;
 public class HCatClientHMSImpl extends HCatClient {
 
   private static final Logger LOG = LoggerFactory.getLogger(HCatClientHMSImpl.class);
-  private HiveMetaStoreClient hmsClient;
+  private IMetaStoreClient hmsClient;
   private Configuration config;
   private HiveConf hiveConfig;
 
@@ -96,7 +95,9 @@ public class HCatClientHMSImpl extends HCatClient {
     try {
       dbNames = hmsClient.getDatabases(pattern);
     } catch (MetaException exp) {
-      throw new HCatException("MetaException while listing db names", exp);
+      throw new HCatException("MetaException while listing db names. " + exp.getMessage(), exp);
+    } catch (TException e) {
+      throw new HCatException("Transport Exception while listing db names. " + e.getMessage(), e);
     }
     return dbNames;
   }
@@ -172,8 +173,12 @@ public class HCatClientHMSImpl extends HCatClient {
     try {
       tableNames = hmsClient.getTables(checkDB(dbName), tablePattern);
     } catch (MetaException e) {
-      throw new HCatException(
-        "MetaException while fetching table names.", e);
+      throw new HCatException("MetaException while fetching table names. " + e.getMessage(), e);
+    } catch (UnknownDBException e) {
+      throw new HCatException("UnknownDB " + dbName + " while fetching table names.", e);
+    } catch (TException e) {
+      throw new HCatException("Transport exception while fetching table names. "
+          + e.getMessage(), e);
     }
     return tableNames;
   }
@@ -815,7 +820,7 @@ public class HCatClientHMSImpl extends HCatClient {
     this.config = conf;
     try {
       hiveConfig = HCatUtil.getHiveConf(config);
-      hmsClient = HCatUtil.getHiveClient(hiveConfig);
+      hmsClient = HCatUtil.getHiveMetastoreClient(hiveConfig);
     } catch (MetaException exp) {
       throw new HCatException("MetaException while creating HMS client",
         exp);

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/CompleteDelegator.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/CompleteDelegator.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/CompleteDelegator.java
index 1b9663d..e3be5b7 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/CompleteDelegator.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/CompleteDelegator.java
@@ -26,7 +26,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.hive.hcatalog.templeton.tool.DelegationTokenCache;
 import org.apache.hive.hcatalog.templeton.tool.JobState;
@@ -94,13 +94,13 @@ public class CompleteDelegator extends TempletonDelegator {
       return new CompleteBean("Callback sent");
     } finally {
       state.close();
-      HiveMetaStoreClient client = null;
+      IMetaStoreClient client = null;
       try {
         if(cancelMetastoreToken) {
           String metastoreTokenStrForm =
                   DelegationTokenCache.getStringFormTokenCache().getDelegationToken(id);
           if(metastoreTokenStrForm != null) {
-            client = HCatUtil.getHiveClient(new HiveConf());
+            client = HCatUtil.getHiveMetastoreClient(new HiveConf());
             client.cancelDelegationToken(metastoreTokenStrForm);
             LOG.debug("Cancelled token for jobId=" + id + " status from JT=" + jobStatus);
             DelegationTokenCache.getStringFormTokenCache().removeDelegationToken(id);

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
index 8ae61a1..b4687b5 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
@@ -30,12 +30,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.thrift.TException;
 
 /**
@@ -175,8 +176,8 @@ public class SecureProxySupport {
 
   private String buildHcatDelegationToken(String user)
     throws IOException, InterruptedException, MetaException, TException {
-    HiveConf c = new HiveConf();
-    final HiveMetaStoreClient client = new HiveMetaStoreClient(c);
+    final HiveConf c = new HiveConf();
+    final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
     LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
     final TokenWrapper twrapper = new TokenWrapper();
     final UserGroupInformation ugi = UgiFactory.getUgi(user);
@@ -184,7 +185,7 @@ public class SecureProxySupport {
       public String run()
         throws IOException, MetaException, TException {
         String u = ugi.getUserName();
-        return client.getDelegationToken(u);
+        return client.getDelegationToken(c.getUser(), u);
       }
     });
     return s;

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
index 1f8ebf3..349bd5c 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobClient;
@@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIden
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Tool;
+import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.hive.hcatalog.templeton.AppConfig;
 import org.apache.hive.hcatalog.templeton.SecureProxySupport;
 import org.apache.hive.hcatalog.templeton.UgiFactory;
@@ -176,12 +177,12 @@ public class TempletonControllerJob extends Configured implements Tool, JobSubmi
     return real.doAs(new PrivilegedExceptionAction<String>() {
       @Override
       public String run() throws IOException, TException, InterruptedException  {
-        final HiveMetaStoreClient client = new HiveMetaStoreClient(c);
+        final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
         return ugi.doAs(new PrivilegedExceptionAction<String>() {
           @Override
           public String run() throws IOException, TException, InterruptedException {
             String u = ugi.getUserName();
-            return client.getDelegationToken(u);
+            return client.getDelegationToken(c.getUser(), u);
           }
         });
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/45307c10/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 129a98d..341b0ca 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.api.TxnOpenException;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.thrift.TException;
 
+import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 
@@ -1115,6 +1116,8 @@ public interface IMetaStoreClient {
    */
   void cancelDelegationToken(String tokenStrForm) throws MetaException, TException;
 
+  public String getTokenStrForm() throws IOException;
+
   void createFunction(Function func)
       throws InvalidObjectException, MetaException, TException;
 

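Promoting getTokenStrForm() into IMetaStoreClient is what lets the call sites earlier in this commit cancel delegation tokens without downcasting to the concrete client; a minimal sketch of that pattern (hiveConf assumed to be already configured):

    IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(hiveConf);
    String tokenStrForm = client.getTokenStrForm();
    if (tokenStrForm != null) {
      // Cancel only tokens this process acquired itself.
      client.cancelDelegationToken(tokenStrForm);
    }
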

[37/50] [abbrv] hive git commit: HIVE-9365: The Metastore should take port configuration from hive-site.xml (Reuben Kuhnert, reviewed by Sergio Pena)

Posted by xu...@apache.org.
HIVE-9365: The Metastore should take port configuration from hive-site.xml (Reuben Kuhnert, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d434f645
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d434f645
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d434f645

Branch: refs/heads/beeline-cli
Commit: d434f6459050fbb1b12875b3015474fed2b6b914
Parents: e24662c
Author: Sergio Pena <se...@cloudera.com>
Authored: Thu May 7 10:05:45 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu May 7 10:05:45 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    | 27 +++++----
 .../hive/metastore/TestHiveMetastoreCli.java    | 63 ++++++++++++++++++++
 3 files changed, 82 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


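Before the diff itself: the port becomes an ordinary HiveConf variable, so it can be read from hive-site.xml or set programmatically like any other setting. A minimal sketch (9090 is purely an illustrative value):

    HiveConf conf = new HiveConf();
    // Equivalent to setting hive.metastore.port in hive-site.xml:
    HiveConf.setIntVar(conf, HiveConf.ConfVars.METASTORE_SERVER_PORT, 9090);
    int port = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_SERVER_PORT); // 9090
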
http://git-wip-us.apache.org/repos/asf/hive/blob/d434f645/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 85e732f..5d5a928 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -115,6 +115,7 @@ public class HiveConf extends Configuration {
   public static final HiveConf.ConfVars[] metaVars = {
       HiveConf.ConfVars.METASTOREWAREHOUSE,
       HiveConf.ConfVars.METASTOREURIS,
+      HiveConf.ConfVars.METASTORE_SERVER_PORT,
       HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
       HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
       HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
@@ -392,7 +393,7 @@ public class HiveConf extends Configuration {
         "Number of retries while opening a connection to metastore"),
     METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
         "Number of retries upon failure of Thrift metastore calls"),
-
+    METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
     METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
         new TimeValidator(TimeUnit.SECONDS),
         "Number of seconds for the client to wait between consecutive connection attempts"),

http://git-wip-us.apache.org/repos/asf/hive/blob/d434f645/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 3f267ff..986579a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -259,7 +259,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
   /**
    * default port on which to start the Hive server
    */
-  private static final int DEFAULT_HIVE_METASTORE_PORT = 9083;
   public static final String ADMIN = "admin";
   public static final String PUBLIC = "public";
 
@@ -5775,18 +5774,19 @@ public class HiveMetaStore extends ThriftHiveMetastore {
    *
    */
   static public class HiveMetastoreCli extends CommonCliOptions {
-    int port = DEFAULT_HIVE_METASTORE_PORT;
+    private int port;
 
     @SuppressWarnings("static-access")
-    public HiveMetastoreCli() {
+    public HiveMetastoreCli(Configuration configuration) {
       super("hivemetastore", true);
+      this.port = HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT);
 
       // -p port
       OPTIONS.addOption(OptionBuilder
           .hasArg()
           .withArgName("port")
           .withDescription("Hive Metastore port number, default:"
-              + DEFAULT_HIVE_METASTORE_PORT)
+              + this.port)
           .create('p'));
 
     }
@@ -5803,21 +5803,26 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             "This usage has been deprecated, consider using the new command "
                 + "line syntax (run with -h to see usage information)");
 
-        port = new Integer(args[0]);
+        this.port = new Integer(args[0]);
       }
 
       // notice that command line options take precedence over the
       // deprecated (old style) naked args...
+
       if (commandLine.hasOption('p')) {
-        port = Integer.parseInt(commandLine.getOptionValue('p'));
+        this.port = Integer.parseInt(commandLine.getOptionValue('p'));
       } else {
         // legacy handling
         String metastorePort = System.getenv("METASTORE_PORT");
         if (metastorePort != null) {
-          port = Integer.parseInt(metastorePort);
+          this.port = Integer.parseInt(metastorePort);
         }
       }
     }
+
+    public int getPort() {
+      return this.port;
+    }
   }
 
   /**
@@ -5825,7 +5830,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
    */
   public static void main(String[] args) throws Throwable {
     HiveConf.setLoadMetastoreConfig(true);
-    HiveMetastoreCli cli = new HiveMetastoreCli();
+    HiveConf conf = new HiveConf(HMSHandler.class);
+
+    HiveMetastoreCli cli = new HiveMetastoreCli(conf);
     cli.parse(args);
     final boolean isCliVerbose = cli.isVerbose();
     // NOTE: It is critical to do this prior to initializing log4j, otherwise
@@ -5851,7 +5858,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         System.err.println(msg);
       }
 
-      HiveConf conf = new HiveConf(HMSHandler.class);
 
       // set all properties specified on the command line
       for (Map.Entry<Object, Object> item : hiveconf.entrySet()) {
@@ -5870,11 +5876,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
       });
 
+
       Lock startLock = new ReentrantLock();
       Condition startCondition = startLock.newCondition();
       AtomicBoolean startedServing = new AtomicBoolean();
       startMetaStoreThreads(conf, startLock, startCondition, startedServing);
-      startMetaStore(cli.port, ShimLoader.getHadoopThriftAuthBridge(), conf, startLock,
+      startMetaStore(cli.getPort(), ShimLoader.getHadoopThriftAuthBridge(), conf, startLock,
           startCondition, startedServing);
     } catch (Throwable t) {
       // Catch the exception, log it and rethrow it.

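Read together, the parse() logic above gives this precedence, highest first (a hedged summary): the -p option, then the legacy METASTORE_PORT environment variable, then the deprecated naked port argument, and finally hive.metastore.port from the configuration (default 9083). The new test below exercises this; its essential shape:

    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(new HiveConf());
    cli.parse(new String[] { "9999" });   // deprecated naked argument
    assert cli.getPort() == 9999;         // overrides the configured value
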
http://git-wip-us.apache.org/repos/asf/hive/blob/d434f645/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
new file mode 100644
index 0000000..f581c7d
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.junit.Test;
+
+public class TestHiveMetastoreCli {
+  private static final String[] CLI_ARGUMENTS = { "9999" };
+
+  @Test
+  public void testDefaultCliPortValue() {
+    HiveConf configuration = new HiveConf();
+    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+    assert (cli.getPort() == HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT));
+  }
+
+  @Test
+  public void testOverriddenCliPortValue() {
+    HiveConf configuration = new HiveConf();
+    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+    cli.parse(TestHiveMetastoreCli.CLI_ARGUMENTS);
+
+    assert (cli.getPort() == 9999);
+  }
+
+  @Test
+  public void testOverriddenMetastoreServerPortValue() {
+    HiveConf configuration = new HiveConf();
+    HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
+
+    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+
+    assert (cli.getPort() == 12345);
+  }
+
+  @Test
+  public void testCliOverridesConfiguration() {
+    HiveConf configuration = new HiveConf();
+    HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
+
+    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+    cli.parse(CLI_ARGUMENTS);
+
+    assert (cli.getPort() == 9999);
+  }
+}


[43/50] [abbrv] hive git commit: HIVE-10538: Fix NPE in FileSinkOperator from hashcode mismatch (Peter Slawski reviewed by Prasanth Jayachandran)

Posted by xu...@apache.org.
HIVE-10538: Fix NPE in FileSinkOperator from hashcode mismatch (Peter Slawski reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3633db25
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3633db25
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3633db25

Branch: refs/heads/beeline-cli
Commit: 3633db25fadd39fdcad15d95af1cf69cc6e2429e
Parents: 0af1d6e
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Thu May 7 13:40:45 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Thu May 7 13:40:45 2015 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   1 +
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |   3 +-
 .../test/queries/clientpositive/bucket_many.q   |  16 ++
 .../results/clientpositive/bucket_many.q.out    | 230 +++++++++++++++++++
 .../results/clientpositive/spark/cbo_gby.q.out  |   4 +-
 .../clientpositive/spark/cbo_udf_udaf.q.out     |   2 +-
 ...pby_complex_types_multi_single_reducer.q.out |  38 +--
 .../spark/lateral_view_explode2.q.out           |   4 +-
 .../clientpositive/spark/union_remove_25.q.out  |   2 +-
 .../clientpositive/spark/union_top_level.q.out  |  16 +-
 .../spark/vector_cast_constant.q.java1.7.out    |  16 +-
 .../spark/vector_cast_constant.q.java1.8.out    |  16 +-
 .../spark/vectorized_timestamp_funcs.q.out      |   4 +-
 13 files changed, 300 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 3eff7d0..eeb46cc 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -3,6 +3,7 @@ minimr.query.files=auto_sortmerge_join_16.q,\
   bucket4.q,\
   bucket5.q,\
   bucket6.q,\
+  bucket_many.q,\
   bucket_num_reducers.q,\
   bucket_num_reducers2.q,\
   bucketizedhiveinputformat.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index 468d87f..859a28f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -125,7 +125,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
   protected transient Object[] cachedValues;
   protected transient List<List<Integer>> distinctColIndices;
   protected transient Random random;
-  protected transient int bucketNumber;
+  protected transient int bucketNumber = -1;
 
   /**
    * This two dimensional array holds key data and a corresponding Union object
@@ -552,6 +552,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
     // in case of bucketed table, insert the bucket number as the last column in value
     if (bucketEval != null) {
       length -= 1;
+      assert bucketNumber >= 0;
       cachedValues[length] = new Text(String.valueOf(bucketNumber));
     }
 

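A note on the pattern: the fix initializes bucketNumber to -1 as a sentinel so that any code path that forgets to compute the bucket trips the assert instead of silently emitting bucket 0 and failing later in FileSinkOperator. A minimal sketch of the same idea, with hypothetical names rather than the actual ReduceSinkOperator fields:

    public class BucketedSink {
      // -1 is a sentinel meaning "bucket number not yet computed for this row".
      private int bucketNumber = -1;

      void computeBucket(Object key, int numBuckets) {
        // Mask off the sign bit so the modulo result is always non-negative.
        bucketNumber = (key.hashCode() & Integer.MAX_VALUE) % numBuckets;
      }

      void emitValue(Object[] cachedValues, int length) {
        // Fail fast if the bucket was never computed before the value is built.
        assert bucketNumber >= 0 : "bucket number not set before use";
        cachedValues[length - 1] = String.valueOf(bucketNumber);
      }
    }
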
http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/queries/clientpositive/bucket_many.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_many.q b/ql/src/test/queries/clientpositive/bucket_many.q
new file mode 100644
index 0000000..1f0b795
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/bucket_many.q
@@ -0,0 +1,16 @@
+set hive.enforce.bucketing = true;
+set mapred.reduce.tasks = 16;
+
+create table bucket_many(key int, value string) clustered by (key) into 256 buckets;
+
+explain extended
+insert overwrite table bucket_many
+select * from src;
+
+insert overwrite table bucket_many
+select * from src;
+
+explain
+select * from bucket_many tablesample (bucket 1 out of 256) s;
+
+select * from bucket_many tablesample (bucket 1 out of 256) s;

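For context on what the test exercises: Hive assigns a row to a bucket by masking the key's hash to a non-negative value and taking it modulo the bucket count, and "tablesample (bucket 1 out of 256)" keeps the rows whose bucket is 0 — exactly the ((hash(key) & 2147483647) % 256) = 0 predicate visible in the plan below. A hedged sketch of that mapping (not the actual ObjectInspectorUtils code):

    // Rows with equal keys always land in the same bucket file.
    static int bucketFor(int keyHash, int numBuckets) {
      return (keyHash & Integer.MAX_VALUE) % numBuckets;
    }

    // tablesample (bucket b out of n) keeps rows where bucketFor(...) == b - 1.
    static boolean inSample(int keyHash, int b, int n) {
      return bucketFor(keyHash, n) == b - 1;
    }
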
http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/bucket_many.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_many.q.out b/ql/src/test/results/clientpositive/bucket_many.q.out
new file mode 100644
index 0000000..9f09163
--- /dev/null
+++ b/ql/src/test/results/clientpositive/bucket_many.q.out
@@ -0,0 +1,230 @@
+PREHOOK: query: create table bucket_many(key int, value string) clustered by (key) into 256 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_many
+POSTHOOK: query: create table bucket_many(key int, value string) clustered by (key) into 256 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_many
+PREHOOK: query: explain extended
+insert overwrite table bucket_many
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket_many
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            src
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_TAB
+            TOK_TABNAME
+               bucket_many
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_ALLCOLREF
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                sort order: 
+                Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                tag: -1
+                value expressions: _col0 (type: string), _col1 (type: string)
+                auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+      Truncated Path -> Alias:
+        /src [src]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            GlobalTableId: 1
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 16
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  bucket_count 256
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types int:string
+#### A masked pattern was here ####
+                  name default.bucket_many
+                  serialization.ddl struct bucket_many { i32 key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_many
+            TotalFiles: 256
+            GatherStats: true
+            MultiFileSpray: true
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count 256
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.bucket_many
+                serialization.ddl struct bucket_many { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucket_many
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucket_many
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket_many
+POSTHOOK: query: insert overwrite table bucket_many
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket_many
+POSTHOOK: Lineage: bucket_many.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket_many.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select * from bucket_many tablesample (bucket 1 out of 256) s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from bucket_many tablesample (bucket 1 out of 256) s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: s
+            Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((hash(key) & 2147483647) % 256) = 0) (type: boolean)
+              Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from bucket_many tablesample (bucket 1 out of 256) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_many
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bucket_many tablesample (bucket 1 out of 256) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_many
+#### A masked pattern was here ####
+256	val_256
+0	val_0
+0	val_0
+0	val_0
+256	val_256

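The file-sink numbers in the plan above follow directly from the settings in the test: 256 buckets written by 16 reducers means each sink sprays over 256 / 16 = 16 files, which is why the plan reports TotalFiles: 256, NumFilesPerFileSink: 16, and MultiFileSpray: true. The arithmetic, with assumed variable names:

    int totalBuckets = 256;   // from "into 256 buckets"
    int reducers = 16;        // from "set mapred.reduce.tasks = 16"
    // Each reducer's FileSinkOperator multiplexes rows over this many files.
    int filesPerSink = totalBuckets / reducers;  // 16 -> MultiFileSpray
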
http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/cbo_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cbo_gby.q.out b/ql/src/test/results/clientpositive/spark/cbo_gby.q.out
index 67c7a63..9ca8a88 100644
--- a/ql/src/test/results/clientpositive/spark/cbo_gby.q.out
+++ b/ql/src/test/results/clientpositive/spark/cbo_gby.q.out
@@ -11,10 +11,10 @@ POSTHOOK: Input: default@cbo_t1
 POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 1	4	12
+ 1 	4	2
 NULL	NULL	NULL
  1	4	2
 1 	4	2
- 1 	4	2
 PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cbo_t1
@@ -25,9 +25,9 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@cbo_t1
 POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
+5.0	2	3
 NULL	NULL	1
 5.0	12	1
-5.0	2	3
 PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cbo_t1

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out b/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out
index 932943d..ded043f 100644
--- a/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out
+++ b/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out
@@ -79,9 +79,9 @@ POSTHOOK: Input: default@cbo_t1
 POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 NULL	0	NULL
+1 	2	1.0
  1 	2	1.0
  1	2	1.0
-1 	2	1.0
 1	12	1.0
 PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float order by a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
index 9118845..9fe3b72 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
@@ -204,16 +204,16 @@ POSTHOOK: query: SELECT DEST1.* FROM DEST1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1
 #### A masked pattern was here ####
-["166"]	1
-["169"]	4
+["118"]	2
+["180"]	1
+["201"]	1
+["202"]	1
 ["238"]	2
-["258"]	1
-["306"]	1
-["384"]	3
-["392"]	1
-["435"]	1
-["455"]	1
-["468"]	4
+["273"]	3
+["282"]	2
+["419"]	1
+["432"]	1
+["467"]	1
 PREHOOK: query: SELECT DEST2.* FROM DEST2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest2
@@ -222,13 +222,13 @@ POSTHOOK: query: SELECT DEST2.* FROM DEST2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest2
 #### A masked pattern was here ####
-{"120":"val_120"}	2
-{"129":"val_129"}	2
-{"160":"val_160"}	1
-{"26":"val_26"}	2
-{"27":"val_27"}	1
-{"288":"val_288"}	2
-{"298":"val_298"}	3
-{"30":"val_30"}	1
-{"311":"val_311"}	3
-{"74":"val_74"}	1
+{"0":"val_0"}	3
+{"138":"val_138"}	4
+{"170":"val_170"}	1
+{"19":"val_19"}	1
+{"222":"val_222"}	1
+{"223":"val_223"}	2
+{"226":"val_226"}	1
+{"489":"val_489"}	4
+{"8":"val_8"}	1
+{"80":"val_80"}	1

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out b/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
index a5c95b5..41d60f5 100644
--- a/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
+++ b/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
@@ -93,9 +93,9 @@ POSTHOOK: query: SELECT col1, col2 FROM src LATERAL VIEW explode2(array(1,2,3))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-2	2
-1	1
 3	3
+1	1
+2	2
 PREHOOK: query: DROP TEMPORARY FUNCTION explode2
 PREHOOK: type: DROPFUNCTION
 PREHOOK: Output: explode2

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
index f32aaea..5853cc0 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
@@ -424,7 +424,7 @@ Partition Parameters:
 	numFiles            	2                   
 	numRows             	-1                  
 	rawDataSize         	-1                  
-	totalSize           	6814                
+	totalSize           	6826                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/union_top_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_top_level.q.out b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
index f57fc04..a64fc95 100644
--- a/ql/src/test/results/clientpositive/spark/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
@@ -348,18 +348,18 @@ POSTHOOK: Input: default@src
 0	val_0
 0	val_0
 0	val_0
-10	val_10
-10	val_10
+0	val_0
+0	val_0
+100	val_100
+100	val_100
+100	val_100
+100	val_100
+100	val_100
+100	val_100
 100	val_100
 100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
 104	val_104
 104	val_104
-111	val_111
-111	val_111
 PREHOOK: query: -- ctas
 explain
 create table union_top as

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
index e159c8b..aaac8aa 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
@@ -191,13 +191,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
+65636	50.0	50.0	50
+65550	50.0	50.0	50
+65592	50.0	50.0	50
+65744	50.0	50.0	50
+65668	50.0	50.0	50
+65722	50.0	50.0	50
 65598	50.0	50.0	50
-65694	50.0	50.0	50
-65678	50.0	50.0	50
-65684	50.0	50.0	50
+65568	50.0	50.0	50
 65596	50.0	50.0	50
-65692	50.0	50.0	50
-65630	50.0	50.0	50
-65674	50.0	50.0	50
-65628	50.0	50.0	50
-65776	50.0	50.0	50
+65738	50.0	50.0	50

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
index 43c07e6..44ecd09 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
@@ -191,13 +191,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65788	50.0	50.0	50
+65636	50.0	50.0	50
+65550	50.0	50.0	50
+65592	50.0	50.0	50
+65744	50.0	50.0	50
+65722	50.0	50.0	50
+65668	50.0	50.0	50
 65598	50.0	50.0	50
-65694	50.0	50.0	50
-65678	50.0	50.0	50
-65684	50.0	50.0	50
 65596	50.0	50.0	50
-65692	50.0	50.0	50
-65630	50.0	50.0	50
-65674	50.0	50.0	50
-65628	50.0	50.0	50
+65568	50.0	50.0	50
+65738	50.0	50.0	50

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
index 3a0c3f1..3044582 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
@@ -768,7 +768,7 @@ FROM alltypesorc_string
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc_string
 #### A masked pattern was here ####
-1123143.857
+1123143.8569999998
 PREHOOK: query: EXPLAIN SELECT
   avg(ctimestamp1),
   variance(ctimestamp1),
@@ -868,4 +868,4 @@ FROM alltypesorc_string
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc_string
 #### A masked pattern was here ####
-2.8798560435897438E13	8.970772952794215E19	8.970772952794215E19	9.206845925236167E19	9.471416447815086E9	9.471416447815086E9	9.471416447815086E9	9.595231068211004E9
+2.8798560435897438E13	8.970772952794214E19	8.970772952794214E19	9.206845925236167E19	9.471416447815086E9	9.471416447815086E9	9.471416447815086E9	9.595231068211004E9

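Most of the golden-file churn above is a side effect of the partitioning change: rows now arrive at the reducers in a different order, and because double-precision addition is not associative, aggregates such as the timestamp sums and variances shift in their last digits. A tiny illustration of the effect:

    double a = 0.1, b = 0.2, c = 0.3;
    // The two groupings produce bitwise-different doubles.
    System.out.println((a + b) + c);  // 0.6000000000000001
    System.out.println(a + (b + c)); // 0.6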

[41/50] [abbrv] hive git commit: HIVE-10453: HS2 leaking open file descriptors when using UDFs (Yongzhi Chen via Szehon)

Posted by xu...@apache.org.
HIVE-10453: HS2 leaking open file descriptors when using UDFs (Yongzhi Chen via Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/42f88ca9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/42f88ca9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/42f88ca9

Branch: refs/heads/beeline-cli
Commit: 42f88ca9a105956a2085b75dae681d62dd784cef
Parents: 6176333
Author: Szehon Ho <sz...@cloudera.com>
Authored: Thu May 7 11:20:53 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Thu May 7 11:20:53 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/Registry.java    | 29 ++++++++++++++++++--
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  5 ++++
 .../hadoop/hive/ql/session/SessionState.java    |  4 +--
 3 files changed, 34 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/42f88ca9/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
index 2ba91d0..a5d59ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.hive.ql.exec;
 
 import com.google.common.base.Splitter;
 import com.google.common.collect.Sets;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.ql.exec.FunctionInfo.FunctionResource;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -43,9 +45,12 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import java.io.IOException;
+import java.net.URLClassLoader;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -66,6 +71,7 @@ public class Registry {
    */
   private final Map<String, FunctionInfo> mFunctions = new LinkedHashMap<String, FunctionInfo>();
   private final Set<Class<?>> builtIns = Collections.synchronizedSet(new HashSet<Class<?>>());
+  private final Set<ClassLoader> mSessionUDFLoaders = new LinkedHashSet<ClassLoader>();
 
   private final boolean isNative;
 
@@ -443,7 +449,6 @@ public class Registry {
   // should be called after session registry is checked
   private FunctionInfo registerToSessionRegistry(String qualifiedName, FunctionInfo function) {
     FunctionInfo ret = null;
-
     ClassLoader prev = Utilities.getSessionSpecifiedClassLoader();
     try {
       // Found UDF in metastore - now add it to the function registry
@@ -455,7 +460,6 @@ public class Registry {
         LOG.error("Unable to load resources for " + qualifiedName + ":" + e, e);
         return null;
       }
-
       ClassLoader loader = Utilities.getSessionSpecifiedClassLoader();
       Class<?> udfClass = Class.forName(function.getClassName(), true, loader);
 
@@ -463,6 +467,9 @@ public class Registry {
       if (ret == null) {
         LOG.error(function.getClassName() + " is not a valid UDF class and was not registered.");
       }
+      if (SessionState.get().isHiveServerQuery()) {
+        SessionState.getRegistryForWrite().addToUDFLoaders(loader);
+      }
     } catch (ClassNotFoundException e) {
       // Lookup of UDF class failed
       LOG.error("Unable to load UDF class: " + e);
@@ -489,6 +496,24 @@ public class Registry {
     builtIns.clear();
   }
 
+  public synchronized void closeCUDFLoaders() {
+    try {
+      for (ClassLoader loader : mSessionUDFLoaders) {
+        JavaUtils.closeClassLoader(loader);
+      }
+    } catch (IOException ie) {
+      LOG.error("Error closing UDF class loader: " + ie);
+    }
+    mSessionUDFLoaders.clear();
+  }
+
+  public synchronized void addToUDFLoaders(ClassLoader loader) {
+    mSessionUDFLoaders.add(loader);
+  }
+  public synchronized void removeFromUDFLoaders(ClassLoader loader) {
+    mSessionUDFLoaders.remove(loader);
+  }
+
   /**
    * Setup blocked flag for all builtin UDFs as per udf whitelist and blacklist
    * @param whiteListStr

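The Registry change above is the core of the fix: every class loader created for a session UDF is remembered in mSessionUDFLoaders and closed when the session ends. URLClassLoader has implemented Closeable since Java 7, and a loader that is never closed keeps the file descriptors of its jars open, which is the leak this patch addresses. A minimal sketch of the same bookkeeping, under assumed names:

    import java.io.IOException;
    import java.net.URLClassLoader;
    import java.util.LinkedHashSet;
    import java.util.Set;

    public class UdfLoaderTracker {
      private final Set<URLClassLoader> loaders = new LinkedHashSet<>();

      public synchronized void track(URLClassLoader loader) {
        loaders.add(loader);
      }

      public synchronized void untrack(URLClassLoader loader) {
        loaders.remove(loader);
      }

      public synchronized void closeAll() {
        for (URLClassLoader loader : loaders) {
          try {
            loader.close();  // releases the loader's open jar descriptors
          } catch (IOException e) {
            // log and continue closing the rest
          }
        }
        loaders.clear();
      }
    }
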
http://git-wip-us.apache.org/repos/asf/hive/blob/42f88ca9/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index ad5c8f8..7b48b8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2250,6 +2250,11 @@ public final class Utilities {
       }
     }
     JavaUtils.closeClassLoader(loader);
+    // This loader is closed; remove it from the cached registry loaders so it is not closed again.
+    Registry reg = SessionState.getRegistry();
+    if (reg != null) {
+      reg.removeFromUDFLoaders(loader);
+    }
 
     loader = new URLClassLoader(newPath.toArray(new URL[0]));
     curThread.setContextClassLoader(loader);

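The Utilities hunk completes the lifecycle: when the session class loader is rebuilt, the old loader is closed immediately and deregistered so that the session-close path does not try to close it a second time. Roughly, using the hypothetical tracker sketched above:

    // Assumed names; mirrors the intent of the hunk, not the exact Hive code.
    URLClassLoader rebuild(URLClassLoader old, List<URL> remainingPaths,
                           UdfLoaderTracker tracker) throws IOException {
      old.close();           // release jar handles now
      tracker.untrack(old);  // so closeAll() will not close it again
      return new URLClassLoader(remainingPaths.toArray(new URL[0]));
    }
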
http://git-wip-us.apache.org/repos/asf/hive/blob/42f88ca9/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 8db78e5..37b6d6f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -1458,7 +1458,7 @@ public class SessionState {
   }
 
   public void close() throws IOException {
-    registry.clear();;
+    registry.clear();
     if (txnMgr != null) txnMgr.closeTxnManager();
     JavaUtils.closeClassLoadersTo(conf.getClassLoader(), parentLoader);
     File resourceDir =
@@ -1493,7 +1493,7 @@ public class SessionState {
         sparkSession = null;
       }
     }
-
+    registry.closeCUDFLoaders();
     dropSessionPaths(conf);
   }
 


[45/50] [abbrv] hive git commit: HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count into account (Laljo John Pullokkaran reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
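
The theme of HIVE-10526: when two plans have costs within an epsilon of each other, the comparator should break the tie on estimated row count instead of declaring the costs equal, so the lower-cardinality plan wins and the plan shapes in the diffs below change accordingly. A hedged sketch of such a comparison (hypothetical class, not the actual HiveCost code):

    // Hypothetical; illustrates an epsilon comparison that breaks ties on rows.
    final class PlanCost {
      final double cost;     // combined CPU/IO estimate
      final double rowCount; // estimated output rows

      PlanCost(double cost, double rowCount) {
        this.cost = cost;
        this.rowCount = rowCount;
      }

      boolean isCheaperThan(PlanCost other) {
        double eps = 1e-5 * Math.max(cost, other.cost);
        if (Math.abs(cost - other.cost) > eps) {
          return cost < other.cost;        // costs differ meaningfully
        }
        return rowCount < other.rowCount;  // near-tie: prefer fewer rows
      }
    }
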
http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
index f7026a8..f84524b 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
@@ -53,11 +53,11 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@ss
 POSTHOOK: Lineage: ss.k1 SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ss.k2 EXPRESSION [(src)y.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ss.k3 SIMPLE [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.k2 SIMPLE [(src)y.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.k3 EXPRESSION [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: ss.v1 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: ss.v2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: ss.v3 SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.v2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.v3 EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: INSERT OVERWRITE TABLE sr
 SELECT x.key,x.value,y.key,y.value,z.key,z.value
 FROM src1 x 
@@ -81,11 +81,11 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@sr
 POSTHOOK: Lineage: sr.k1 SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: sr.k2 EXPRESSION [(src)y.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: sr.k3 SIMPLE [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.k2 SIMPLE [(src)y.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.k3 EXPRESSION [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: sr.v1 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: sr.v2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: sr.v3 SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.v2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.v3 EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: INSERT OVERWRITE TABLE cs
 SELECT x.key,x.value,y.key,y.value,z.key,z.value
 FROM src1 x 
@@ -195,7 +195,7 @@ Stage-0
                Merge Join Operator [MERGEJOIN_29]
                |  condition map:[{"":"Inner Join 0 to 1"}]
                |  keys:{"1":"_col3 (type: string)","0":"_col0 (type: string)"}
-               |  outputColumnNames:["_col0","_col4","_col5"]
+               |  outputColumnNames:["_col1","_col2","_col5"]
                |  Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [SIMPLE_EDGE]
                |  Reduce Output Operator [RS_14]
@@ -203,14 +203,15 @@ Stage-0
                |     Map-reduce partition columns:_col0 (type: string)
                |     sort order:+
                |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |     Select Operator [SEL_2]
-               |        outputColumnNames:["_col0"]
+               |     value expressions:_col1 (type: string)
+               |     Select Operator [SEL_1]
+               |        outputColumnNames:["_col0","_col1"]
                |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                |        Filter Operator [FIL_25]
-               |           predicate:value is not null (type: boolean)
+               |           predicate:key is not null (type: boolean)
                |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                |           TableScan [TS_0]
-               |              alias:z
+               |              alias:y
                |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                |<-Reducer 4 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_16]
@@ -218,11 +219,11 @@ Stage-0
                      Map-reduce partition columns:_col3 (type: string)
                      sort order:+
                      Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                     value expressions:_col1 (type: string), _col2 (type: string)
+                     value expressions:_col0 (type: string)
                      Merge Join Operator [MERGEJOIN_28]
                      |  condition map:[{"":"Inner Join 0 to 1"}]
-                     |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
-                     |  outputColumnNames:["_col1","_col2","_col3"]
+                     |  keys:{"1":"_col1 (type: string)","0":"_col0 (type: string)"}
+                     |  outputColumnNames:["_col0","_col3"]
                      |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                      |<-Map 3 [SIMPLE_EDGE]
                      |  Reduce Output Operator [RS_8]
@@ -230,28 +231,27 @@ Stage-0
                      |     Map-reduce partition columns:_col0 (type: string)
                      |     sort order:+
                      |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                     |     value expressions:_col1 (type: string)
                      |     Select Operator [SEL_4]
-                     |        outputColumnNames:["_col0","_col1"]
+                     |        outputColumnNames:["_col0"]
                      |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      |        Filter Operator [FIL_26]
-                     |           predicate:key is not null (type: boolean)
+                     |           predicate:value is not null (type: boolean)
                      |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                     |           TableScan [TS_3]
-                     |              alias:y
+                     |           TableScan [TS_2]
+                     |              alias:z
                      |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      |<-Map 5 [SIMPLE_EDGE]
                         Reduce Output Operator [RS_10]
-                           key expressions:_col0 (type: string)
-                           Map-reduce partition columns:_col0 (type: string)
+                           key expressions:_col1 (type: string)
+                           Map-reduce partition columns:_col1 (type: string)
                            sort order:+
                            Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                           value expressions:_col1 (type: string)
+                           value expressions:_col0 (type: string)
                            Select Operator [SEL_6]
                               outputColumnNames:["_col0","_col1"]
                               Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                               Filter Operator [FIL_27]
-                                 predicate:(key is not null and value is not null) (type: boolean)
+                                 predicate:(value is not null and key is not null) (type: boolean)
                                  Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                                  TableScan [TS_5]
                                     alias:x
@@ -315,21 +315,21 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE)
+Reducer 13 <- Map 12 (SIMPLE_EDGE), Map 14 (SIMPLE_EDGE)
 Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE)
-Reducer 14 <- Map 13 (SIMPLE_EDGE), Reducer 16 (SIMPLE_EDGE)
-Reducer 12 <- Map 11 (SIMPLE_EDGE), Reducer 14 (SIMPLE_EDGE)
+Reducer 11 <- Map 10 (SIMPLE_EDGE), Reducer 13 (SIMPLE_EDGE)
 Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
-Reducer 4 <- Reducer 12 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
-Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
-Reducer 9 <- Map 10 (SIMPLE_EDGE), Map 8 (SIMPLE_EDGE)
+Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+Reducer 9 <- Reducer 16 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+Reducer 8 <- Map 7 (SIMPLE_EDGE), Reducer 11 (SIMPLE_EDGE)
 Reducer 16 <- Map 15 (SIMPLE_EDGE), Map 17 (SIMPLE_EDGE)
 
 Stage-0
    Fetch Operator
       limit:100
       Stage-1
-         Reducer 6
+         Reducer 5
          File Output Operator [FS_71]
             compressed:false
             Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
@@ -339,236 +339,236 @@ Stage-0
                Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
                Select Operator [SEL_69]
                |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-               |  Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
-               |<-Reducer 5 [SIMPLE_EDGE]
+               |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+               |<-Reducer 4 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_68]
                      key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                      sort order:+++
-                     Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
+                     Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
                      value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
                      Group By Operator [GBY_66]
                      |  aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
                      |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                      |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                     |  Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
-                     |<-Reducer 4 [SIMPLE_EDGE]
+                     |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+                     |<-Reducer 3 [SIMPLE_EDGE]
                         Reduce Output Operator [RS_65]
                            key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                            Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                            sort order:+++
-                           Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                           Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                            value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
                            Group By Operator [GBY_64]
                               aggregations:["count(_col3)","count(_col4)","count(_col5)"]
                               keys:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                               outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                              Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                              Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                               Select Operator [SEL_62]
                                  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                 Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 Merge Join Operator [MERGEJOIN_111]
+                                 Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                 Merge Join Operator [MERGEJOIN_113]
                                  |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"1":"_col8 (type: string), _col10 (type: string)","0":"_col8 (type: string), _col10 (type: string)"}
-                                 |  outputColumnNames:["_col2","_col3","_col8","_col9","_col20","_col21"]
-                                 |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Reducer 12 [SIMPLE_EDGE]
-                                 |  Reduce Output Operator [RS_60]
-                                 |     key expressions:_col8 (type: string), _col10 (type: string)
-                                 |     Map-reduce partition columns:_col8 (type: string), _col10 (type: string)
+                                 |  keys:{"1":"_col15 (type: string), _col17 (type: string)","0":"_col1 (type: string), _col3 (type: string)"}
+                                 |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
+                                 |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                 |<-Reducer 2 [SIMPLE_EDGE]
+                                 |  Reduce Output Operator [RS_58]
+                                 |     key expressions:_col1 (type: string), _col3 (type: string)
+                                 |     Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
                                  |     sort order:++
-                                 |     Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                 |     value expressions:_col6 (type: string), _col7 (type: string)
-                                 |     Select Operator [SEL_46]
-                                 |        outputColumnNames:["_col10","_col6","_col7","_col8"]
-                                 |        Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                 |        Merge Join Operator [MERGEJOIN_109]
-                                 |        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |  keys:{"1":"_col5 (type: string)","0":"_col1 (type: string)"}
-                                 |        |  outputColumnNames:["_col6","_col7","_col8","_col10"]
-                                 |        |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Map 11 [SIMPLE_EDGE]
-                                 |        |  Reduce Output Operator [RS_42]
-                                 |        |     key expressions:_col1 (type: string)
-                                 |        |     Map-reduce partition columns:_col1 (type: string)
-                                 |        |     sort order:+
-                                 |        |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     Select Operator [SEL_19]
-                                 |        |        outputColumnNames:["_col1"]
-                                 |        |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |        Filter Operator [FIL_101]
-                                 |        |           predicate:((key = 'src1key') and value is not null) (type: boolean)
-                                 |        |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           TableScan [TS_17]
-                                 |        |              alias:src1
-                                 |        |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Reducer 14 [SIMPLE_EDGE]
-                                 |           Reduce Output Operator [RS_44]
-                                 |              key expressions:_col5 (type: string)
-                                 |              Map-reduce partition columns:_col5 (type: string)
-                                 |              sort order:+
-                                 |              Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                 |              value expressions:_col4 (type: string), _col6 (type: string), _col8 (type: string)
-                                 |              Merge Join Operator [MERGEJOIN_108]
-                                 |              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |              |  keys:{"1":"_col2 (type: string)","0":"_col0 (type: string)"}
-                                 |              |  outputColumnNames:["_col4","_col5","_col6","_col8"]
-                                 |              |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                 |              |<-Map 13 [SIMPLE_EDGE]
-                                 |              |  Reduce Output Operator [RS_36]
-                                 |              |     key expressions:_col0 (type: string)
-                                 |              |     Map-reduce partition columns:_col0 (type: string)
-                                 |              |     sort order:+
-                                 |              |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              |     Select Operator [SEL_22]
-                                 |              |        outputColumnNames:["_col0"]
-                                 |              |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              |        Filter Operator [FIL_102]
-                                 |              |           predicate:((value = 'd1value') and key is not null) (type: boolean)
-                                 |              |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              |           TableScan [TS_20]
-                                 |              |              alias:d1
-                                 |              |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |              |<-Reducer 16 [SIMPLE_EDGE]
-                                 |                 Reduce Output Operator [RS_38]
-                                 |                    key expressions:_col2 (type: string)
-                                 |                    Map-reduce partition columns:_col2 (type: string)
-                                 |                    sort order:+
-                                 |                    Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                 |                    value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string)
-                                 |                    Merge Join Operator [MERGEJOIN_107]
-                                 |                    |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |                    |  keys:{"1":"_col3 (type: string)","0":"_col1 (type: string)"}
-                                 |                    |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                 |                    |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |<-Map 15 [SIMPLE_EDGE]
-                                 |                    |  Reduce Output Operator [RS_30]
-                                 |                    |     key expressions:_col1 (type: string)
-                                 |                    |     Map-reduce partition columns:_col1 (type: string)
-                                 |                    |     sort order:+
-                                 |                    |     Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |     Select Operator [SEL_25]
-                                 |                    |        outputColumnNames:["_col1"]
-                                 |                    |        Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |        Filter Operator [FIL_103]
-                                 |                    |           predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
-                                 |                    |           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |           TableScan [TS_23]
-                                 |                    |              alias:srcpart
-                                 |                    |              Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |<-Map 17 [SIMPLE_EDGE]
-                                 |                       Reduce Output Operator [RS_32]
-                                 |                          key expressions:_col3 (type: string)
-                                 |                          Map-reduce partition columns:_col3 (type: string)
-                                 |                          sort order:+
-                                 |                          Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |                          value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
-                                 |                          Select Operator [SEL_28]
-                                 |                             outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
-                                 |                             Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |                             Filter Operator [FIL_104]
-                                 |                                predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                 |                                Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |                                TableScan [TS_26]
-                                 |                                   alias:ss
-                                 |                                   Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Reducer 3 [SIMPLE_EDGE]
-                                    Reduce Output Operator [RS_58]
-                                       key expressions:_col8 (type: string), _col10 (type: string)
-                                       Map-reduce partition columns:_col8 (type: string), _col10 (type: string)
+                                 |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                 |     value expressions:_col2 (type: string)
+                                 |     Merge Join Operator [MERGEJOIN_107]
+                                 |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                 |     |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                 |     |  outputColumnNames:["_col1","_col2","_col3"]
+                                 |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                 |     |<-Map 1 [SIMPLE_EDGE]
+                                 |     |  Reduce Output Operator [RS_53]
+                                 |     |     key expressions:_col0 (type: string)
+                                 |     |     Map-reduce partition columns:_col0 (type: string)
+                                 |     |     sort order:+
+                                 |     |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
+                                 |     |     Select Operator [SEL_1]
+                                 |     |        outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                 |     |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |        Filter Operator [FIL_99]
+                                 |     |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
+                                 |     |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |           TableScan [TS_0]
+                                 |     |              alias:cs
+                                 |     |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
+                                 |     |<-Map 6 [SIMPLE_EDGE]
+                                 |        Reduce Output Operator [RS_55]
+                                 |           key expressions:_col0 (type: string)
+                                 |           Map-reduce partition columns:_col0 (type: string)
+                                 |           sort order:+
+                                 |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                 |           Select Operator [SEL_4]
+                                 |              outputColumnNames:["_col0"]
+                                 |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                 |              Filter Operator [FIL_100]
+                                 |                 predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                 |                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                 |                 TableScan [TS_2]
+                                 |                    alias:d1
+                                 |                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                 |<-Reducer 9 [SIMPLE_EDGE]
+                                    Reduce Output Operator [RS_60]
+                                       key expressions:_col15 (type: string), _col17 (type: string)
+                                       Map-reduce partition columns:_col15 (type: string), _col17 (type: string)
                                        sort order:++
-                                       Statistics:Num rows: 150 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                                       value expressions:_col2 (type: string), _col3 (type: string), _col9 (type: string)
-                                       Merge Join Operator [MERGEJOIN_110]
-                                       |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"1":"_col3 (type: string), _col5 (type: string)","0":"_col1 (type: string), _col3 (type: string)"}
-                                       |  outputColumnNames:["_col2","_col3","_col8","_col9","_col10"]
-                                       |  Statistics:Num rows: 150 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Reducer 2 [SIMPLE_EDGE]
-                                       |  Reduce Output Operator [RS_53]
-                                       |     key expressions:_col1 (type: string), _col3 (type: string)
-                                       |     Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
-                                       |     sort order:++
-                                       |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                       |     value expressions:_col2 (type: string)
-                                       |     Merge Join Operator [MERGEJOIN_105]
-                                       |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |     |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
-                                       |     |  outputColumnNames:["_col1","_col2","_col3"]
-                                       |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                       |     |<-Map 1 [SIMPLE_EDGE]
-                                       |     |  Reduce Output Operator [RS_48]
-                                       |     |     key expressions:_col0 (type: string)
-                                       |     |     Map-reduce partition columns:_col0 (type: string)
-                                       |     |     sort order:+
-                                       |     |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |     |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
-                                       |     |     Select Operator [SEL_1]
-                                       |     |        outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                       |     |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |     |        Filter Operator [FIL_97]
-                                       |     |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
-                                       |     |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |     |           TableScan [TS_0]
-                                       |     |              alias:cs
-                                       |     |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
-                                       |     |<-Map 7 [SIMPLE_EDGE]
-                                       |        Reduce Output Operator [RS_50]
-                                       |           key expressions:_col0 (type: string)
-                                       |           Map-reduce partition columns:_col0 (type: string)
-                                       |           sort order:+
-                                       |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                       |           Select Operator [SEL_4]
-                                       |              outputColumnNames:["_col0"]
-                                       |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                       |              Filter Operator [FIL_98]
-                                       |                 predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                       |                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                       |                 TableScan [TS_2]
-                                       |                    alias:d1
-                                       |                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Reducer 9 [SIMPLE_EDGE]
-                                          Reduce Output Operator [RS_55]
-                                             key expressions:_col3 (type: string), _col5 (type: string)
-                                             Map-reduce partition columns:_col3 (type: string), _col5 (type: string)
-                                             sort order:++
-                                             Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                             value expressions:_col2 (type: string), _col4 (type: string)
-                                             Merge Join Operator [MERGEJOIN_106]
-                                             |  condition map:[{"":"Inner Join 0 to 1"}]
-                                             |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
-                                             |  outputColumnNames:["_col2","_col3","_col4","_col5"]
-                                             |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 10 [SIMPLE_EDGE]
-                                             |  Reduce Output Operator [RS_14]
-                                             |     key expressions:_col0 (type: string)
-                                             |     Map-reduce partition columns:_col0 (type: string)
-                                             |     sort order:+
-                                             |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             |     Select Operator [SEL_10]
-                                             |        outputColumnNames:["_col0"]
-                                             |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             |        Filter Operator [FIL_100]
-                                             |           predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                             |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             |           TableScan [TS_8]
-                                             |              alias:d1
-                                             |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 8 [SIMPLE_EDGE]
-                                                Reduce Output Operator [RS_12]
-                                                   key expressions:_col0 (type: string)
-                                                   Map-reduce partition columns:_col0 (type: string)
-                                                   sort order:+
-                                                   Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                                   value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
-                                                   Select Operator [SEL_7]
-                                                      outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
-                                                      Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                                      Filter Operator [FIL_99]
-                                                         predicate:((((((v1 = 'srv1') and k1 is not null) and v2 is not null) and v3 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                                         Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                                         TableScan [TS_5]
-                                                            alias:sr
-                                                            Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                       Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                       value expressions:_col6 (type: string), _col7 (type: string), _col14 (type: string)
+                                       Select Operator [SEL_51]
+                                          outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
+                                          Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                          Merge Join Operator [MERGEJOIN_112]
+                                          |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |  keys:{"1":"_col2 (type: string), _col4 (type: string)","0":"_col8 (type: string), _col10 (type: string)"}
+                                          |  outputColumnNames:["_col6","_col7","_col14","_col15","_col17"]
+                                          |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Reducer 16 [SIMPLE_EDGE]
+                                          |  Reduce Output Operator [RS_49]
+                                          |     key expressions:_col2 (type: string), _col4 (type: string)
+                                          |     Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
+                                          |     sort order:++
+                                          |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                          |     value expressions:_col3 (type: string), _col5 (type: string)
+                                          |     Merge Join Operator [MERGEJOIN_111]
+                                          |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |     |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                          |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
+                                          |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                          |     |<-Map 15 [SIMPLE_EDGE]
+                                          |     |  Reduce Output Operator [RS_36]
+                                          |     |     key expressions:_col0 (type: string)
+                                          |     |     Map-reduce partition columns:_col0 (type: string)
+                                          |     |     sort order:+
+                                          |     |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+                                          |     |     Select Operator [SEL_31]
+                                          |     |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
+                                          |     |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |        Filter Operator [FIL_105]
+                                          |     |           predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
+                                          |     |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |           TableScan [TS_29]
+                                          |     |              alias:sr
+                                          |     |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                          |     |<-Map 17 [SIMPLE_EDGE]
+                                          |        Reduce Output Operator [RS_38]
+                                          |           key expressions:_col0 (type: string)
+                                          |           Map-reduce partition columns:_col0 (type: string)
+                                          |           sort order:+
+                                          |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                          |           Select Operator [SEL_34]
+                                          |              outputColumnNames:["_col0"]
+                                          |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                          |              Filter Operator [FIL_106]
+                                          |                 predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                          |                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                          |                 TableScan [TS_32]
+                                          |                    alias:d1
+                                          |                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Reducer 8 [SIMPLE_EDGE]
+                                             Reduce Output Operator [RS_47]
+                                                key expressions:_col8 (type: string), _col10 (type: string)
+                                                Map-reduce partition columns:_col8 (type: string), _col10 (type: string)
+                                                sort order:++
+                                                Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                                value expressions:_col6 (type: string), _col7 (type: string)
+                                                Merge Join Operator [MERGEJOIN_110]
+                                                |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                |  keys:{"1":"_col5 (type: string)","0":"_col1 (type: string)"}
+                                                |  outputColumnNames:["_col6","_col7","_col8","_col10"]
+                                                |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                                |<-Map 7 [SIMPLE_EDGE]
+                                                |  Reduce Output Operator [RS_42]
+                                                |     key expressions:_col1 (type: string)
+                                                |     Map-reduce partition columns:_col1 (type: string)
+                                                |     sort order:+
+                                                |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                                |     Select Operator [SEL_7]
+                                                |        outputColumnNames:["_col1"]
+                                                |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                                |        Filter Operator [FIL_101]
+                                                |           predicate:((key = 'src1key') and value is not null) (type: boolean)
+                                                |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                                |           TableScan [TS_5]
+                                                |              alias:src1
+                                                |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                                                |<-Reducer 11 [SIMPLE_EDGE]
+                                                   Reduce Output Operator [RS_44]
+                                                      key expressions:_col5 (type: string)
+                                                      Map-reduce partition columns:_col5 (type: string)
+                                                      sort order:+
+                                                      Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                                      value expressions:_col4 (type: string), _col6 (type: string), _col8 (type: string)
+                                                      Merge Join Operator [MERGEJOIN_109]
+                                                      |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                      |  keys:{"1":"_col2 (type: string)","0":"_col0 (type: string)"}
+                                                      |  outputColumnNames:["_col4","_col5","_col6","_col8"]
+                                                      |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                                      |<-Map 10 [SIMPLE_EDGE]
+                                                      |  Reduce Output Operator [RS_24]
+                                                      |     key expressions:_col0 (type: string)
+                                                      |     Map-reduce partition columns:_col0 (type: string)
+                                                      |     sort order:+
+                                                      |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                      |     Select Operator [SEL_10]
+                                                      |        outputColumnNames:["_col0"]
+                                                      |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                      |        Filter Operator [FIL_102]
+                                                      |           predicate:((value = 'd1value') and key is not null) (type: boolean)
+                                                      |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                      |           TableScan [TS_8]
+                                                      |              alias:d1
+                                                      |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                      |<-Reducer 13 [SIMPLE_EDGE]
+                                                         Reduce Output Operator [RS_26]
+                                                            key expressions:_col2 (type: string)
+                                                            Map-reduce partition columns:_col2 (type: string)
+                                                            sort order:+
+                                                            Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                                                            value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string)
+                                                            Merge Join Operator [MERGEJOIN_108]
+                                                            |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                            |  keys:{"1":"_col3 (type: string)","0":"_col1 (type: string)"}
+                                                            |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+                                                            |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                                                            |<-Map 12 [SIMPLE_EDGE]
+                                                            |  Reduce Output Operator [RS_18]
+                                                            |     key expressions:_col1 (type: string)
+                                                            |     Map-reduce partition columns:_col1 (type: string)
+                                                            |     sort order:+
+                                                            |     Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                            |     Select Operator [SEL_13]
+                                                            |        outputColumnNames:["_col1"]
+                                                            |        Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                            |        Filter Operator [FIL_103]
+                                                            |           predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
+                                                            |           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                            |           TableScan [TS_11]
+                                                            |              alias:srcpart
+                                                            |              Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                                                            |<-Map 14 [SIMPLE_EDGE]
+                                                               Reduce Output Operator [RS_20]
+                                                                  key expressions:_col3 (type: string)
+                                                                  Map-reduce partition columns:_col3 (type: string)
+                                                                  sort order:+
+                                                                  Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                                  value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
+                                                                  Select Operator [SEL_16]
+                                                                     outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
+                                                                     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                                     Filter Operator [FIL_104]
+                                                                        predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
+                                                                        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                                        TableScan [TS_14]
+                                                                           alias:ss
+                                                                           Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
 PREHOOK: query: explain
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
@@ -1298,7 +1298,7 @@ Stage-0
                Map Join Operator [MAPJOIN_29]
                |  condition map:[{"":"Inner Join 0 to 1"}]
                |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col3 (type: string)"}
-               |  outputColumnNames:["_col0","_col4","_col5"]
+               |  outputColumnNames:["_col1","_col2","_col5"]
                |  Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [BROADCAST_EDGE]
                |  Reduce Output Operator [RS_14]
@@ -1306,44 +1306,45 @@ Stage-0
                |     Map-reduce partition columns:_col0 (type: string)
                |     sort order:+
                |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |     Select Operator [SEL_2]
-               |        outputColumnNames:["_col0"]
+               |     value expressions:_col1 (type: string)
+               |     Select Operator [SEL_1]
+               |        outputColumnNames:["_col0","_col1"]
                |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                |        Filter Operator [FIL_25]
-               |           predicate:value is not null (type: boolean)
+               |           predicate:key is not null (type: boolean)
                |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                |           TableScan [TS_0]
-               |              alias:z
+               |              alias:y
                |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                |<-Map Join Operator [MAPJOIN_28]
                   |  condition map:[{"":"Inner Join 0 to 1"}]
-                  |  keys:{"Map 2":"_col0 (type: string)","Map 3":"_col0 (type: string)"}
-                  |  outputColumnNames:["_col1","_col2","_col3"]
+                  |  keys:{"Map 2":"_col0 (type: string)","Map 3":"_col1 (type: string)"}
+                  |  outputColumnNames:["_col0","_col3"]
                   |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   |<-Map 3 [BROADCAST_EDGE]
                   |  Reduce Output Operator [RS_10]
-                  |     key expressions:_col0 (type: string)
-                  |     Map-reduce partition columns:_col0 (type: string)
+                  |     key expressions:_col1 (type: string)
+                  |     Map-reduce partition columns:_col1 (type: string)
                   |     sort order:+
                   |     Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                  |     value expressions:_col1 (type: string)
+                  |     value expressions:_col0 (type: string)
                   |     Select Operator [SEL_6]
                   |        outputColumnNames:["_col0","_col1"]
                   |        Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                   |        Filter Operator [FIL_27]
-                  |           predicate:(key is not null and value is not null) (type: boolean)
+                  |           predicate:(value is not null and key is not null) (type: boolean)
                   |           Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                   |           TableScan [TS_5]
                   |              alias:x
                   |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   |<-Select Operator [SEL_4]
-                        outputColumnNames:["_col0","_col1"]
+                        outputColumnNames:["_col0"]
                         Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator [FIL_26]
-                           predicate:key is not null (type: boolean)
+                           predicate:value is not null (type: boolean)
                            Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           TableScan [TS_3]
-                              alias:y
+                           TableScan [TS_2]
+                              alias:z
                               Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 PREHOOK: query: EXPLAIN
 select 
@@ -1404,17 +1405,17 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
-Map 4 <- Map 3 (BROADCAST_EDGE)
-Map 7 <- Map 10 (BROADCAST_EDGE), Map 2 (BROADCAST_EDGE), Map 5 (BROADCAST_EDGE), Map 6 (BROADCAST_EDGE)
-Reducer 9 <- Reducer 8 (SIMPLE_EDGE)
-Reducer 8 <- Map 7 (SIMPLE_EDGE)
+Map 2 <- Map 1 (BROADCAST_EDGE)
+Map 10 <- Map 9 (BROADCAST_EDGE)
+Map 5 <- Map 10 (BROADCAST_EDGE), Map 2 (BROADCAST_EDGE), Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE), Map 8 (BROADCAST_EDGE)
+Reducer 7 <- Reducer 6 (SIMPLE_EDGE)
+Reducer 6 <- Map 5 (SIMPLE_EDGE)
 
 Stage-0
    Fetch Operator
       limit:100
       Stage-1
-         Reducer 9
+         Reducer 7
          File Output Operator [FS_71]
             compressed:false
             Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
@@ -1424,190 +1425,190 @@ Stage-0
                Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
                Select Operator [SEL_69]
                |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-               |  Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
-               |<-Reducer 8 [SIMPLE_EDGE]
+               |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+               |<-Reducer 6 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_68]
                      key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                      sort order:+++
-                     Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
+                     Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
                      value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
                      Group By Operator [GBY_66]
                      |  aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
                      |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                      |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                     |  Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
-                     |<-Map 7 [SIMPLE_EDGE]
+                     |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+                     |<-Map 5 [SIMPLE_EDGE]
                         Reduce Output Operator [RS_65]
                            key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                            Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                            sort order:+++
-                           Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                           Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                            value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
                            Group By Operator [GBY_64]
                               aggregations:["count(_col3)","count(_col4)","count(_col5)"]
                               keys:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                               outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                              Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                              Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                               Select Operator [SEL_62]
                                  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                 Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 Map Join Operator [MAPJOIN_111]
+                                 Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                 Map Join Operator [MAPJOIN_113]
                                  |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"Map 2":"_col8 (type: string), _col10 (type: string)","Map 7":"_col8 (type: string), _col10 (type: string)"}
-                                 |  outputColumnNames:["_col2","_col3","_col8","_col9","_col20","_col21"]
-                                 |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                 |  keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 5":"_col15 (type: string), _col17 (type: string)"}
+                                 |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
+                                 |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                                  |<-Map 2 [BROADCAST_EDGE]
                                  |  Reduce Output Operator [RS_58]
-                                 |     key expressions:_col8 (type: string), _col10 (type: string)
-                                 |     Map-reduce partition columns:_col8 (type: string), _col10 (type: string)
+                                 |     key expressions:_col1 (type: string), _col3 (type: string)
+                                 |     Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
                                  |     sort order:++
-                                 |     Statistics:Num rows: 150 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                                 |     value expressions:_col2 (type: string), _col3 (type: string), _col9 (type: string)
-                                 |     Map Join Operator [MAPJOIN_110]
+                                 |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                 |     value expressions:_col2 (type: string)
+                                 |     Map Join Operator [MAPJOIN_107]
                                  |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |     |  keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 4":"_col3 (type: string), _col5 (type: string)"}
-                                 |     |  outputColumnNames:["_col2","_col3","_col8","_col9","_col10"]
-                                 |     |  Statistics:Num rows: 150 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                                 |     |<-Map 4 [BROADCAST_EDGE]
-                                 |     |  Reduce Output Operator [RS_55]
-                                 |     |     key expressions:_col3 (type: string), _col5 (type: string)
-                                 |     |     Map-reduce partition columns:_col3 (type: string), _col5 (type: string)
-                                 |     |     sort order:++
-                                 |     |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     value expressions:_col2 (type: string), _col4 (type: string)
-                                 |     |     Map Join Operator [MAPJOIN_106]
-                                 |     |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |     |     |  keys:{"Map 3":"_col0 (type: string)","Map 4":"_col0 (type: string)"}
-                                 |     |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
-                                 |     |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |<-Map 3 [BROADCAST_EDGE]
-                                 |     |     |  Reduce Output Operator [RS_12]
-                                 |     |     |     key expressions:_col0 (type: string)
-                                 |     |     |     Map-reduce partition columns:_col0 (type: string)
-                                 |     |     |     sort order:+
-                                 |     |     |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
-                                 |     |     |     Select Operator [SEL_7]
-                                 |     |     |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
-                                 |     |     |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |        Filter Operator [FIL_99]
-                                 |     |     |           predicate:((((((v1 = 'srv1') and k1 is not null) and v2 is not null) and v3 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                 |     |     |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |           TableScan [TS_5]
-                                 |     |     |              alias:sr
-                                 |     |     |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |<-Select Operator [SEL_10]
-                                 |     |           outputColumnNames:["_col0"]
-                                 |     |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |     |           Filter Operator [FIL_100]
-                                 |     |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                 |     |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |     |              TableScan [TS_8]
-                                 |     |                 alias:d1
-                                 |     |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |     |<-Map Join Operator [MAPJOIN_105]
-                                 |        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col0 (type: string)"}
-                                 |        |  outputColumnNames:["_col1","_col2","_col3"]
-                                 |        |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Map 1 [BROADCAST_EDGE]
-                                 |        |  Reduce Output Operator [RS_48]
-                                 |        |     key expressions:_col0 (type: string)
-                                 |        |     Map-reduce partition columns:_col0 (type: string)
-                                 |        |     sort order:+
-                                 |        |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
-                                 |        |     Select Operator [SEL_1]
-                                 |        |        outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                 |        |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |        |        Filter Operator [FIL_97]
-                                 |        |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
-                                 |        |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           TableScan [TS_0]
-                                 |        |              alias:cs
-                                 |        |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Select Operator [SEL_4]
-                                 |              outputColumnNames:["_col0"]
+                                 |     |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col0 (type: string)"}
+                                 |     |  outputColumnNames:["_col1","_col2","_col3"]
+                                 |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                 |     |<-Map 1 [BROADCAST_EDGE]
+                                 |     |  Reduce Output Operator [RS_53]
+                                 |     |     key expressions:_col0 (type: string)
+                                 |     |     Map-reduce partition columns:_col0 (type: string)
+                                 |     |     sort order:+
+                                 |     |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
+                                 |     |     Select Operator [SEL_1]
+                                 |     |        outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                 |     |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |        Filter Operator [FIL_99]
+                                 |     |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
+                                 |     |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |           TableScan [TS_0]
+                                 |     |              alias:cs
+                                 |     |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
+                                 |     |<-Select Operator [SEL_4]
+                                 |           outputColumnNames:["_col0"]
+                                 |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                 |           Filter Operator [FIL_100]
+                                 |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
                                  |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              Filter Operator [FIL_98]
-                                 |                 predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                 |                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |                 TableScan [TS_2]
-                                 |                    alias:d1
-                                 |                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Select Operator [SEL_46]
-                                       outputColumnNames:["_col10","_col6","_col7","_col8"]
-                                       Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                       Map Join Operator [MAPJOIN_109]
+                                 |              TableScan [TS_2]
+                                 |                 alias:d1
+                                 |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                 |<-Select Operator [SEL_51]
+                                       outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
+                                       Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                       Map Join Operator [MAPJOIN_112]
                                        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"Map 5":"_col1 (type: string)","Map 7":"_col5 (type: string)"}
-                                       |  outputColumnNames:["_col6","_col7","_col8","_col10"]
-                                       |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map 5 [BROADCAST_EDGE]
-                                       |  Reduce Output Operator [RS_42]
-                                       |     key expressions:_col1 (type: string)
-                                       |     Map-reduce partition columns:_col1 (type: string)
-                                       |     sort order:+
-                                       |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                       |     Select Operator [SEL_19]
-                                       |        outputColumnNames:["_col1"]
-                                       |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                       |        Filter Operator [FIL_101]
-                                       |           predicate:((key = 'src1key') and value is not null) (type: boolean)
-                                       |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                       |           TableScan [TS_17]
-                                       |              alias:src1
-                                       |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map Join Operator [MAPJOIN_108]
+                                       |  keys:{"Map 10":"_col2 (type: string), _col4 (type: string)","Map 5":"_col8 (type: string), _col10 (type: string)"}
+                                       |  outputColumnNames:["_col6","_col7","_col14","_col15","_col17"]
+                                       |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                       |<-Map 10 [BROADCAST_EDGE]
+                                       |  Reduce Output Operator [RS_49]
+                                       |     key expressions:_col2 (type: string), _col4 (type: string)
+                                       |     Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
+                                       |     sort order:++
+                                       |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                       |     value expressions:_col3 (type: string), _col5 (type: string)
+                                       |     Map Join Operator [MAPJOIN_111]
+                                       |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                       |     |  keys:{"Map 10":"_col0 (type: string)","Map 9":"_col0 (type: string)"}
+                                       |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
+                                       |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                       |     |<-Map 9 [BROADCAST_EDGE]
+                                       |     |  Reduce Output Operator [RS_36]
+                                       |     |     key expressions:_col0 (type: string)
+                                       |     |     Map-reduce partition columns:_col0 (type: string)
+                                       |     |     sort order:+
+                                       |     |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                       |     |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+                                       |     |     Select Operator [SEL_31]
+                                       |     |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
+                                       |     |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                       |     |        Filter Operator [FIL_105]
+                                       |     |           predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
+                                       |     |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                       |     |           TableScan [TS_29]
+                                       |     |              alias:sr
+                                       |     |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                       |     |<-Select Operator [SEL_34]
+                                       |           outputColumnNames:["_col0"]
+                                       |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                       |           Filter Operator [FIL_106]
+                                       |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                       |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                       |              TableScan [TS_32]
+                                       |                 alias:d1
+                                       |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                       |<-Map Join Operator [MAPJOIN_110]
                                           |  condition map:[{"":"Inner Join 0 to 1"}]
-                                          |  keys:{"Map 7":"_col2 (type: string)","Map 6":"_col0 (type: string)"}
-                                          |  outputColumnNames:["_col4","_col5","_col6","_col8"]
-                                          |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                          |<-Map 6 [BROADCAST_EDGE]
-                                          |  Reduce Output Operator [RS_36]
-                                          |     key expressions:_col0 (type: string)
-                                          |     Map-reduce partition columns:_col0 (type: string)
+                                          |  keys:{"Map 3":"_col1 (type: string)","Map 5":"_col5 (type: string)"}
+                                          |  outputColumnNames:["_col6","_col7","_col8","_col10"]
+                                          |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Map 3 [BROADCAST_EDGE]
+                                          |  Reduce Output Operator [RS_42]
+                                          |     key expressions:_col1 (type: string)
+                                          |     Map-reduce partition columns:_col1 (type: string)
                                           |     sort order:+
-                                          |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                          |     Select Operator [SEL_22]
-                                          |        outputColumnNames:["_col0"]
-                                          |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                          |        Filter Operator [FIL_102]
-                                          |           predicate:((value = 'd1value') and key is not null) (type: boolean)
-                                          |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                          |           TableScan [TS_20]
-                                          |              alias:d1
-                                          |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                          |<-Map Join Operator [MAPJOIN_107]
+                                          |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                          |     Select Operator [SEL_7]
+                                          |        outputColumnNames:["_col1"]
+                                          |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                          |        Filter Operator [FIL_101]
+                                          |           predicate:((key = 'src1key') and value is not null) (type: boolean)
+                                          |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                          |           TableScan [TS_5]
+                                          |              alias:src1
+                                          |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Map Join Operator [MAPJOIN_109]
                                              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                             |  keys:{"Map 10":"_col3 (type: string)","Map 7":"_col1 (type: string)"}
-                                             |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                             |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 10 [BROADCAST_EDGE]
-                                             |  Reduce Output Operator [RS_32]
-                                             |     key expressions:_col3 (type: string)
-                                             |     Map-reduce partition columns:_col3 (type: string)
+                                             |  keys:{"Map 5":"_col2 (type: string)","Map 4":"_col0 (type: string)"}
+                                             |  outputColumnNames:["_col4","_col5","_col6","_col8"]
+                                             |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                             |<-Map 4 [BROADCAST_EDGE]
+                                             |  Reduce Output Operator [RS_24]
+                                             |     key expressions:_col0 (type: string)
+                                             |     Map-reduce partition columns:_col0 (type: string)
                                              |     sort order:+
-                                             |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                             |     value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
-                                             |     Select Operator [SEL_28]
-                                             |        outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
-                                             |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                             |        Filter Operator [FIL_104]
-                                             |           predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                             |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                             |           TableScan [TS_26]
-                                             |              alias:ss
-                                             |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Select Operator [SEL_25]
-                                                   outputColumnNames:["_col1"]
-                                                   Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                                   Filter Operator [FIL_103]
-                                                      predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
+                                             |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column 

<TRUNCATED>

[26/50] [abbrv] hive git commit: HIVE-10564 : webhcat should use webhcat-site.xml properties for controller job submission HIVE-10564.2.patch (Thejas Nair, reviewed by Eugene Koifman)

Posted by xu...@apache.org.
HIVE-10564 : webhcat should use webhcat-site.xml properties for controller job submission HIVE-10564.2.patch (Thejas Nair, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/306e61af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/306e61af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/306e61af

Branch: refs/heads/beeline-cli
Commit: 306e61afbab7b5aabc05f624f7ea4621e4fd9eb7
Parents: bd8d59e
Author: Thejas Nair <th...@hortonworks.com>
Authored: Wed May 6 17:50:07 2015 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Wed May 6 17:50:18 2015 -0700

----------------------------------------------------------------------
 .../hive/hcatalog/templeton/tool/TempletonControllerJob.java  | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/306e61af/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
index 349bd5c..5c7de80 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
@@ -76,16 +76,11 @@ public class TempletonControllerJob extends Configured implements Tool, JobSubmi
    *                              and added to the job
    */
   public TempletonControllerJob(boolean secureMetastoreAccess, AppConfig conf) {
-    super();
+    super(new Configuration(conf));
     this.secureMetastoreAccess = secureMetastoreAccess;
     this.appConf = conf;
   }
 
-  @Override
-  public Configuration getConf() {
-    return appConf;
-  }
-
   private JobID submittedJobId;
 
   public String getSubmittedId() {
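
The key change above is in the constructor: instead of overriding getConf() to hand back the live AppConfig, the job now passes a copy of it to the Configured base class. ToolRunner and the MapReduce client read the Tool's configuration through getConf(), so the webhcat-site.xml properties carried by AppConfig travel with the controller job, while anything the submission path mutates stays in the copy. A minimal sketch of the pattern, not the Hive sources (the class name and the job-name tweak are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ControllerJobSketch extends Configured implements Tool {

  public ControllerJobSketch(Configuration appConf) {
    // Defensive copy, as in the patch: the base class owns a Configuration
    // seeded with everything appConf (webhcat-site.xml included) carries.
    super(new Configuration(appConf));
  }

  @Override
  public int run(String[] args) throws Exception {
    // getConf() returns the copy; mutations here do not leak back into
    // the shared application configuration.
    Configuration conf = getConf();
    conf.set("mapreduce.job.name", "controller-sketch"); // hypothetical tweak
    return 0;
  }

  public static void main(String[] args) throws Exception {
    Configuration appConf = new Configuration(); // stand-in for AppConfig
    System.exit(ToolRunner.run(new ControllerJobSketch(appConf), args));
  }
}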


[34/50] [abbrv] hive git commit: HIVE-10612 : HIVE-10578 broke TestSQLStdHiveAccessControllerHS2 tests (Thejas Nair via Sushanth Sowmyan, reviewed by Ashutosh Chauhan)

Posted by xu...@apache.org.
HIVE-10612 : HIVE-10578 broke TestSQLStdHiveAccessControllerHS2 tests (Thejas Nair via Sushanth Sowmyan, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/72088ca7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/72088ca7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/72088ca7

Branch: refs/heads/beeline-cli
Commit: 72088ca7c39136dd68dbbefb3897cd7abfdd982c
Parents: e0044e0
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu May 7 01:27:54 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Thu May 7 01:28:57 2015 -0700

----------------------------------------------------------------------
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/72088ca7/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 54e154c..85e732f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2838,7 +2838,6 @@ public class HiveConf extends Configuration {
     ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS.varname,
     ConfVars.HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS.varname,
     ConfVars.HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES.varname,
-    ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION.varname,
     ConfVars.HIVE_QUOTEDID_SUPPORT.varname,
     ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES.varname,
     ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS.varname,


[09/50] [abbrv] hive git commit: HIVE-10213 : MapReduce jobs using dynamic-partitioning fail on commit (Mithun Radhakrishnan via Sushanth Sowmyan)

Posted by xu...@apache.org.
HIVE-10213 : MapReduce jobs using dynamic-partitioning fail on commit (Mithun Radhakrishnan via Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/02b6cd11
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/02b6cd11
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/02b6cd11

Branch: refs/heads/beeline-cli
Commit: 02b6cd11024d4b4334f70b95287dff49ba8379f7
Parents: 7276cd2
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Tue May 5 15:46:18 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Tue May 5 15:46:55 2015 -0700

----------------------------------------------------------------------
 .../hive/hcatalog/mapreduce/TaskCommitContextRegistry.java     | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/02b6cd11/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java
index 8c6d0fc..bdffb19 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/TaskCommitContextRegistry.java
@@ -71,7 +71,8 @@ public class TaskCommitContextRegistry {
   public synchronized void commitTask(TaskAttemptContext context) throws IOException {
     String key = generateKey(context);
     if (!taskCommitters.containsKey(key)) {
-      throw new IOException("No callback registered for TaskAttemptID:" + key);
+      LOG.warn("No callback registered for TaskAttemptID:" + key + ". Skipping.");
+      return;
     }
 
     try {
@@ -99,7 +100,8 @@ public class TaskCommitContextRegistry {
   public synchronized void abortTask(TaskAttemptContext context) throws IOException {
     String key = generateKey(context);
     if (!taskCommitters.containsKey(key)) {
-      throw new IOException("No callback registered for TaskAttemptID:" + key);
+      LOG.warn("No callback registered for TaskAttemptID:" + key + ". Skipping.");
+      return;
     }
 
     try {
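
The patch downgrades both commitTask and abortTask from failing hard to warn-and-skip when no committer was registered for the attempt — under dynamic partitioning, a task that wrote no output typically never registers a callback, so the hard failure aborted otherwise healthy jobs. A minimal sketch of the pattern, with illustrative names rather than the HCatalog classes (Runnable stands in for the real committer callback):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class CommitterRegistrySketch {
  private static final Log LOG = LogFactory.getLog(CommitterRegistrySketch.class);

  private final Map<String, Runnable> taskCommitters = new HashMap<String, Runnable>();

  public synchronized void register(String key, Runnable committer) {
    taskCommitters.put(key, committer);
  }

  public synchronized void commitTask(String key) throws IOException {
    Runnable committer = taskCommitters.get(key);
    if (committer == null) {
      // A task that produced no output has nothing to commit; treat the
      // missing callback as benign instead of failing the whole job.
      LOG.warn("No callback registered for TaskAttemptID:" + key + ". Skipping.");
      return;
    }
    committer.run();
  }
}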


[28/50] [abbrv] hive git commit: HIVE-10506: CBO (Calcite Return Path): Disallow return path to be enabled if CBO is off (Jesus Camacho Rodriguez via Laljo John Pullokkaran)

Posted by xu...@apache.org.
HIVE-10506: CBO (Calcite Return Path): Disallow return path to be enabled if CBO is off (Jesus Camacho Rodriguez via Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/93995c8b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/93995c8b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/93995c8b

Branch: refs/heads/beeline-cli
Commit: 93995c8be3dedc8785ced64939c608ae2433d4af
Parents: ecde4ae
Author: John Pullokkaran <jp...@hortonworks.com>
Authored: Wed May 6 18:15:33 2015 -0700
Committer: jpullokk <jp...@apache.org>
Committed: Wed May 6 18:21:19 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/Context.java      | 10 +++++++++-
 .../hive/ql/optimizer/IdentityProjectRemover.java       | 12 ++++++++++++
 .../hive/ql/optimizer/NonBlockingOpDeDupProc.java       | 11 +++++++++++
 .../org/apache/hadoop/hive/ql/optimizer/Optimizer.java  |  8 +++-----
 .../calcite/translator/HiveOpConverterPostProc.java     | 10 ++++++++++
 .../org/apache/hadoop/hive/ql/parse/CalcitePlanner.java |  1 +
 6 files changed, 46 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 9692738..a74bbbe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -23,7 +23,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.text.SimpleDateFormat;
-import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
@@ -86,6 +85,7 @@ public class Context {
   protected int pathid = 10000;
   protected boolean explain = false;
   protected String cboInfo;
+  protected boolean cboSucceeded;
   protected boolean explainLogical = false;
   protected String cmd = "";
   // number of previous attempts
@@ -706,4 +706,12 @@ public class Context {
     this.cboInfo = cboInfo;
   }
 
+  public boolean isCboSucceeded() {
+    return cboSucceeded;
+  }
+
+  public void setCboSucceeded(boolean cboSucceeded) {
+    this.cboSucceeded = cboSucceeded;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
index 433699b..e3d3ce6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
@@ -26,8 +26,10 @@ import java.util.Stack;
 
 import com.google.common.base.Predicates;
 import com.google.common.collect.Iterators;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.LateralViewForwardOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
@@ -69,6 +71,16 @@ public class IdentityProjectRemover implements Transform {
   private static final Log LOG = LogFactory.getLog(IdentityProjectRemover.class);
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // 0. We check the conditions to apply this transformation,
+    //    if we do not meet them we bail out
+    final boolean cboEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED);
+    final boolean returnPathEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP);
+    final boolean cboSucceeded = pctx.getContext().isCboSucceeded();
+    if(cboEnabled && returnPathEnabled && cboSucceeded) {
+      return pctx;
+    }
+
+    // 1. We apply the transformation
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
     opRules.put(new RuleRegExp("R1",
       "(" + SelectOperator.getOperatorName() + "%)"), new ProjectRemover());

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
index 95c2b0b..3006a6e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
@@ -28,6 +28,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
@@ -57,6 +58,16 @@ public class NonBlockingOpDeDupProc implements Transform {
 
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // 0. We check the conditions to apply this transformation,
+    //    if we do not meet them we bail out
+    final boolean cboEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED);
+    final boolean returnPathEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP);
+    final boolean cboSucceeded = pctx.getContext().isCboSucceeded();
+    if(cboEnabled && returnPathEnabled && cboSucceeded) {
+      return pctx;
+    }
+
+    // 1. We apply the transformation
     String SEL = SelectOperator.getOperatorName();
     String FIL = FilterOperator.getOperatorName();
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index 58f8afe..a7cf8b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -62,11 +62,9 @@ public class Optimizer {
 
     transformations = new ArrayList<Transform>();
 
-    // If we are translating Calcite operators into Hive operators, we need
-    // additional postprocessing
-    if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) {
-      transformations.add(new HiveOpConverterPostProc());
-    }
+    // Add the additional postprocessing transformations needed if
+    // we are translating Calcite operators into Hive operators.
+    transformations.add(new HiveOpConverterPostProc());
 
     // Add the transformation that computes the lineage information.
     transformations.add(new Generator());

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
index cdd7c7e..e7c8342 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
@@ -26,6 +26,7 @@ import java.util.Stack;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
@@ -55,6 +56,15 @@ public class HiveOpConverterPostProc implements Transform {
 
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // 0. We check the conditions to apply this transformation,
+    //    if we do not meet them we bail out
+    final boolean cboEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED);
+    final boolean returnPathEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP);
+    final boolean cboSucceeded = pctx.getContext().isCboSucceeded();
+    if(!(cboEnabled && returnPathEnabled && cboSucceeded)) {
+      return pctx;
+    }
+
     // 1. Initialize aux data structures
     this.pctx = pctx;
     this.aliasToOpInfo = new HashMap<String, Operator<? extends OperatorDesc>>();

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 49ad6ad..48f488f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -265,6 +265,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
           sinkOp = genPlan(getQB());
           LOG.info("CBO Succeeded; optimized logical plan.");
           this.ctx.setCboInfo("Plan optimized by CBO.");
+          this.ctx.setCboSucceeded(true);
           LOG.debug(newAST.dump());
           } 
         } catch (Exception e) {
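
Read together, the pieces above turn a registration-time decision into a run-time one: HiveOpConverterPostProc is now always on the transformation list, and each transform checks the same three conditions — CBO enabled, return path enabled, and CBO planning actually succeeded (the new Context.isCboSucceeded() flag set by CalcitePlanner). The post-processor bails out unless all three hold, while IdentityProjectRemover and NonBlockingOpDeDupProc bail out exactly when they do. A minimal sketch of the two complementary guards (PlanContext and PlanTransform are stand-ins, not the Hive interfaces):

// The three flags mirror HIVE_CBO_ENABLED, HIVE_CBO_RETPATH_HIVEOP and
// Context.isCboSucceeded() from the patch.
class PlanContext {
  boolean cboEnabled;
  boolean returnPathEnabled;
  boolean cboSucceeded;
}

interface PlanTransform {
  PlanContext transform(PlanContext pctx);
}

// Meaningful only on the successful Calcite return path.
class PostProcSketch implements PlanTransform {
  public PlanContext transform(PlanContext pctx) {
    if (!(pctx.cboEnabled && pctx.returnPathEnabled && pctx.cboSucceeded)) {
      return pctx; // nothing to post-process
    }
    // ... post-process the operators produced by the return path ...
    return pctx;
  }
}

// Redundant on the successful return path, so it steps aside there.
class DeDupSketch implements PlanTransform {
  public PlanContext transform(PlanContext pctx) {
    if (pctx.cboEnabled && pctx.returnPathEnabled && pctx.cboSucceeded) {
      return pctx; // the return-path plan does not need this pass
    }
    // ... apply the de-duplication / project-removal transformation ...
    return pctx;
  }
}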