Posted to commits@hive.apache.org by se...@apache.org on 2015/10/13 01:53:53 UTC

[1/5] hive git commit: HIVE-12095 - Revert "HIVE-11866 : Add framework to enable testing using LDAPServer using LDAP protocol (Naveen Gangam via Szehon)"

Repository: hive
Updated Branches:
  refs/heads/llap 3cfcad660 -> eb28deb61


HIVE-12095 - Revert "HIVE-11866 : Add framework to enable testing using LDAPServer using LDAP protocol (Naveen Gangam via Szehon)"

This reverts commit 8964c1ebc7f14f03c2c5773a785ed50d318798fe.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b97fdc0d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b97fdc0d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b97fdc0d

Branch: refs/heads/llap
Commit: b97fdc0dbe08e25a1445df8acdeec93b2ec08084
Parents: 09f5e84
Author: Thejas Nair <th...@hortonworks.com>
Authored: Mon Oct 12 12:22:24 2015 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Mon Oct 12 12:22:24 2015 -0700

----------------------------------------------------------------------
 pom.xml                                         |   1 -
 service/pom.xml                                 |   6 -
 .../auth/TestLdapAtnProviderWithLdapServer.java | 215 -------------------
 .../org/apache/hive/service/auth/ldapdata.ldif  |  59 -----
 4 files changed, 281 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b97fdc0d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 2ef2a09..b11a405 100644
--- a/pom.xml
+++ b/pom.xml
@@ -166,7 +166,6 @@
     <scala.version>2.10.4</scala.version>
     <tempus-fugit.version>1.1</tempus-fugit.version>
     <snappy.version>0.2</snappy.version>
-    <unboundid.version>2.3.1</unboundid.version>
     <wadl-resourcedoc-doclet.version>1.4</wadl-resourcedoc-doclet.version>
     <velocity.version>1.5</velocity.version>
     <xerces.version>2.9.1</xerces.version>

http://git-wip-us.apache.org/repos/asf/hive/blob/b97fdc0d/service/pom.xml
----------------------------------------------------------------------
diff --git a/service/pom.xml b/service/pom.xml
index d9bf8d1..07eeb9a 100644
--- a/service/pom.xml
+++ b/service/pom.xml
@@ -111,12 +111,6 @@
       <version>${junit.version}</version>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>com.unboundid</groupId>
-      <artifactId>unboundid-ldapsdk</artifactId>
-      <version>${unboundid.version}</version>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
 
   <profiles>

http://git-wip-us.apache.org/repos/asf/hive/blob/b97fdc0d/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithLdapServer.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithLdapServer.java b/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithLdapServer.java
deleted file mode 100644
index 8f015b0..0000000
--- a/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithLdapServer.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.service.auth;
-
-import com.unboundid.ldap.listener.InMemoryDirectoryServer;
-import com.unboundid.ldap.listener.InMemoryDirectoryServerConfig;
-import com.unboundid.ldap.listener.InMemoryListenerConfig;
-import com.unboundid.ldap.sdk.DN;
-import com.unboundid.ldap.sdk.LDAPConnection;
-import com.unboundid.ldif.LDIFReader;
-
-import java.io.BufferedReader;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.InputStream;
-import java.util.Hashtable;
-import java.util.Iterator;
-import java.util.Set;
-
-import javax.security.sasl.AuthenticationException;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests that use an in-memory LDAP Server (unboundID) to test HS2's
- * LDAP Authentication Provider. The ldap server uses a sample ldif
- * file to load ldap data into its directory.
- * Any of Hive's LDAP Configuration properties are set on the HiveConf
- * prior to the initialization of LdapAuthenticationProviderImpl.
- * Each test uses a different set of properties to alter the Atn
- * provider behavior.
- */
-public class TestLdapAtnProviderWithLdapServer {
-  private static String ldapUrl;
-  private static InMemoryDirectoryServer server;
-  private static InMemoryDirectoryServerConfig config;
-  private static HiveConf hiveConf;
-  private static byte[] hiveConfBackup;
-  private static LdapAuthenticationProviderImpl ldapProvider;
-  private static final int serverPort = 33300;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    DN dn = new DN("dc=example, dc=com");
-    config = new InMemoryDirectoryServerConfig(dn);
-    config.setSchema(null);
-    config.addAdditionalBindCredentials("cn=user1,ou=People,dc=example,dc=com","user1");
-    config.addAdditionalBindCredentials("cn=user2,ou=People,dc=example,dc=com","user2");
-
-    // listener config only necessary if you want to make sure that the
-    // server listens on port 33300, otherwise a free random port will
-    // be picked at runtime - which might be even better for tests btw.
-    config.setListenerConfigs(
-            new InMemoryListenerConfig("myListener", null, serverPort, null, null, null));
-
-    server = new InMemoryDirectoryServer(config);
-
-    server.startListening();
-
-    File ldifFile = new File(Thread.currentThread().getContextClassLoader()
-                       .getResource("org/apache/hive/service/auth/ldapdata.ldif").getFile());
-    LDIFReader ldifReader = new LDIFReader(ldifFile);
-    // import your test data from ldif files
-    server.importFromLDIF(true, ldifReader);
-
-    LDAPConnection conn = server.getConnection();
-    int port = server.getListenPort();
-    ldapUrl = new String("ldap://localhost:" + port);
-
-    hiveConf = new HiveConf();
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    hiveConf.writeXml(baos);
-    baos.close();
-    hiveConfBackup = baos.toByteArray();
-    hiveConf.set("hive.root.logger", "TRACE,console");
-    hiveConf.set("hive.server2.authentication.ldap.url", ldapUrl);
-    hiveConf.set("hive.server2.authentication.ldap.baseDN", "dc=example,dc=com");
-    hiveConf.set("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com");
-    FileOutputStream fos = new FileOutputStream(new File(hiveConf.getHiveSiteLocation().toURI()));
-    hiveConf.writeXml(fos);
-    fos.close();
-
-    ldapProvider = new LdapAuthenticationProviderImpl();
-  }
-
-  private static void initLdapAtn(Hashtable<String, String> hiveProperties)
-        throws Exception {
-    Set<String> keys = hiveProperties.keySet();
-    Iterator<String> iter = keys.iterator();
-    hiveConf = new HiveConf();
-
-    try {
-      boolean deleted = new File(hiveConf.getHiveSiteLocation().toURI()).delete();
-    } catch (Exception e) {}
-
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    hiveConf.writeXml(baos);
-    baos.close();
-
-    hiveConf.set("hive.root.logger", "TRACE,console");
-    hiveConf.set("hive.server2.authentication.ldap.url", ldapUrl);
-    hiveConf.set("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com");
-    hiveConf.set("hive.server2.authentication.ldap.groupDNPattern", "cn=%s,ou=Groups,dc=example,dc=com");
-
-    String key;
-    String value;
-    while (iter.hasNext()) {
-      key = iter.next();
-      value = hiveProperties.get(key);
-      hiveConf.set(key, value);
-    }
-
-    FileOutputStream fos = new FileOutputStream(new File(hiveConf.getHiveSiteLocation().toURI()));
-    hiveConf.writeXml(fos);
-    fos.close();
-
-    ldapProvider = new LdapAuthenticationProviderImpl();
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    server.shutDown(true);
-  }
-
-  @Test
-  public void testRoot() throws Exception {
-    Hashtable<String, String> ldapProperties = new Hashtable<String, String>();
-    initLdapAtn(ldapProperties);
-    String user;
-
-    user = "cn=user1,ou=People,dc=example,dc=com";
-    try {
-      ldapProvider.Authenticate(user, "user1");
-      assertTrue(true);
-
-      user = "cn=user2,ou=People,dc=example,dc=com";
-      ldapProvider.Authenticate(user, "user2");
-      assertTrue(true);
-    } catch (AuthenticationException e) {
-      e.printStackTrace();
-      Assert.fail("Authentication failed for user:" + user);
-    }
-  }
-
-  @Test
-  public void testUserBindPositive() throws Exception {
-    Hashtable<String, String> ldapProperties = new Hashtable<String, String>();
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", "user1,user2");
-    initLdapAtn(ldapProperties);
-    String user;
-
-    user = "cn=user1,ou=People,dc=example,dc=com";
-    try {
-      ldapProvider.Authenticate(user, "user1");
-      assertTrue("testUserBindPositive: Authentication succeeded for user1 as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password user1, expected to succeed");
-    }
-
-    user = "cn=user2,ou=People,dc=example,dc=com";
-    try {
-      ldapProvider.Authenticate(user, "user2");
-      assertTrue("testUserBindPositive: Authentication succeeded for user2 as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password user2, expected to succeed");
-    }
-  }
-
-  @Test
-  public void testUserBindNegative() throws Exception {
-    Hashtable<String, String> ldapProperties = new Hashtable<String, String>();
-    initLdapAtn(ldapProperties);
-
-    try {
-      ldapProvider.Authenticate("cn=user1,ou=People,dc=example,dc=com", "user2");
-      Assert.fail("testUserBindNegative: Authentication succeeded for user1 with password " +
-                   "user2, expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for user1 as expected", true);
-    }
-
-    try {
-      ldapProvider.Authenticate("cn=user2,ou=People,dc=example,dc=com", "user");
-      Assert.fail("testUserBindNegative: Authentication failed for user2 with password user, " +
-                    "expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for user2 as expected", true);
-    }
-  }
-}
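
For reference, a minimal standalone sketch of the in-memory LDAP pattern the reverted test relied on; the class name and the simple-bind check are illustrative only, and it assumes the unboundid-ldapsdk test dependency that this commit removes from service/pom.xml.

import com.unboundid.ldap.listener.InMemoryDirectoryServer;
import com.unboundid.ldap.listener.InMemoryDirectoryServerConfig;
import com.unboundid.ldap.sdk.LDAPConnection;
import com.unboundid.ldap.sdk.LDAPException;

public class InMemoryLdapSketch {
  public static void main(String[] args) throws Exception {
    // Base DN and bind credentials mirror the sample ldif data used by the test.
    InMemoryDirectoryServerConfig config =
        new InMemoryDirectoryServerConfig("dc=example,dc=com");
    config.setSchema(null); // no schema validation for the sample entries
    config.addAdditionalBindCredentials("cn=user1,ou=People,dc=example,dc=com", "user1");

    InMemoryDirectoryServer server = new InMemoryDirectoryServer(config);
    server.startListening();
    int port = server.getListenPort(); // a free port is chosen at runtime

    // A successful simple bind is essentially the check the LDAP Atn provider performs.
    LDAPConnection conn = null;
    try {
      conn = new LDAPConnection("localhost", port,
          "cn=user1,ou=People,dc=example,dc=com", "user1");
      System.out.println("bind succeeded on port " + port);
    } catch (LDAPException e) {
      System.out.println("bind failed: " + e.getResultCode());
    } finally {
      if (conn != null) {
        conn.close();
      }
      server.shutDown(true);
    }
  }
}
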

http://git-wip-us.apache.org/repos/asf/hive/blob/b97fdc0d/service/src/test/resources/org/apache/hive/service/auth/ldapdata.ldif
----------------------------------------------------------------------
diff --git a/service/src/test/resources/org/apache/hive/service/auth/ldapdata.ldif b/service/src/test/resources/org/apache/hive/service/auth/ldapdata.ldif
deleted file mode 100644
index 686fb3f..0000000
--- a/service/src/test/resources/org/apache/hive/service/auth/ldapdata.ldif
+++ /dev/null
@@ -1,59 +0,0 @@
-dn: dc=example,dc=com
-distinguishedName: dc=example,dc=com
-objectClass: top
-objectClass: domain
-dc: example
-
-dn: ou=People,dc=example,dc=com
-distinguishedName: ou=People,dc=example,dc=com
-objectClass: top
-objectClass: organizationalUnit
-ou: People
-description: Contains entries which describe persons (seamen)
-
-dn: ou=Groups,dc=example,dc=com
-distinguishedName: ou=Groups,dc=example,dc=com
-objectClass: top
-objectClass: organizationalUnit
-ou: Groups
-description: Contains entries which describe groups (crews, for instance)
-
-dn: cn=group1,ou=Groups,dc=example,dc=com
-distinguishedName: cn=group1,ou=Groups,dc=example,dc=com
-objectClass: group
-objectClass: top
-givenName: Group1
-cn: Test Group1
-sn: group1
-
-dn: cn=group2,ou=Groups,dc=example,dc=com
-distinguishedName: cn=group2,ou=Groups,dc=example,dc=com
-objectClass: group
-objectClass: top
-givenName: Group2
-cn: Test Group2
-sn: group1
-
-dn: cn=user1,ou=People,dc=example,dc=com
-distinguishedName: cn=user1,ou=People,dc=example,dc=com
-objectClass: inetOrgPerson
-objectClass: person
-objectClass: top
-givenName: Test1
-cn: Test User1
-sn: user1
-uid: user1
-userPassword: user1
-memberOf: cn=group1,ou=Groups,dc=example,dc=com
-
-dn: cn=user2,ou=People,dc=example,dc=com
-distinguishedName: cn=user2,ou=People,dc=example,dc=com
-objectClass: inetOrgPerson
-objectClass: person
-objectClass: top
-givenName: Test2
-cn: Test User2
-sn: user2
-uid: user2
-userPassword: user2
-memberOf: cn=group2,ou=Groups,dc=example,dc=com


[2/5] hive git commit: HIVE-12065 : FS stats collection may generate incorrect stats for multi-insert query (Ashutosh Chauhan via Pengcheng Xiong)

Posted by se...@apache.org.
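
The query-file changes below add "set hive.stats.dbclass=fs;" so these multi-insert tests exercise the file-system based stats publisher addressed by HIVE-12065; the .q.out updates further down appear to reflect the row counts and data sizes that are now collected. A minimal sketch of setting the same property programmatically, assuming only a HiveConf in scope (the class name is illustrative):

import org.apache.hadoop.hive.conf.HiveConf;

public class FsStatsConfSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Equivalent of "set hive.stats.dbclass=fs;" in the .q files below.
    conf.set("hive.stats.dbclass", "fs");
    System.out.println(conf.get("hive.stats.dbclass"));
  }
}
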
http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/queries/clientpositive/infer_bucket_sort_multi_insert.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_multi_insert.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_multi_insert.q
index e3992b8..3341df0 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_multi_insert.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_multi_insert.q
@@ -1,5 +1,6 @@
 set hive.exec.infer.bucket.sort=true;
 set hive.exec.infer.bucket.sort.num.buckets.power.two=true;
+set hive.stats.dbclass=fs;
 
 -- This tests inferring how data is bucketed/sorted from the operators in the reducer
 -- and populating that information in partitions' metadata.  In particular, those cases

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/queries/clientpositive/multi_insert.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert.q b/ql/src/test/queries/clientpositive/multi_insert.q
index 5947985..1fdfa59 100644
--- a/ql/src/test/queries/clientpositive/multi_insert.q
+++ b/ql/src/test/queries/clientpositive/multi_insert.q
@@ -5,7 +5,7 @@ create table src_multi2 like src;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
-
+set hive.stats.dbclass=fs;
 explain
 from src
 insert overwrite table src_multi1 select * where key < 10

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/queries/clientpositive/multi_insert_gby2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_gby2.q b/ql/src/test/queries/clientpositive/multi_insert_gby2.q
index 46e2b19..fa29261 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_gby2.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_gby2.q
@@ -1,7 +1,7 @@
 --HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables
 create table e1 (count int);
 create table e2 (percentile double);
-
+set hive.stats.dbclass=fs;
 explain
 FROM (select key, cast(key as double) as value from src order by key) a
 INSERT OVERWRITE TABLE e1

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/queries/clientpositive/multi_insert_gby3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_gby3.q b/ql/src/test/queries/clientpositive/multi_insert_gby3.q
index 1221af4..d85ff9a 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_gby3.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_gby3.q
@@ -2,7 +2,7 @@
 create table e1 (key string, keyD double);
 create table e2 (key string, keyD double, value string);
 create table e3 (key string, keyD double);
-
+set hive.stats.dbclass=fs;
 explain
 FROM (select key, cast(key as double) as keyD, value from src order by key) a
 INSERT OVERWRITE TABLE e1

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q b/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q
index acf905f..d80717f 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
 -- SORT_QUERY_RESULTS
 
 create table src_10 as select * from src limit 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/queries/clientpositive/multi_insert_mixed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_mixed.q b/ql/src/test/queries/clientpositive/multi_insert_mixed.q
index 6d91973..8fb577a 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_mixed.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_mixed.q
@@ -1,7 +1,7 @@
 create table src_multi1 like src;
 create table src_multi2 like src;
 create table src_multi3 like src;
-
+set hive.stats.dbclass=fs;
 -- Testing the case where a map work contains both shuffling (ReduceSinkOperator)
 -- and inserting to output table (FileSinkOperator).
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q b/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
index 3117713..3ddaa47 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
@@ -1,5 +1,5 @@
 set hive.multi.insert.move.tasks.share.dependencies=true;
-
+set hive.stats.dbclass=fs;
 -- SORT_QUERY_RESULTS
 
 create table src_multi1 like src;

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/queries/clientpositive/multi_insert_union_src.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_union_src.q b/ql/src/test/queries/clientpositive/multi_insert_union_src.q
index 088d756..f9b6f87 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_union_src.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_union_src.q
@@ -1,7 +1,7 @@
 drop table if exists src2;
 drop table if exists src_multi1;
 drop table if exists src_multi1;
-
+set hive.stats.dbclass=fs;
 CREATE TABLE src2 as SELECT * FROM src;
 
 create table src_multi1 like src;

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/column_access_stats.q.out b/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
index 5803093..869d6cb 100644
--- a/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
+++ b/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
@@ -387,15 +387,15 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -405,10 +405,10 @@ STAGE PLANS:
                   0 key (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -506,15 +506,15 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((val = 3) and key is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -600,19 +600,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((key = 6) and val is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: val (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -709,32 +709,32 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: t3
-                  Statistics: Num rows: 1 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 1 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE
                       value expressions: val (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -745,12 +745,12 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
               Join Operator
@@ -760,10 +760,10 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 key (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/results/clientpositive/spark/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out
index fb08f10..b67a909 100644
--- a/ql/src/test/results/clientpositive/spark/pcr.q.out
+++ b/ql/src/test/results/clientpositive/spark/pcr.q.out
@@ -4390,8 +4390,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                               name default.pcr_t2
                               numFiles 1
-                              numRows 0
-                              rawDataSize 0
+                              numRows 20
+                              rawDataSize 160
                               serialization.ddl struct pcr_t2 { i32 key, string value}
                               serialization.format 1
                               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -4429,8 +4429,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                               name default.pcr_t3
                               numFiles 1
-                              numRows 0
-                              rawDataSize 0
+                              numRows 20
+                              rawDataSize 160
                               serialization.ddl struct pcr_t3 { i32 key, string value}
                               serialization.format 1
                               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -4509,8 +4509,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.pcr_t2
                 numFiles 1
-                numRows 0
-                rawDataSize 0
+                numRows 20
+                rawDataSize 160
                 serialization.ddl struct pcr_t2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -4540,8 +4540,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.pcr_t3
                 numFiles 1
-                numRows 0
-                rawDataSize 0
+                numRows 20
+                rawDataSize 160
                 serialization.ddl struct pcr_t3 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join5.q.out b/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
index 0cf9080..6bf5080 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
@@ -59,40 +59,40 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (id1 is not null and id2 is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: id1 (type: string), id2 (type: string)
                       sort order: ++
                       Map-reduce partition columns: id1 (type: string), id2 (type: string)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (id is not null and (d <= 1)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: id (type: string), id (type: string)
                       sort order: ++
                       Map-reduce partition columns: id (type: string), id (type: string)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: d (type: int)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: c
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (d <= 1) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: d (type: int)
         Reducer 2 
             Reduce Operator Tree:
@@ -103,10 +103,10 @@ STAGE PLANS:
                   0 id1 (type: string), id2 (type: string)
                   1 id (type: string), id (type: string)
                 outputColumnNames: _col0, _col1, _col6
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string), _col1 (type: string), _col6 (type: int)
         Reducer 3 
             Reduce Operator Tree:
@@ -117,14 +117,14 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1, _col6, _col11
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col6 (type: int), _col11 (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -167,40 +167,40 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (id1 is not null and id2 is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: id1 (type: string), id2 (type: string)
                       sort order: ++
                       Map-reduce partition columns: id1 (type: string), id2 (type: string)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (id is not null and (d <= 1)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: id (type: string), id (type: string)
                       sort order: ++
                       Map-reduce partition columns: id (type: string), id (type: string)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: d (type: int)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: c
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (d <= 1) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: d (type: int)
         Reducer 2 
             Reduce Operator Tree:
@@ -211,10 +211,10 @@ STAGE PLANS:
                   0 id1 (type: string), id2 (type: string)
                   1 id (type: string), id (type: string)
                 outputColumnNames: _col0, _col1, _col6
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string), _col1 (type: string), _col6 (type: int)
         Reducer 3 
             Reduce Operator Tree:
@@ -225,17 +225,17 @@ STAGE PLANS:
                   0 
                   1 
                 outputColumnNames: _col0, _col1, _col6, _col11
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: ((_col6 > 1) or (_col11 > 1)) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string), _col6 (type: int), _col11 (type: int)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
index eeb23a8..eeb18b0 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
@@ -159,19 +159,19 @@ STAGE PLANS:
                         1 key (type: int)
                       outputColumnNames: _col0, _col7
                       Position of Big Table: 0
-                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
                       BucketMapJoin: true
                       Select Operator
                         expressions: _col0 (type: int), _col7 (type: string)
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
                           Static Partition Specification: ds=1/
-                          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out
index 7c34ceb..878b930 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out
@@ -341,12 +341,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: UDFToDouble(value) is not null (type: boolean)
-                    Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
                       keys:
                         0 UDFToDouble(key) (type: double)
@@ -373,8 +373,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.test_table4
                     numFiles 16
-                    numRows 0
-                    rawDataSize 0
+                    numRows 500
+                    rawDataSize 5312
                     serialization.ddl struct test_table4 { i32 key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -395,8 +395,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.test_table4
                       numFiles 16
-                      numRows 0
-                      rawDataSize 0
+                      numRows 500
+                      rawDataSize 5312
                       serialization.ddl struct test_table4 { i32 key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -418,12 +418,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: UDFToDouble(key) is not null (type: boolean)
-                    Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
                            Inner Join 0 to 1
@@ -434,15 +434,15 @@ STAGE PLANS:
                       input vertices:
                         1 Map 3
                       Position of Big Table: 0
-                      Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: int)
                           sort order: +
-                          Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                           tag: -1
                           value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
                           auto parallelism: false
@@ -467,8 +467,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.test_table3
                     numFiles 16
-                    numRows 0
-                    rawDataSize 0
+                    numRows 500
+                    rawDataSize 5312
                     serialization.ddl struct test_table3 { i32 key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -489,8 +489,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.test_table3
                       numFiles 16
-                      numRows 0
-                      rawDataSize 0
+                      numRows 500
+                      rawDataSize 5312
                       serialization.ddl struct test_table3 { i32 key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -507,16 +507,16 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int), VALUE._col2 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 1080 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 10 Data size: 1080 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out
index 4032170..06b6121 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out
@@ -809,12 +809,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 71 Data size: 7718 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: (key is not null and value is not null) (type: boolean)
-                    Statistics: Num rows: 18 Data size: 1956 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 125 Data size: 1804 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
                       keys:
                         0 key (type: int), value (type: string)
@@ -846,8 +846,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.test_table2
                     numFiles 16
-                    numRows 0
-                    rawDataSize 0
+                    numRows 500
+                    rawDataSize 7218
                     serialization.ddl struct test_table2 { i32 key, i32 key2, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -868,8 +868,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.test_table2
                       numFiles 16
-                      numRows 0
-                      rawDataSize 0
+                      numRows 500
+                      rawDataSize 7218
                       serialization.ddl struct test_table2 { i32 key, i32 key2, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out
index db737b3..9ce40bd 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                       keys:
                         0 key (type: int)
                         1 key (type: int)
-                      Statistics: Num rows: 799 Data size: 3198 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/results/clientpositive/spark/union34.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union34.q.out b/ql/src/test/results/clientpositive/spark/union34.q.out
index a9edf14..0d35488 100644
--- a/ql/src/test/results/clientpositive/spark/union34.q.out
+++ b/ql/src/test/results/clientpositive/spark/union34.q.out
@@ -92,14 +92,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src10_2
-                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
@@ -117,14 +117,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src10_1
-                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -134,11 +134,11 @@ STAGE PLANS:
                         outputColumnNames: _col0, _col1
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: string)
                           sort order: +
-                          Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: string)
             Local Work:
               Map Reduce Local Work
@@ -146,39 +146,39 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src10_3
-                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: src10_4
-                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -280,64 +280,64 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src10_1
-                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: src10_2
-                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: src10_3
-                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: string)
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: src10_4
-                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -345,21 +345,21 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
-                  Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 3 Data size: 353 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 25 Data size: 265 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat


[3/5] hive git commit: HIVE-12065 : FS stats collection may generate incorrect stats for multi-insert query (Ashutosh Chauhan via Pengcheng Xiong)

Posted by se...@apache.org.
HIVE-12065 : FS stats collection may generate incorrect stats for multi-insert query (Ashutosh Chauhan via Pengcheng Xiong)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9b4826e7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9b4826e7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9b4826e7

Branch: refs/heads/llap
Commit: 9b4826e765423c4766dbac052127546213ac0752
Parents: b97fdc0
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Oct 7 17:45:50 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Oct 12 16:21:05 2015 -0700

----------------------------------------------------------------------
 .../hive/ql/stats/DummyStatsAggregator.java     | 12 ++--
 .../hive/ql/stats/DummyStatsPublisher.java      | 15 +++--
 .../ql/stats/KeyVerifyingStatsAggregator.java   | 10 +--
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |  8 ++-
 .../apache/hadoop/hive/ql/exec/StatsTask.java   | 31 ++++++---
 .../hadoop/hive/ql/exec/TableScanOperator.java  |  7 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   | 31 ++++++++-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      | 14 +++-
 .../hive/ql/exec/spark/SparkPlanGenerator.java  | 16 +++--
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |  5 +-
 .../hive/ql/index/AggregateIndexHandler.java    |  1 -
 .../hive/ql/index/TableBasedIndexHandler.java   |  7 --
 .../ql/index/bitmap/BitmapIndexHandler.java     |  1 -
 .../ql/index/compact/CompactIndexHandler.java   |  1 -
 .../ql/io/rcfile/stats/PartialScanMapper.java   |  7 +-
 .../ql/io/rcfile/stats/PartialScanTask.java     | 11 +++-
 .../ql/io/rcfile/stats/PartialScanWork.java     | 14 ++++
 .../hive/ql/optimizer/GenMRTableScan1.java      |  3 +
 .../hive/ql/optimizer/GenMapRedUtils.java       |  2 +-
 .../hive/ql/parse/ProcessAnalyzeTable.java      |  4 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  8 +--
 .../parse/spark/SparkProcessAnalyzeTable.java   |  2 +
 .../hadoop/hive/ql/plan/FileSinkDesc.java       | 16 ++++-
 .../apache/hadoop/hive/ql/plan/StatsWork.java   | 15 +++--
 .../hadoop/hive/ql/plan/TableScanDesc.java      | 12 +++-
 .../hive/ql/stats/CounterStatsAggregator.java   |  8 +--
 .../ql/stats/CounterStatsAggregatorSpark.java   |  6 +-
 .../ql/stats/CounterStatsAggregatorTez.java     | 10 ++-
 .../hive/ql/stats/CounterStatsPublisher.java    |  7 +-
 .../hadoop/hive/ql/stats/StatsAggregator.java   |  7 +-
 .../hive/ql/stats/StatsCollectionContext.java   | 63 ++++++++++++++++++
 .../hadoop/hive/ql/stats/StatsPublisher.java    |  8 +--
 .../hive/ql/stats/fs/FSStatsAggregator.java     | 23 ++++---
 .../hive/ql/stats/fs/FSStatsPublisher.java      | 32 +++++----
 .../hive/ql/stats/jdbc/JDBCStatsAggregator.java | 18 +++---
 .../hive/ql/stats/jdbc/JDBCStatsPublisher.java  | 22 ++++---
 .../hive/ql/exec/TestFileSinkOperator.java      | 13 ++--
 .../ql/exec/TestStatsPublisherEnhanced.java     | 61 ++++++++++--------
 .../infer_bucket_sort_multi_insert.q            |  1 +
 .../test/queries/clientpositive/multi_insert.q  |  2 +-
 .../queries/clientpositive/multi_insert_gby2.q  |  2 +-
 .../queries/clientpositive/multi_insert_gby3.q  |  2 +-
 .../clientpositive/multi_insert_lateral_view.q  |  1 +
 .../queries/clientpositive/multi_insert_mixed.q |  2 +-
 ...multi_insert_move_tasks_share_dependencies.q |  2 +-
 .../clientpositive/multi_insert_union_src.q     |  2 +-
 .../spark/column_access_stats.q.out             | 46 ++++++-------
 .../test/results/clientpositive/spark/pcr.q.out | 16 ++---
 .../clientpositive/spark/ppd_join5.q.out        | 58 ++++++++---------
 .../clientpositive/spark/smb_mapjoin_12.q.out   |  6 +-
 .../clientpositive/spark/smb_mapjoin_13.q.out   | 36 +++++------
 .../clientpositive/spark/smb_mapjoin_15.q.out   | 12 ++--
 .../clientpositive/spark/smb_mapjoin_16.q.out   |  2 +-
 .../results/clientpositive/spark/union34.q.out  | 68 ++++++++++----------
 54 files changed, 486 insertions(+), 303 deletions(-)
----------------------------------------------------------------------
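
A note for readers skimming the diff: the stats tmp location used to be stashed in a single conf key (StatsSetupConst.STATS_TMP_LOC), which a multi-insert query, with several FileSinks, would overwrite; the SemanticAnalyzer hunk below moves that value onto each FileSinkDesc/TableScanDesc, and the publishers/aggregators now receive it through a new StatsCollectionContext instead of a bare Configuration. The following is a minimal, self-contained sketch of the publisher-side call pattern the diffs migrate to (the tmp dir, key prefix and row count are made-up illustration values; only the class and method names come from this change):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
import org.apache.hadoop.hive.ql.stats.StatsFactory;
import org.apache.hadoop.hive.ql.stats.StatsPublisher;

public class StatsPublishSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();

    // Each FileSink/TableScan now carries its own stats tmp dir in its descriptor;
    // a literal path stands in here for FileSinkDesc.getStatsTmpDir().
    StatsCollectionContext ctx = new StatsCollectionContext(conf);
    ctx.setStatsTmpDir("/tmp/hive-stats-sketch");

    StatsFactory factory = StatsFactory.newFactory(conf);
    if (factory == null) {
      return;  // no stats backend configured
    }
    StatsPublisher publisher = factory.getStatsPublisher();
    if (publisher.init(ctx) && publisher.connect(ctx)) {
      Map<String, String> stats = new HashMap<>();
      stats.put(StatsSetupConst.ROW_COUNT, "500");          // illustrative value
      publisher.publishStat("prefix/partition=1/", stats);  // key prefix is illustrative
      publisher.closeConnection(ctx);
    }
  }
}

The aggregator-side counterpart of this sketch follows the StatsAggregator test-implementation diffs below.
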


http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java
index 327eabc..eb3f6eb 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java
@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.hive.ql.stats;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Task;
 
 /**
  * An test implementation for StatsAggregator.
@@ -34,8 +32,9 @@ public class DummyStatsAggregator implements StatsAggregator {
 
   // This is a test. The parameter hive.test.dummystats.aggregator's value
   // denotes the method which needs to throw an error.
-  public boolean connect(Configuration hconf, Task sourceTask) {
-    errorMethod = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVETESTMODEDUMMYSTATAGGR);
+  @Override
+  public boolean connect(StatsCollectionContext scc) {
+    errorMethod = HiveConf.getVar(scc.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATAGGR);
     if (errorMethod.equalsIgnoreCase("connect")) {
       return false;
     }
@@ -43,17 +42,20 @@ public class DummyStatsAggregator implements StatsAggregator {
     return true;
   }
 
+  @Override
   public String aggregateStats(String keyPrefix, String statType) {
     return null;
   }
 
-  public boolean closeConnection() {
+  @Override
+  public boolean closeConnection(StatsCollectionContext scc) {
     if (errorMethod.equalsIgnoreCase("closeConnection")) {
       return false;
     }
     return true;
   }
 
+  @Override
   public boolean cleanUp(String keyPrefix) {
     if (errorMethod.equalsIgnoreCase("cleanUp")) {
       return false;

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java
index 1f6e80f..9f1fdb4 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.stats;
 
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 
 /**
@@ -36,8 +35,9 @@ public class DummyStatsPublisher implements StatsPublisher {
 
   // This is a test. The parameter hive.test.dummystats.publisher's value
   // denotes the method which needs to throw an error.
-  public boolean init(Configuration hconf) {
-    errorMethod = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB);
+  @Override
+  public boolean init(StatsCollectionContext context) {
+    errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB);
     if (errorMethod.equalsIgnoreCase("init")) {
       return false;
     }
@@ -45,8 +45,9 @@ public class DummyStatsPublisher implements StatsPublisher {
     return true;
   }
 
-  public boolean connect(Configuration hconf) {
-    errorMethod = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB);
+  @Override
+  public boolean connect(StatsCollectionContext context) {
+    errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB);
     if (errorMethod.equalsIgnoreCase("connect")) {
       return false;
     }
@@ -54,6 +55,7 @@ public class DummyStatsPublisher implements StatsPublisher {
     return true;
   }
 
+  @Override
   public boolean publishStat(String fileID, Map<String, String> stats) {
     if (errorMethod.equalsIgnoreCase("publishStat")) {
       return false;
@@ -61,7 +63,8 @@ public class DummyStatsPublisher implements StatsPublisher {
     return true;
   }
 
-  public boolean closeConnection() {
+  @Override
+  public boolean closeConnection(StatsCollectionContext context) {
     if (errorMethod.equalsIgnoreCase("closeConnection")) {
       return false;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/KeyVerifyingStatsAggregator.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/KeyVerifyingStatsAggregator.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/KeyVerifyingStatsAggregator.java
index cb0b584..4e00316 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/KeyVerifyingStatsAggregator.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/KeyVerifyingStatsAggregator.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.stats;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
 /**
@@ -30,10 +28,12 @@ import org.apache.hadoop.hive.ql.session.SessionState;
 
 public class KeyVerifyingStatsAggregator implements StatsAggregator {
 
-  public boolean connect(Configuration hconf, Task sourceTask) {
+  @Override
+  public boolean connect(StatsCollectionContext scc) {
     return true;
   }
 
+  @Override
   public String aggregateStats(String keyPrefix, String statType) {
     SessionState ss = SessionState.get();
     // Have to use the length instead of the actual prefix because the prefix is location dependent
@@ -43,10 +43,12 @@ public class KeyVerifyingStatsAggregator implements StatsAggregator {
     return null;
   }
 
-  public boolean closeConnection() {
+  @Override
+  public boolean closeConnection(StatsCollectionContext scc) {
     return true;
   }
 
+  @Override
   public boolean cleanUp(String keyPrefix) {
     return true;
   }
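
As promised above, the aggregator side follows the same pattern against the refactored interface: connect with a StatsCollectionContext, read back by key prefix and stat type, then close and clean up. A self-contained sketch follows (the tmp dir and key prefix are the same made-up values as in the publisher sketch; whether any data is actually there to aggregate depends on the configured backend):

import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.stats.StatsAggregator;
import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
import org.apache.hadoop.hive.ql.stats.StatsFactory;

public class StatsAggregateSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    StatsCollectionContext ctx = new StatsCollectionContext(conf);
    ctx.setStatsTmpDir("/tmp/hive-stats-sketch");   // assumed dir, matches the publisher sketch

    StatsFactory factory = StatsFactory.newFactory(conf);
    if (factory == null) {
      return;  // no stats backend configured
    }
    StatsAggregator aggregator = factory.getStatsAggregator();
    if (aggregator.connect(ctx)) {
      // The key prefix must match what the publisher used; ROW_COUNT is the stat type.
      String rows = aggregator.aggregateStats("prefix/partition=1/", StatsSetupConst.ROW_COUNT);
      System.out.println("aggregated numRows = " + rows);
      aggregator.cleanUp("prefix/partition=1/");
      aggregator.closeConnection(ctx);
    }
  }
}
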

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index e247673..864c3d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -61,9 +61,9 @@ import org.apache.hadoop.hive.ql.plan.FileSinkDesc.DPSortState;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.SkewedColumnPositionPair;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.stats.StatsCollectionTaskIndependent;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeStats;
@@ -1137,7 +1137,9 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       return;
     }
 
-    if (!statsPublisher.connect(hconf)) {
+    StatsCollectionContext sContext = new StatsCollectionContext(hconf);
+    sContext.setStatsTmpDir(conf.getStatsTmpDir());
+    if (!statsPublisher.connect(sContext)) {
       // just return, stats gathering should not block the main query
       LOG.error("StatsPublishing error: cannot connect to database");
       if (isStatsReliable) {
@@ -1204,7 +1206,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         }
       }
     }
-    if (!statsPublisher.closeConnection()) {
+    if (!statsPublisher.closeConnection(sContext)) {
       // The original exception is lost.
       // Not changing the interface to maintain backward compatibility
       if (isStatsReliable) {

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index 41ece04..9775645 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.stats.StatsAggregator;
 import org.apache.hadoop.hive.ql.stats.StatsCollectionTaskIndependent;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.util.StringUtils;
@@ -134,13 +135,14 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
 
     StatsAggregator statsAggregator = null;
     int ret = 0;
-
+    StatsCollectionContext scc = null;
     try {
       // Stats setup:
       Warehouse wh = new Warehouse(conf);
       if (!getWork().getNoStatsAggregator() && !getWork().isNoScanAnalyzeCommand()) {
         try {
-          statsAggregator = createStatsAggregator(conf);
+          scc = getContext();
+          statsAggregator = createStatsAggregator(scc);
         } catch (HiveException e) {
           if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
             throw e;
@@ -241,7 +243,7 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
       }
     } finally {
       if (statsAggregator != null) {
-        statsAggregator.closeConnection();
+        statsAggregator.closeConnection(scc);
       }
     }
     // The return value of 0 indicates success,
@@ -268,7 +270,7 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
     return prefix.toString();
   }
 
-  private StatsAggregator createStatsAggregator(HiveConf conf) throws HiveException {
+  private StatsAggregator createStatsAggregator(StatsCollectionContext scc) throws HiveException {
     String statsImpl = HiveConf.getVar(conf, HiveConf.ConfVars.HIVESTATSDBCLASS);
     StatsFactory factory = StatsFactory.newFactory(statsImpl, conf);
     if (factory == null) {
@@ -277,21 +279,30 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
     // initialize stats publishing table for noscan which has only stats task
     // the rest of MR task following stats task initializes it in ExecDriver.java
     StatsPublisher statsPublisher = factory.getStatsPublisher();
-    if (!statsPublisher.init(conf)) { // creating stats table if not exists
+    if (!statsPublisher.init(scc)) { // creating stats table if not exists
       throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
     }
-    Task sourceTask = getWork().getSourceTask();
-    if (sourceTask == null) {
-      throw new HiveException(ErrorMsg.STATSAGGREGATOR_SOURCETASK_NULL.getErrorCodedMsg());
-    }
+
     // manufacture a StatsAggregator
     StatsAggregator statsAggregator = factory.getStatsAggregator();
-    if (!statsAggregator.connect(conf, sourceTask)) {
+    if (!statsAggregator.connect(scc)) {
       throw new HiveException(ErrorMsg.STATSAGGREGATOR_CONNECTION_ERROR.getErrorCodedMsg(statsImpl));
     }
     return statsAggregator;
   }
 
+  private StatsCollectionContext getContext() throws HiveException {
+
+    StatsCollectionContext scc = new StatsCollectionContext(conf);
+    Task sourceTask = getWork().getSourceTask();
+    if (sourceTask == null) {
+      throw new HiveException(ErrorMsg.STATSAGGREGATOR_SOURCETASK_NULL.getErrorCodedMsg());
+    }
+    scc.setTask(sourceTask);
+    scc.setStatsTmpDir(this.getWork().getStatsTmpDir());
+    return scc;
+  }
+
   private boolean existStats(Map<String, String> parameters) {
     return parameters.containsKey(StatsSetupConst.ROW_COUNT)
         || parameters.containsKey(StatsSetupConst.NUM_FILES)

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index cbf02e9..22f7520 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.stats.StatsCollectionTaskIndependent;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
@@ -282,7 +283,9 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
 
     // Initializing a stats publisher
     StatsPublisher statsPublisher = Utilities.getStatsPublisher(jc);
-    if (!statsPublisher.connect(jc)) {
+    StatsCollectionContext sc = new StatsCollectionContext(jc);
+    sc.setStatsTmpDir(conf.getTmpStatsDir());
+    if (!statsPublisher.connect(sc)) {
       // just return, stats gathering should not block the main query.
       if (isLogInfoEnabled) {
         LOG.info("StatsPublishing error: cannot connect to database.");
@@ -318,7 +321,7 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
 	LOG.info("publishing : " + key + " : " + statsToPublish.toString());
       }
     }
-    if (!statsPublisher.closeConnection()) {
+    if (!statsPublisher.closeConnection(sc)) {
       if (isStatsReliable) {
         throw new HiveException(ErrorMsg.STATSPUBLISHER_CLOSING_ERROR.getErrorCodedMsg());
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 5b21af9..b1ab1b5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
-import static com.google.common.base.Preconditions.checkNotNull;
-
 import java.beans.DefaultPersistenceDelegate;
 import java.beans.Encoder;
 import java.beans.ExceptionListener;
@@ -102,6 +100,7 @@ import org.apache.hadoop.hive.common.HiveInterruptCallback;
 import org.apache.hadoop.hive.common.HiveInterruptUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -160,6 +159,7 @@ import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.SparkEdgeProperty;
 import org.apache.hadoop.hive.ql.plan.SparkWork;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.api.Adjacency;
 import org.apache.hadoop.hive.ql.plan.api.Graph;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -3933,4 +3933,31 @@ public final class Utilities {
     }
 
   }
+  public static List<String> getStatsTmpDirs(BaseWork work, Configuration conf) {
+
+    List<String> statsTmpDirs = new ArrayList<>();
+    if (!StatsSetupConst.StatDB.fs.name().equalsIgnoreCase(HiveConf.getVar(conf, ConfVars.HIVESTATSDBCLASS))) {
+      // no-op for non-fs stats collection
+      return statsTmpDirs;
+    }
+    // if it's auto-stats gathering for an insert or CTAS, the stats dir will be in the FileSink
+    Set<Operator<? extends OperatorDesc>> ops = work.getAllLeafOperators();
+    if (work instanceof MapWork) {
+      // if it's an analyze statement, the stats dir will be in the TableScan
+      ops.addAll(work.getAllRootOperators());
+    }
+    for (Operator<? extends OperatorDesc> op : ops) {
+      OperatorDesc desc = op.getConf();
+      String statsTmpDir = null;
+      if (desc instanceof FileSinkDesc) {
+         statsTmpDir = ((FileSinkDesc)desc).getStatsTmpDir();
+      } else if (desc instanceof TableScanDesc) {
+        statsTmpDir = ((TableScanDesc) desc).getTmpStatsDir();
+      }
+      if (statsTmpDir != null && !statsTmpDir.isEmpty()) {
+        statsTmpDirs.add(statsTmpDir);
+      }
+    }
+    return statsTmpDirs;
+  }
 }
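
The helper added above (Utilities.getStatsTmpDirs) walks a work graph and collects every per-operator stats tmp dir (FileSinkDesc for insert/CTAS targets, TableScanDesc for analyze); the driver changes that follow (ExecDriver, SparkPlanGenerator, DagUtils) feed that list into the context before initializing the publisher. A condensed sketch of that pattern, not standalone, with mapWork, reduceWork and job assumed to exist in the surrounding driver code:

// Mirrors the ExecDriver hunk below; mapWork, reduceWork and job are assumed driver state.
List<String> statsTmpDirs = Utilities.getStatsTmpDirs(mapWork, job);
if (reduceWork != null) {
  statsTmpDirs.addAll(Utilities.getStatsTmpDirs(reduceWork, job));
}
StatsCollectionContext sc = new StatsCollectionContext(job);
sc.setStatsTmpDirs(statsTmpDirs);
StatsFactory factory = StatsFactory.newFactory(job);
if (factory != null && !factory.getStatsPublisher().init(sc)) {
  throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
}
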

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index d9225a9..b799a17 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -29,6 +29,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
+import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -75,6 +76,7 @@ import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -243,7 +245,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
 
     try {
       String partitioner = HiveConf.getVar(job, ConfVars.HIVEPARTITIONER);
-      job.setPartitionerClass((Class<? extends Partitioner>) JavaUtils.loadClass(partitioner));
+      job.setPartitionerClass(JavaUtils.loadClass(partitioner));
     } catch (ClassNotFoundException e) {
       throw new RuntimeException(e.getMessage(), e);
     }
@@ -289,7 +291,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
     LOG.info("Using " + inpFormat);
 
     try {
-      job.setInputFormat((Class<? extends InputFormat>) JavaUtils.loadClass(inpFormat));
+      job.setInputFormat(JavaUtils.loadClass(inpFormat));
     } catch (ClassNotFoundException e) {
       throw new RuntimeException(e.getMessage(), e);
     }
@@ -408,7 +410,13 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
         StatsFactory factory = StatsFactory.newFactory(job);
         if (factory != null) {
           statsPublisher = factory.getStatsPublisher();
-          if (!statsPublisher.init(job)) { // creating stats table if not exists
+          List<String> statsTmpDir = Utilities.getStatsTmpDirs(mWork, job);
+          if (rWork != null) {
+            statsTmpDir.addAll(Utilities.getStatsTmpDirs(rWork, job));
+          }
+          StatsCollectionContext sc = new StatsCollectionContext(job);
+          sc.setStatsTmpDirs(statsTmpDir);
+          if (!statsPublisher.init(sc)) { // creating stats table if not exists
             if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
               throw
                 new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
index 4c3ee4b..51e66ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.SparkEdgeProperty;
 import org.apache.hadoop.hive.ql.plan.SparkWork;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.io.Writable;
@@ -65,11 +66,11 @@ public class SparkPlanGenerator {
   private final PerfLogger perfLogger = SessionState.getPerfLogger();
   private static final Log LOG = LogFactory.getLog(SparkPlanGenerator.class);
 
-  private JavaSparkContext sc;
+  private final JavaSparkContext sc;
   private final JobConf jobConf;
-  private Context context;
-  private Path scratchDir;
-  private SparkReporter sparkReporter;
+  private final Context context;
+  private final Path scratchDir;
+  private final SparkReporter sparkReporter;
   private Map<BaseWork, BaseWork> cloneToWork;
   private final Map<BaseWork, SparkTran> workToTranMap;
   private final Map<BaseWork, SparkTran> workToParentWorkTranMap;
@@ -270,8 +271,7 @@ public class SparkPlanGenerator {
     // Make sure we'll use a different plan path from the original one
     HiveConf.setVar(cloned, HiveConf.ConfVars.PLAN, "");
     try {
-      cloned.setPartitionerClass((Class<? extends Partitioner>)
-          JavaUtils.loadClass(HiveConf.getVar(cloned, HiveConf.ConfVars.HIVEPARTITIONER)));
+      cloned.setPartitionerClass(JavaUtils.loadClass(HiveConf.getVar(cloned, HiveConf.ConfVars.HIVEPARTITIONER)));
     } catch (ClassNotFoundException e) {
       String msg = "Could not find partitioner class: " + e.getMessage()
         + " which is specified by: " + HiveConf.ConfVars.HIVEPARTITIONER.varname;
@@ -315,7 +315,9 @@ public class SparkPlanGenerator {
       StatsFactory factory = StatsFactory.newFactory(jobConf);
       if (factory != null) {
         statsPublisher = factory.getStatsPublisher();
-        if (!statsPublisher.init(jobConf)) { // creating stats table if not exists
+        StatsCollectionContext sc = new StatsCollectionContext(jobConf);
+        sc.setStatsTmpDirs(Utilities.getStatsTmpDirs(work, jobConf));
+        if (!statsPublisher.init(sc)) { // creating stats table if not exists
           if (HiveConf.getBoolVar(jobConf, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
             throw new HiveException(
                 ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 19da1c3..bf950cb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
 import org.apache.hadoop.hive.ql.plan.TezWork;
 import org.apache.hadoop.hive.ql.plan.TezWork.VertexType;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.hive.shims.Utils;
@@ -1094,8 +1095,10 @@ public class DagUtils {
       StatsPublisher statsPublisher;
       StatsFactory factory = StatsFactory.newFactory(conf);
       if (factory != null) {
+        StatsCollectionContext sCntxt = new StatsCollectionContext(conf);
+        sCntxt.setStatsTmpDirs(Utilities.getStatsTmpDirs(work, conf));
         statsPublisher = factory.getStatsPublisher();
-        if (!statsPublisher.init(conf)) { // creating stats table if not exists
+        if (!statsPublisher.init(sCntxt)) { // creating stats table if not exists
           if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
             throw
               new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
index e67996d..68709b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
@@ -153,7 +153,6 @@ public class AggregateIndexHandler extends CompactIndexHandler {
       builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false);
       Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
           command, (LinkedHashMap<String, String>) partSpec, indexTableName, dbName);
-      super.setStatsDir(builderConf);
       return rootTask;
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
index a019350..807959e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
@@ -116,13 +116,6 @@ public abstract class TableBasedIndexHandler extends AbstractIndexHandler {
     return null;
   }
 
-  protected void setStatsDir(HiveConf builderConf) {
-    String statsDir;
-    if ((statsDir = builderConf.get(StatsSetupConst.STATS_TMP_LOC)) != null) {
-      getConf().set(StatsSetupConst.STATS_TMP_LOC, statsDir);
-    }
-   }
-
   protected List<String> getPartKVPairStringArray(
       LinkedHashMap<String, String> partSpec) {
     List<String> ret = new ArrayList<String>(partSpec.size());

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
index b076933..cb191ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
@@ -289,7 +289,6 @@ public class BitmapIndexHandler extends TableBasedIndexHandler {
 
     Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
         command, partSpec, indexTableName, dbName);
-    super.setStatsDir(builderConf);
     return rootTask;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
index 1dbe230..586e16d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
@@ -150,7 +150,6 @@ public class CompactIndexHandler extends TableBasedIndexHandler {
     builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false);
     Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
         command, partSpec, indexTableName, dbName);
-    super.setStatsDir(builderConf);
     return rootTask;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
index 3e1ef0a..be3a671 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileKeyBufferWrapper;
 import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileValueBufferWrapper;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.stats.CounterStatsPublisher;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.hive.shims.CombineHiveKey;
@@ -145,7 +146,9 @@ public class PartialScanMapper extends MapReduceBase implements
       throw new HiveException(ErrorMsg.STATSPUBLISHER_NOT_OBTAINED.getErrorCodedMsg());
     }
 
-    if (!statsPublisher.connect(jc)) {
+    StatsCollectionContext sc = new StatsCollectionContext(jc);
+    sc.setStatsTmpDir(jc.get(StatsSetupConst.STATS_TMP_LOC, ""));
+    if (!statsPublisher.connect(sc)) {
       // should fail since stats gathering is main purpose of the job
       LOG.error("StatsPublishing error: cannot connect to database");
       throw new HiveException(ErrorMsg.STATSPUBLISHER_CONNECTION_ERROR.getErrorCodedMsg());
@@ -170,7 +173,7 @@ public class PartialScanMapper extends MapReduceBase implements
       throw new HiveException(ErrorMsg.STATSPUBLISHER_PUBLISHING_ERROR.getErrorCodedMsg());
     }
 
-    if (!statsPublisher.closeConnection()) {
+    if (!statsPublisher.closeConnection(sc)) {
       // The original exception is lost.
       // Not changing the interface to maintain backward compatibility
       throw new HiveException(ErrorMsg.STATSPUBLISHER_CLOSING_ERROR.getErrorCodedMsg());

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
index cee0878..8bebd0f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.DriverContext;
@@ -48,6 +50,7 @@ import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.io.NullWritable;
@@ -145,7 +148,7 @@ public class PartialScanTask extends Task<PartialScanWork> implements
     LOG.info("Using " + inpFormat);
 
     try {
-      job.setInputFormat((Class<? extends InputFormat>) JavaUtils.loadClass(inpFormat));
+      job.setInputFormat(JavaUtils.loadClass(inpFormat));
     } catch (ClassNotFoundException e) {
       throw new RuntimeException(e.getMessage(), e);
     }
@@ -175,7 +178,7 @@ public class PartialScanTask extends Task<PartialScanWork> implements
     HiveConf.setVar(job,
         HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX,
         work.getAggKey());
-
+      job.set(StatsSetupConst.STATS_TMP_LOC, work.getStatsTmpDir());
     try {
       addInputPaths(job, work);
 
@@ -205,7 +208,9 @@ public class PartialScanTask extends Task<PartialScanWork> implements
         StatsFactory factory = StatsFactory.newFactory(job);
         if (factory != null) {
           statsPublisher = factory.getStatsPublisher();
-          if (!statsPublisher.init(job)) { // creating stats table if not exists
+          StatsCollectionContext sc = new StatsCollectionContext(job);
+          sc.setStatsTmpDir(work.getStatsTmpDir());
+          if (!statsPublisher.init(sc)) { // creating stats table if not exists
             if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
               throw
                 new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanWork.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanWork.java
index c0a8ae7..c006743 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanWork.java
@@ -18,11 +18,15 @@
 
 package org.apache.hadoop.hive.ql.io.rcfile.stats;
 
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
+
 import java.io.Serializable;
 import java.util.LinkedHashMap;
 import java.util.List;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat;
 import org.apache.hadoop.hive.ql.plan.Explain;
@@ -42,6 +46,7 @@ public class PartialScanWork extends MapWork implements Serializable {
 
   private transient List<Path> inputPaths;
   private String aggKey;
+  private String statsTmpDir;
 
   public PartialScanWork() {
   }
@@ -101,4 +106,13 @@ public class PartialScanWork extends MapWork implements Serializable {
     this.aggKey = aggKey;
   }
 
+  public String getStatsTmpDir() {
+    return statsTmpDir;
+  }
+
+  public void setStatsTmpDir(String statsTmpDir, HiveConf conf) {
+    this.statsTmpDir = HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())
+      ?  statsTmpDir : "";
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
index eed1d7c..af0ac90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
@@ -64,6 +64,7 @@ public class GenMRTableScan1 implements NodeProcessor {
    * @param opProcCtx
    *          context
    */
+  @Override
   public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx,
       Object... nodeOutputs) throws SemanticException {
     TableScanOperator op = (TableScanOperator) nd;
@@ -121,6 +122,7 @@ public class GenMRTableScan1 implements NodeProcessor {
 
             StatsWork statsWork = new StatsWork(op.getConf().getTableMetadata().getTableSpec());
             statsWork.setAggKey(op.getConf().getStatsAggPrefix());
+            statsWork.setStatsTmpDir(op.getConf().getTmpStatsDir());
             statsWork.setSourceTask(currTask);
             statsWork.setStatsReliable(parseCtx.getConf().getBoolVar(
                 HiveConf.ConfVars.HIVE_STATS_RELIABLE));
@@ -195,6 +197,7 @@ public class GenMRTableScan1 implements NodeProcessor {
     PartialScanWork scanWork = new PartialScanWork(inputPaths);
     scanWork.setMapperCannotSpanPartns(true);
     scanWork.setAggKey(aggregationKey);
+    scanWork.setStatsTmpDir(op.getConf().getTmpStatsDir(), parseCtx.getConf());
 
     // stats work
     statsWork.setPartialScanAnalyzeCommand(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index c696fd5..e8bd33d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -1416,7 +1416,7 @@ public final class GenMapRedUtils {
 
     statsWork.setSourceTask(currTask);
     statsWork.setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE));
-
+    statsWork.setStatsTmpDir(nd.getConf().getStatsTmpDir());
     if (currTask.getWork() instanceof MapredWork) {
       MapredWork mrWork = (MapredWork) currTask.getWork();
       mrWork.getMapWork().setGatheringStats(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
index f8d6905..16b4376 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
@@ -70,7 +70,7 @@ public class ProcessAnalyzeTable implements NodeProcessor {
       throws SemanticException {
 
     GenTezProcContext context = (GenTezProcContext) procContext;
-    
+
     TableScanOperator tableScan = (TableScanOperator) nd;
 
     ParseContext parseContext = context.parseContext;
@@ -124,6 +124,7 @@ public class ProcessAnalyzeTable implements NodeProcessor {
 
       StatsWork statsWork = new StatsWork(tableScan.getConf().getTableMetadata().getTableSpec());
       statsWork.setAggKey(tableScan.getConf().getStatsAggPrefix());
+      statsWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir());
       statsWork.setSourceTask(context.currentTask);
       statsWork.setStatsReliable(parseContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
       Task<StatsWork> statsTask = TaskFactory.get(statsWork, parseContext.getConf());
@@ -181,6 +182,7 @@ public class ProcessAnalyzeTable implements NodeProcessor {
     PartialScanWork scanWork = new PartialScanWork(inputPaths);
     scanWork.setMapperCannotSpanPartns(true);
     scanWork.setAggKey(aggregationKey);
+    scanWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir(), parseContext.getConf());
 
     // stats work
     statsWork.setPartialScanAnalyzeCommand(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index a114281..4af07ad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6638,8 +6638,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     fileSinkDesc.setStatsAggPrefix(fileSinkDesc.getDirName().toString());
     if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
       String statsTmpLoc = ctx.getExtTmpPathRelTo(queryTmpdir).toString();
-      LOG.info("Set stats collection dir : " + statsTmpLoc);
-      conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc);
+      fileSinkDesc.setStatsTmpDir(statsTmpLoc);
+      LOG.debug("Set stats collection dir : " + statsTmpLoc);
     }
 
     if (dest_part != null) {
@@ -9541,8 +9541,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     } else {
       if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
         String statsTmpLoc = ctx.getExtTmpPathRelTo(tab.getPath()).toString();
-        LOG.info("Set stats collection dir : " + statsTmpLoc);
-        conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc);
+        LOG.debug("Set stats collection dir : " + statsTmpLoc);
+        tsDesc.setTmpStatsDir(statsTmpLoc);
       }
       tsDesc.setGatherStats(true);
       tsDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
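
With this change the stats temp location is no longer pushed into the job Configuration under StatsSetupConst.STATS_TMP_LOC; it rides on the plan descriptors instead, and the plan generators (GenMapRedUtils, ProcessAnalyzeTable, SparkProcessAnalyzeTable) read it back when building StatsWork. A minimal caller-side sketch, not part of the patch; the class name is made up and the descriptor instances and path are placeholders:

    import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
    import org.apache.hadoop.hive.ql.plan.TableScanDesc;

    public class DescriptorStatsDirSketch {
      // Each operator now carries its own stats directory, which the plan
      // generators read back via getStatsTmpDir()/getTmpStatsDir().
      public static void recordStatsDir(FileSinkDesc fileSinkDesc,
          TableScanDesc tableScanDesc, String statsTmpLoc) {
        fileSinkDesc.setStatsTmpDir(statsTmpLoc);
        tableScanDesc.setTmpStatsDir(statsTmpLoc);
      }
    }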

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
index 66e148f..7ab4e7a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
@@ -121,6 +121,7 @@ public class SparkProcessAnalyzeTable implements NodeProcessor {
 
         StatsWork statsWork = new StatsWork(tableScan.getConf().getTableMetadata().getTableSpec());
         statsWork.setAggKey(tableScan.getConf().getStatsAggPrefix());
+        statsWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir());
         statsWork.setSourceTask(context.currentTask);
         statsWork.setStatsReliable(parseContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
         Task<StatsWork> statsTask = TaskFactory.get(statsWork, parseContext.getConf());
@@ -176,6 +177,7 @@ public class SparkProcessAnalyzeTable implements NodeProcessor {
     PartialScanWork scanWork = new PartialScanWork(inputPaths);
     scanWork.setMapperCannotSpanPartns(true);
     scanWork.setAggKey(aggregationKey);
+    scanWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir(), parseContext.getConf());
 
     // stats work
     statsWork.setPartialScanAnalyzeCommand(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index f73b502..9d6318a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -86,6 +86,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   private boolean statsReliable;
   private ListBucketingCtx lbCtx;
   private int maxStatsKeyPrefixLength = -1;
+  private String statsTmpDir;
 
   private boolean statsCollectRawDataSize;
 
@@ -156,7 +157,8 @@ public class FileSinkDesc extends AbstractOperatorDesc {
     ret.setDpSortState(dpSortState);
     ret.setWriteType(writeType);
     ret.setTransactionId(txnId);
-    return (Object) ret;
+    ret.setStatsTmpDir(statsTmpDir);
+    return ret;
   }
 
   @Explain(displayName = "directory", explainLevels = { Level.EXTENDED })
@@ -229,7 +231,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   public void setMultiFileSpray(boolean multiFileSpray) {
     this.multiFileSpray = multiFileSpray;
   }
-  
+
   /**
    * @return destination is temporary
    */
@@ -465,4 +467,14 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   public void setTable(Table table) {
     this.table = table;
   }
+
+
+  public String getStatsTmpDir() {
+    return statsTmpDir;
+  }
+
+  public void setStatsTmpDir(String statsCollectionTempDir) {
+    this.statsTmpDir = statsCollectionTempDir;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
index c8515db..d87022d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
@@ -57,6 +57,9 @@ public class StatsWork implements Serializable {
   // so this is set by DriverContext in runtime
   private transient Task sourceTask;
 
+  // used by FS based stats collector
+  private String statsTmpDir;
+
   public StatsWork() {
   }
 
@@ -72,10 +75,6 @@ public class StatsWork implements Serializable {
     this.loadFileDesc = loadFileDesc;
   }
 
-  public StatsWork(boolean statsReliable) {
-    this.statsReliable = statsReliable;
-  }
-
   public TableSpec getTableSpecs() {
     return tableSpecs;
   }
@@ -97,6 +96,14 @@ public class StatsWork implements Serializable {
     return aggKey;
   }
 
+  public String getStatsTmpDir() {
+    return statsTmpDir;
+  }
+
+  public void setStatsTmpDir(String statsTmpDir) {
+    this.statsTmpDir = statsTmpDir;
+  }
+
   public boolean getNoStatsAggregator() {
     return noStatsAggregator;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
index 98bce96..6661ce6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
@@ -24,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hive.ql.exec.PTFUtils;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.parse.TableSample;
@@ -70,6 +69,7 @@ public class TableScanDesc extends AbstractOperatorDesc {
   private boolean gatherStats;
   private boolean statsReliable;
   private int maxStatsKeyPrefixLength = -1;
+  private String tmpStatsDir;
 
   private ExprNodeGenericFuncDesc filterExpr;
   private transient Serializable filterObject;
@@ -203,6 +203,14 @@ public class TableScanDesc extends AbstractOperatorDesc {
     return gatherStats;
   }
 
+  public String getTmpStatsDir() {
+    return tmpStatsDir;
+  }
+
+  public void setTmpStatsDir(String tmpStatsDir) {
+    this.tmpStatsDir = tmpStatsDir;
+  }
+
   public List<VirtualColumn> getVirtualCols() {
     return virtualCols;
   }
@@ -264,7 +272,7 @@ public class TableScanDesc extends AbstractOperatorDesc {
   public void setBucketFileNameMapping(Map<String, Integer> bucketFileNameMapping) {
     this.bucketFileNameMapping = bucketFileNameMapping;
   }
-  
+
   public void setIsMetadataOnly(boolean metadata_only) {
     isMetadataOnly = metadata_only;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
index 16b4460..b9863d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
@@ -25,7 +25,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
-import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
 import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.JobClient;
@@ -40,10 +39,11 @@ public class CounterStatsAggregator implements StatsAggregator, StatsCollectionT
   private JobClient jc;
 
   @Override
-  public boolean connect(Configuration hconf, Task sourceTask) {
+  public boolean connect(StatsCollectionContext scc) {
+    Task<?> sourceTask = scc.getTask();
     if (sourceTask instanceof MapRedTask) {
       try {
-        jc = new JobClient(toJobConf(hconf));
+        jc = new JobClient(toJobConf(scc.getHiveConf()));
         RunningJob job = jc.getJob(((MapRedTask)sourceTask).getJobID());
         if (job != null) {
           counters = job.getCounters();
@@ -71,7 +71,7 @@ public class CounterStatsAggregator implements StatsAggregator, StatsCollectionT
   }
 
   @Override
-  public boolean closeConnection() {
+  public boolean closeConnection(StatsCollectionContext scc) {
     try {
       jc.close();
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java
index 13f6024..4c01b25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java
@@ -33,8 +33,8 @@ public class CounterStatsAggregatorSpark
 
   @SuppressWarnings("rawtypes")
   @Override
-  public boolean connect(Configuration hconf, Task sourceTask) {
-    SparkTask task = (SparkTask) sourceTask;
+  public boolean connect(StatsCollectionContext scc) {
+    SparkTask task = (SparkTask) scc.getTask();
     sparkCounters = task.getSparkCounters();
     if (sparkCounters == null) {
       return false;
@@ -52,7 +52,7 @@ public class CounterStatsAggregatorSpark
   }
 
   @Override
-  public boolean closeConnection() {
+  public boolean closeConnection(StatsCollectionContext scc) {
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java
index 02e8c0b..662c106 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java
@@ -18,11 +18,8 @@
 
 package org.apache.hadoop.hive.ql.stats;
 
-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
 import org.apache.tez.common.counters.TezCounters;
@@ -46,10 +43,11 @@ public class CounterStatsAggregatorTez implements StatsAggregator, StatsCollecti
   }
 
   @Override
-  public boolean connect(Configuration hconf, Task sourceTask) {
+  public boolean connect(StatsCollectionContext scc) {
+    Task sourceTask = scc.getTask();
     if (!(sourceTask instanceof TezTask)) {
       delegate = true;
-      return mrAggregator.connect(hconf, sourceTask);
+      return mrAggregator.connect(scc);
     }
     counters = ((TezTask) sourceTask).getTezCounters();
     return counters != null;
@@ -75,7 +73,7 @@ public class CounterStatsAggregatorTez implements StatsAggregator, StatsCollecti
   }
 
   @Override
-  public boolean closeConnection() {
+  public boolean closeConnection(StatsCollectionContext scc) {
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java
index bf7d027..e5f1400 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java
@@ -22,7 +22,6 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.mapred.Reporter;
 
@@ -33,12 +32,12 @@ public class CounterStatsPublisher implements StatsPublisher, StatsCollectionTas
   private Reporter reporter;
 
   @Override
-  public boolean init(Configuration hconf) {
+  public boolean init(StatsCollectionContext context) {
     return true;
   }
 
   @Override
-  public boolean connect(Configuration hconf) {
+  public boolean connect(StatsCollectionContext statsContext) {
     MapredContext context = MapredContext.get();
     if (context == null || context.getReporter() == null) {
       return false;
@@ -61,7 +60,7 @@ public class CounterStatsPublisher implements StatsPublisher, StatsCollectionTas
     return true;
   }
   @Override
-  public boolean closeConnection() {
+  public boolean closeConnection(StatsCollectionContext context) {
     return true;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
index 0ae0489..b115daf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.hive.ql.stats;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.exec.Task;
-
 /**
  * An interface for any possible implementation for gathering statistics.
  */
@@ -35,7 +32,7 @@ public interface StatsAggregator {
    * @param sourceTask
    * @return true if connection is successful, false otherwise.
    */
-  public boolean connect(Configuration hconf, Task sourceTask);
+  public boolean connect(StatsCollectionContext scc);
 
   /**
    * This method aggregates a given statistic from all tasks (partial stats).
@@ -65,7 +62,7 @@ public interface StatsAggregator {
    *
    * @return true if close connection is successful, false otherwise.
    */
-  public boolean closeConnection();
+  public boolean closeConnection(StatsCollectionContext scc);
 
   /**
    * This method is called after all statistics have been aggregated. Since we support multiple

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsCollectionContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsCollectionContext.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsCollectionContext.java
new file mode 100644
index 0000000..ae6f2ac
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsCollectionContext.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.stats;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.exec.Task;
+
+public class StatsCollectionContext {
+
+  private final Configuration hiveConf;
+  private Task task;
+  private List<String> statsTmpDirs;
+
+  public List<String> getStatsTmpDirs() {
+    return statsTmpDirs;
+  }
+
+  public void setStatsTmpDirs(List<String> statsTmpDirs) {
+    this.statsTmpDirs = statsTmpDirs;
+  }
+
+  public void setStatsTmpDir(String statsTmpDir) {
+    this.statsTmpDirs = statsTmpDir == null ? new ArrayList<String>() :
+        Arrays.asList(new String[]{statsTmpDir});
+  }
+
+  public StatsCollectionContext(Configuration hiveConf) {
+    super();
+    this.hiveConf = hiveConf;
+  }
+
+  public Configuration getHiveConf() {
+    return hiveConf;
+  }
+
+  public Task getTask() {
+    return task;
+  }
+
+  public void setTask(Task task) {
+    this.task = task;
+  }
+}
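
The new StatsCollectionContext is a plain holder for the Hive configuration, the source task, and the list of stats temp dirs. A minimal construction sketch, not part of the patch; the class name is made up and the directory path is a placeholder:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;

    public class StatsContextSketch {
      public static StatsCollectionContext buildContext(String statsTmpDir) {
        Configuration conf = new HiveConf();
        StatsCollectionContext scc = new StatsCollectionContext(conf);
        // setStatsTmpDir(null) yields an empty list; otherwise the single dir is
        // wrapped into a one-element list, which getStatsTmpDirs() later hands
        // back to the FS publisher/aggregator.
        scc.setStatsTmpDir(statsTmpDir);
        List<String> dirs = scc.getStatsTmpDirs();
        assert dirs.size() <= 1;
        return scc;
      }
    }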

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
index 845ec6a..3631b83 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.stats;
 
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
-
 /**
  * An interface for any possible implementation for publishing statics.
  */
@@ -37,14 +35,14 @@ public interface StatsPublisher {
    * intermediate stats database.
    * @return true if initialization is successful, false otherwise.
    */
-  public boolean init(Configuration hconf);
+  public boolean init(StatsCollectionContext context);
 
   /**
    * This method connects to the intermediate statistics database.
    * @param hconf HiveConf that contains the connection parameters.
    * @return true if connection is successful, false otherwise.
    */
-  public boolean connect(Configuration hconf);
+  public boolean connect(StatsCollectionContext context);
 
   /**
    * This method publishes a given statistic into a disk storage, possibly HBase or MySQL.
@@ -66,6 +64,6 @@ public interface StatsPublisher {
   /**
    * This method closes the connection to the temporary storage.
    */
-  public boolean closeConnection();
+  public boolean closeConnection(StatsCollectionContext context);
 
 }
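
Both the publisher and the aggregator interfaces now take the context on every lifecycle call instead of a raw Configuration (and, for aggregators, a Task). A caller-side sketch of the new lifecycle, mirroring what TestStatsPublisherEnhanced does further down; this is not part of the patch, the class name is made up, and the publisher/aggregator instances and tmp dir are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.stats.StatsAggregator;
    import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
    import org.apache.hadoop.hive.ql.stats.StatsPublisher;

    public class StatsLifecycleSketch {
      public static boolean runLifecycle(StatsPublisher publisher,
          StatsAggregator aggregator, String statsTmpDir) {
        Configuration conf = new HiveConf();
        StatsCollectionContext sc = new StatsCollectionContext(conf);
        sc.setStatsTmpDir(statsTmpDir);

        if (!publisher.init(sc) || !publisher.connect(sc)) {
          return false;
        }
        // publishStat(...) calls (unchanged by this patch) would go here.
        boolean publisherClosed = publisher.closeConnection(sc);

        if (!aggregator.connect(sc)) {
          return false;
        }
        // aggregateStats(...) / cleanUp(...) calls would go here.
        boolean aggregatorClosed = aggregator.closeConnection(sc);

        return publisherClosed && aggregatorClosed;
      }
    }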

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
index be025fb..6dfc178 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
@@ -26,15 +26,14 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.stats.StatsAggregator;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsCollectionTaskIndependent;
 
 import com.esotericsoftware.kryo.io.Input;
@@ -44,17 +43,17 @@ public class FSStatsAggregator implements StatsAggregator, StatsCollectionTaskIn
   private List<Map<String,Map<String,String>>> statsList;
   private Map<String, Map<String,String>> statsMap;
   private FileSystem fs;
-  private Configuration conf;
 
   @Override
-  public boolean connect(Configuration hconf, Task sourceTask) {
-    conf = hconf;
-    Path statsDir = new Path(hconf.get(StatsSetupConst.STATS_TMP_LOC));
+  public boolean connect(StatsCollectionContext scc) {
+    List<String> statsDirs = scc.getStatsTmpDirs();
+    assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
+    Path statsDir = new Path(statsDirs.get(0));
     LOG.debug("About to read stats from : " + statsDir);
     statsMap  = new HashMap<String, Map<String,String>>();
 
     try {
-      fs = statsDir.getFileSystem(hconf);
+      fs = statsDir.getFileSystem(scc.getHiveConf());
       statsList = new ArrayList<Map<String,Map<String,String>>>();
       FileStatus[] status = fs.listStatus(statsDir, new PathFilter() {
         @Override
@@ -98,11 +97,15 @@ public class FSStatsAggregator implements StatsAggregator, StatsCollectionTaskIn
   }
 
   @Override
-  public boolean closeConnection() {
-    LOG.debug("About to delete stats tmp dir");
+  public boolean closeConnection(StatsCollectionContext scc) {
+    List<String> statsDirs = scc.getStatsTmpDirs();
+    assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
+    Path statsDir = new Path(statsDirs.get(0));
+
+    LOG.debug("About to delete stats tmp dir :" + statsDir);
 
     try {
-      fs.delete(new Path(conf.get(StatsSetupConst.STATS_TMP_LOC)),true);
+      fs.delete(statsDir,true);
       return true;
     } catch (IOException e) {
       LOG.error(e);

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
index ce96064..aa2bf62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.stats.fs;
 
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
@@ -30,6 +31,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.stats.StatsCollectionTaskIndependent;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 
 import com.esotericsoftware.kryo.io.Output;
@@ -41,12 +43,14 @@ public class FSStatsPublisher implements StatsPublisher, StatsCollectionTaskInde
   private Map<String, Map<String,String>> statsMap; // map from partID -> (statType->value)
 
   @Override
-  public boolean init(Configuration hconf) {
-    Path statsDir = new Path(hconf.get(StatsSetupConst.STATS_TMP_LOC));
-    LOG.debug("Initing FSStatsPublisher with : " + statsDir);
+  public boolean init(StatsCollectionContext context) {
     try {
-      statsDir.getFileSystem(hconf).mkdirs(statsDir);
-      LOG.info("created : " + statsDir);
+      for (String tmpDir : context.getStatsTmpDirs()) {
+        Path statsDir = new Path(tmpDir);
+        LOG.debug("Initing FSStatsPublisher with : " + statsDir);
+        statsDir.getFileSystem(context.getHiveConf()).mkdirs(statsDir);
+        LOG.info("created : " + statsDir);
+      }
       return true;
     } catch (IOException e) {
       LOG.error(e);
@@ -55,9 +59,11 @@ public class FSStatsPublisher implements StatsPublisher, StatsCollectionTaskInde
   }
 
   @Override
-  public boolean connect(Configuration hconf) {
-    conf = hconf;
-    Path statsDir = new Path(hconf.get(StatsSetupConst.STATS_TMP_LOC));
+  public boolean connect(StatsCollectionContext context) {
+    conf = context.getHiveConf();
+    List<String> statsDirs = context.getStatsTmpDirs();
+    assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
+    Path statsDir = new Path(statsDirs.get(0));
     LOG.debug("Connecting to : " + statsDir);
     statsMap = new HashMap<String, Map<String,String>>();
     try {
@@ -85,14 +91,16 @@ public class FSStatsPublisher implements StatsPublisher, StatsCollectionTaskInde
   }
 
   @Override
-  public boolean closeConnection() {
-    Path statsDir = new Path(conf.get(StatsSetupConst.STATS_TMP_LOC));
+  public boolean closeConnection(StatsCollectionContext context) {
+    List<String> statsDirs = context.getStatsTmpDirs();
+    assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
+    Path statsDir = new Path(statsDirs.get(0));
     try {
       Path statsFile = new Path(statsDir,StatsSetupConst.STATS_FILE_PREFIX +conf.getInt("mapred.task.partition",0));
       LOG.debug("About to create stats file for this task : " + statsFile);
       Output output = new Output(statsFile.getFileSystem(conf).create(statsFile,true));
-      LOG.info("Created file : " + statsFile);
-      LOG.info("Writing stats in it : " + statsMap);
+      LOG.debug("Created file : " + statsFile);
+      LOG.debug("Writing stats in it : " + statsMap);
       Utilities.runtimeSerializationKryo.get().writeObject(output, statsMap);
       output.close();
       return true;
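
In the FS implementation, init() may be handed one temp dir per file sink and creates each of them, while connect() and closeConnection() expect exactly one dir for the current task (the asserts above). A hedged usage sketch, not part of the patch: the class name is made up, the locally created HiveConf and the directories are placeholders, and the execution-side changes in this merge (Utilities, ExecDriver, etc.) are expected to collect the real dirs from the plan descriptors:

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
    import org.apache.hadoop.hive.ql.stats.fs.FSStatsPublisher;

    public class FsStatsInitSketch {
      public static boolean initAllDirs(String... statsTmpDirs) {
        Configuration conf = new HiveConf();
        StatsCollectionContext scc = new StatsCollectionContext(conf);
        // Multiple dirs are fine for init(); connect() would require exactly one.
        scc.setStatsTmpDirs(Arrays.asList(statsTmpDirs));
        return new FSStatsPublisher().init(scc);
      }
    }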

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java
index e92523e..d8c9926 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java
@@ -34,16 +34,15 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.stats.StatsAggregator;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 
 public class JDBCStatsAggregator implements StatsAggregator {
 
   private Connection conn;
   private String connectionString;
   private Configuration hiveconf;
-  private Task<?> sourceTask;
   private final Map<String, PreparedStatement> columnMapping;
   private final Log LOG = LogFactory.getLog(this.getClass().getName());
   private int timeout = 30;
@@ -58,8 +57,8 @@ public class JDBCStatsAggregator implements StatsAggregator {
   }
 
   @Override
-  public boolean connect(Configuration hiveconf, Task sourceTask) {
-    this.hiveconf = hiveconf;
+  public boolean connect(StatsCollectionContext scc) {
+    this.hiveconf = scc.getHiveConf();
     timeout = (int) HiveConf.getTimeVar(
         hiveconf, HiveConf.ConfVars.HIVE_STATS_JDBC_TIMEOUT, TimeUnit.SECONDS);
     connectionString = HiveConf.getVar(hiveconf, HiveConf.ConfVars.HIVESTATSDBCONNECTIONSTRING);
@@ -67,7 +66,6 @@ public class JDBCStatsAggregator implements StatsAggregator {
     maxRetries = HiveConf.getIntVar(hiveconf, HiveConf.ConfVars.HIVE_STATS_RETRIES_MAX);
     waitWindow = HiveConf.getTimeVar(
         hiveconf, HiveConf.ConfVars.HIVE_STATS_RETRIES_WAIT, TimeUnit.MILLISECONDS);
-    this.sourceTask = sourceTask;
 
     try {
       JavaUtils.loadClass(driver).newInstance();
@@ -159,14 +157,14 @@ public class JDBCStatsAggregator implements StatsAggregator {
           return null;
         }
         // close the current connection
-        closeConnection();
+        closeConnection(null);
         long waitTime = Utilities.getRandomWaitTime(waitWindow, failures, r);
         try {
           Thread.sleep(waitTime);
         } catch (InterruptedException iex) {
         }
         // getting a new connection
-        if (!connect(hiveconf, sourceTask)) {
+        if (!connect(new StatsCollectionContext(hiveconf))) {
           // if cannot reconnect, just fail because connect() already handles retries.
           LOG.error("Error during publishing aggregation. " + e);
           return null;
@@ -181,7 +179,7 @@ public class JDBCStatsAggregator implements StatsAggregator {
   }
 
   @Override
-  public boolean closeConnection() {
+  public boolean closeConnection(StatsCollectionContext scc) {
 
     if (conn == null) {
       return true;
@@ -238,14 +236,14 @@ public class JDBCStatsAggregator implements StatsAggregator {
             return false;
           }
           // close the current connection
-          closeConnection();
+          closeConnection(null);
           long waitTime = Utilities.getRandomWaitTime(waitWindow, failures, r);
           try {
             Thread.sleep(waitTime);
           } catch (InterruptedException iex) {
           }
           // getting a new connection
-          if (!connect(hiveconf, sourceTask)) {
+          if (!connect(new StatsCollectionContext(hiveconf))) {
             LOG.error("Error during clean-up. " + e);
             return false;
           }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java
index aeb3d27..0318a8c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 
 public class JDBCStatsPublisher implements StatsPublisher {
@@ -59,8 +60,8 @@ public class JDBCStatsPublisher implements StatsPublisher {
   }
 
   @Override
-  public boolean connect(Configuration hiveconf) {
-    this.hiveconf = hiveconf;
+  public boolean connect(StatsCollectionContext context) {
+    this.hiveconf = context.getHiveConf();
     maxRetries = HiveConf.getIntVar(hiveconf, HiveConf.ConfVars.HIVE_STATS_RETRIES_MAX);
     waitWindow = HiveConf.getTimeVar(
         hiveconf, HiveConf.ConfVars.HIVE_STATS_RETRIES_WAIT, TimeUnit.MILLISECONDS);
@@ -209,15 +210,16 @@ public class JDBCStatsPublisher implements StatsPublisher {
     if (failures >= maxRetries) {
       return false;
     }
+    StatsCollectionContext sCntxt = new StatsCollectionContext(hiveconf);
     // close the current connection
-    closeConnection();
+    closeConnection(sCntxt);
     long waitTime = Utilities.getRandomWaitTime(waitWindow, failures, r);
     try {
       Thread.sleep(waitTime);
     } catch (InterruptedException iex) {
     }
     // get a new connection
-    if (!connect(hiveconf)) {
+    if (!connect(sCntxt)) {
       // if cannot reconnect, just fail because connect() already handles retries.
       LOG.error("Error during publishing aggregation. " + e);
       return false;
@@ -226,7 +228,7 @@ public class JDBCStatsPublisher implements StatsPublisher {
   }
 
   @Override
-  public boolean closeConnection() {
+  public boolean closeConnection(StatsCollectionContext context) {
     if (conn == null) {
       return true;
     }
@@ -266,13 +268,13 @@ public class JDBCStatsPublisher implements StatsPublisher {
    * creating tables.).
    */
   @Override
-  public boolean init(Configuration hconf) {
+  public boolean init(StatsCollectionContext context) {
     Statement stmt = null;
     ResultSet rs = null;
     try {
-      this.hiveconf = hconf;
-      connectionString = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVESTATSDBCONNECTIONSTRING);
-      String driver = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVESTATSJDBCDRIVER);
+      this.hiveconf = context.getHiveConf();
+      connectionString = HiveConf.getVar(hiveconf, HiveConf.ConfVars.HIVESTATSDBCONNECTIONSTRING);
+      String driver = HiveConf.getVar(hiveconf, HiveConf.ConfVars.HIVESTATSJDBCDRIVER);
       JavaUtils.loadClass(driver).newInstance();
       synchronized(DriverManager.class) {
         DriverManager.setLoginTimeout(timeout);
@@ -339,7 +341,7 @@ public class JDBCStatsPublisher implements StatsPublisher {
           // do nothing
         }
       }
-      closeConnection();
+      closeConnection(context);
     }
     return true;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
index 4594836..d22d022 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.stats.StatsAggregator;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
@@ -198,8 +199,6 @@ public class TestFileSinkOperator {
   @Before
   public void setup() throws Exception {
     jc = new JobConf();
-    jc.set(StatsSetupConst.STATS_TMP_LOC, File.createTempFile("TestFileSinkOperator",
-        "stats").getPath());
     jc.set(HiveConf.ConfVars.HIVE_STATS_DEFAULT_PUBLISHER.varname,
         TFSOStatsPublisher.class.getName());
     jc.set(HiveConf.ConfVars.HIVE_STATS_DEFAULT_AGGREGATOR.varname,
@@ -857,12 +856,12 @@ public class TestFileSinkOperator {
     static Map<String, String> stats;
 
     @Override
-    public boolean init(Configuration hconf) {
+    public boolean init(StatsCollectionContext context) {
       return true;
     }
 
     @Override
-    public boolean connect(Configuration hconf) {
+    public boolean connect(StatsCollectionContext context) {
       return true;
     }
 
@@ -873,7 +872,7 @@ public class TestFileSinkOperator {
     }
 
     @Override
-    public boolean closeConnection() {
+    public boolean closeConnection(StatsCollectionContext context) {
       return true;
     }
   }
@@ -881,7 +880,7 @@ public class TestFileSinkOperator {
   public static class TFSOStatsAggregator implements StatsAggregator {
 
     @Override
-    public boolean connect(Configuration hconf, Task sourceTask) {
+    public boolean connect(StatsCollectionContext scc) {
       return true;
     }
 
@@ -891,7 +890,7 @@ public class TestFileSinkOperator {
     }
 
     @Override
-    public boolean closeConnection() {
+    public boolean closeConnection(StatsCollectionContext scc) {
       return true;
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9b4826e7/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java
index 887716e..c257797 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestStatsPublisherEnhanced.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.stats.StatsAggregator;
+import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.mapred.JobConf;
@@ -60,9 +61,10 @@ public class TestStatsPublisherEnhanced extends TestCase {
   protected void tearDown() {
     StatsAggregator sa = factory.getStatsAggregator();
     assertNotNull(sa);
-    assertTrue(sa.connect(conf, null));
+    StatsCollectionContext sc = new StatsCollectionContext(conf);
+    assertTrue(sa.connect(sc));
     assertTrue(sa.cleanUp("file_0"));
-    assertTrue(sa.closeConnection());
+    assertTrue(sa.closeConnection(sc));
   }
 
   private void fillStatMap(String numRows, String rawDataSize) {
@@ -80,13 +82,14 @@ public class TestStatsPublisherEnhanced extends TestCase {
       // instantiate stats publisher
       StatsPublisher statsPublisher = Utilities.getStatsPublisher((JobConf) conf);
       assertNotNull(statsPublisher);
-      assertTrue(statsPublisher.init(conf));
-      assertTrue(statsPublisher.connect(conf));
+      StatsCollectionContext sc = new StatsCollectionContext(conf);
+      assertTrue(statsPublisher.init(sc));
+      assertTrue(statsPublisher.connect(sc));
 
       // instantiate stats aggregator
       StatsAggregator statsAggregator = factory.getStatsAggregator();
       assertNotNull(statsAggregator);
-      assertTrue(statsAggregator.connect(conf, null));
+      assertTrue(statsAggregator.connect(sc));
 
       // publish stats
       fillStatMap("200", "1000");
@@ -109,8 +112,8 @@ public class TestStatsPublisherEnhanced extends TestCase {
       assertEquals("3000", usize1);
 
       // close connections
-      assertTrue(statsPublisher.closeConnection());
-      assertTrue(statsAggregator.closeConnection());
+      assertTrue(statsPublisher.closeConnection(sc));
+      assertTrue(statsAggregator.closeConnection(sc));
 
       System.out
           .println("StatsPublisher - one stat published per key - aggregating matching key - OK");
@@ -128,13 +131,14 @@ public class TestStatsPublisherEnhanced extends TestCase {
       StatsPublisher statsPublisher = Utilities.getStatsPublisher(
           (JobConf) conf);
       assertNotNull(statsPublisher);
-      assertTrue(statsPublisher.init(conf));
-      assertTrue(statsPublisher.connect(conf));
+      StatsCollectionContext sc = new StatsCollectionContext(conf);
+      assertTrue(statsPublisher.init(sc));
+      assertTrue(statsPublisher.connect(sc));
 
       // instantiate stats aggregator
       StatsAggregator statsAggregator = factory.getStatsAggregator();
       assertNotNull(statsAggregator);
-      assertTrue(statsAggregator.connect(conf, null));
+      assertTrue(statsAggregator.connect(sc));
       // statsAggregator.cleanUp("file_0000");
       // assertTrue(statsAggregator.connect(conf));
 
@@ -172,8 +176,8 @@ public class TestStatsPublisherEnhanced extends TestCase {
       assertTrue(statsAggregator.cleanUp("file_0000"));
 
       // close connections
-      assertTrue(statsPublisher.closeConnection());
-      assertTrue(statsAggregator.closeConnection());
+      assertTrue(statsPublisher.closeConnection(sc));
+      assertTrue(statsAggregator.closeConnection(sc));
 
       System.out.println("StatsPublisher - basic functionality - OK");
     } catch (Throwable e) {
@@ -189,13 +193,14 @@ public class TestStatsPublisherEnhanced extends TestCase {
       // instantiate stats publisher
       StatsPublisher statsPublisher = Utilities.getStatsPublisher((JobConf) conf);
       assertNotNull(statsPublisher);
-      assertTrue(statsPublisher.init(conf));
-      assertTrue(statsPublisher.connect(conf));
+      StatsCollectionContext sc = new StatsCollectionContext(conf);
+      assertTrue(statsPublisher.init(sc));
+      assertTrue(statsPublisher.connect(sc));
 
       // instantiate stats aggregator
       StatsAggregator statsAggregator = factory.getStatsAggregator();
       assertNotNull(statsAggregator);
-      assertTrue(statsAggregator.connect(conf, null));
+      assertTrue(statsAggregator.connect(sc));
 
       // publish stats
       fillStatMap("200", "1000");
@@ -236,8 +241,8 @@ public class TestStatsPublisherEnhanced extends TestCase {
       assertTrue(statsAggregator.cleanUp("file_0000"));
 
       // close connections
-      assertTrue(statsPublisher.closeConnection());
-      assertTrue(statsAggregator.closeConnection());
+      assertTrue(statsPublisher.closeConnection(sc));
+      assertTrue(statsAggregator.closeConnection(sc));
 
       System.out.println("StatsPublisher - multiple updates - OK");
     } catch (Throwable e) {
@@ -254,13 +259,14 @@ public class TestStatsPublisherEnhanced extends TestCase {
       // instantiate stats publisher
       StatsPublisher statsPublisher = Utilities.getStatsPublisher((JobConf) conf);
       assertNotNull(statsPublisher);
-      assertTrue(statsPublisher.init(conf));
-      assertTrue(statsPublisher.connect(conf));
+      StatsCollectionContext sc = new StatsCollectionContext(conf);
+      assertTrue(statsPublisher.init(sc));
+      assertTrue(statsPublisher.connect(sc));
 
       // instantiate stats aggregator
       StatsAggregator statsAggregator = factory.getStatsAggregator();
       assertNotNull(statsAggregator);
-      assertTrue(statsAggregator.connect(conf, null));
+      assertTrue(statsAggregator.connect(sc));
 
       // publish stats
       fillStatMap("200", "");
@@ -305,8 +311,8 @@ public class TestStatsPublisherEnhanced extends TestCase {
       assertTrue(statsAggregator.cleanUp("file_0000"));
 
       // close connections
-      assertTrue(statsPublisher.closeConnection());
-      assertTrue(statsAggregator.closeConnection());
+      assertTrue(statsPublisher.closeConnection(sc));
+      assertTrue(statsAggregator.closeConnection(sc));
 
       System.out
           .println("StatsPublisher - (multiple updates + publishing subset of supported statistics) - OK");
@@ -325,13 +331,14 @@ public class TestStatsPublisherEnhanced extends TestCase {
       // instantiate stats publisher
       StatsPublisher statsPublisher = Utilities.getStatsPublisher((JobConf) conf);
       assertNotNull(statsPublisher);
-      assertTrue(statsPublisher.init(conf));
-      assertTrue(statsPublisher.connect(conf));
+      StatsCollectionContext sc = new StatsCollectionContext(conf);
+      assertTrue(statsPublisher.init(sc));
+      assertTrue(statsPublisher.connect(sc));
 
       // instantiate stats aggregator
       StatsAggregator statsAggregator = factory.getStatsAggregator();
       assertNotNull(statsAggregator);
-      assertTrue(statsAggregator.connect(conf, null));
+      assertTrue(statsAggregator.connect(sc));
 
       // publish stats
       fillStatMap("200", "1000");
@@ -364,8 +371,8 @@ public class TestStatsPublisherEnhanced extends TestCase {
       assertTrue(statsAggregator.cleanUp("file_0000"));
 
       // close connections
-      assertTrue(statsPublisher.closeConnection());
-      assertTrue(statsAggregator.closeConnection());
+      assertTrue(statsPublisher.closeConnection(sc));
+      assertTrue(statsAggregator.closeConnection(sc));
 
       System.out.println("StatsAggregator - clean-up - OK");
     } catch (Throwable e) {


[4/5] hive git commit: HIVE-12153 : LLAP: update errata for bad branch commits (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-12153 : LLAP: update errata for bad branch commits (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b6de889b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b6de889b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b6de889b

Branch: refs/heads/llap
Commit: b6de889b0ecb26b23d0530b9c594e2f1a9c65e87
Parents: 3cfcad6
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Oct 12 16:51:42 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Oct 12 16:51:42 2015 -0700

----------------------------------------------------------------------
 errata.txt | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b6de889b/errata.txt
----------------------------------------------------------------------
diff --git a/errata.txt b/errata.txt
index 7607ef1..7df4b9e 100644
--- a/errata.txt
+++ b/errata.txt
@@ -9,3 +9,70 @@ f3ab5fda6af57afff31c29ad048d906fd095d5fb branch-1.2 HIVE-10885 https://issues.ap
 dcf21cd6fa98fb5db01ef661bb3b9f94d9ca2d15 master     HIVE-10021 https://issues.apache.org/jira/browse/HIVE-10021
 9763c9dd31bd5939db3ca50e75bb97955b411f6d master     HIVE-11536 https://issues.apache.org/jira/browse/HIVE-11536
 52a934f911c63fda5d69cb6036cb4e917c799259 llap       HIVE-11871 https://issues.apache.org/jira/browse/HIVE-11871
+1d0881e04e1aa9dd10dde8425427f29a53bee97a llap       HIVE-12125 https://issues.apache.org/jira/browse/HIVE-12125
+7f21a4254dff893ff6d882ab66e018075a37d484 llap       HIVE-12126 https://issues.apache.org/jira/browse/HIVE-12126
+a4e32580f7dd0d7cda08695af0a1feae6b175709 llap       HIVE-12127 https://issues.apache.org/jira/browse/HIVE-12127
+aebb82888e53676312a46587577ac67ad0b78579 llap       HIVE-12128 https://issues.apache.org/jira/browse/HIVE-12128
+b0860a48b75069dd24a413dca701a2685577c1cf llap       HIVE-12129 https://issues.apache.org/jira/browse/HIVE-12129
+ebb8fec397e098702e85ae68919af63f6cad2ac0 llap       HIVE-12130 https://issues.apache.org/jira/browse/HIVE-12130
+b7c53456dfa49ec08952f2f1237dffb59bd3e8a9 llap       HIVE-12131 https://issues.apache.org/jira/browse/HIVE-12131
+00045de70f85f7e8c843bc7bee7846339c9781b4 llap       HIVE-12132 https://issues.apache.org/jira/browse/HIVE-12132
+3dc2dd9c195e5985712e4fba968f12fdb1c5ec2e llap       HIVE-12133 https://issues.apache.org/jira/browse/HIVE-12133
+ac52a81f72c8ce26f13554a682ee7ae41a6a7015 llap       HIVE-12134 https://issues.apache.org/jira/browse/HIVE-12134
+b4df77b013d7e5b33c4a3eddee0c1d009e2f117a llap       HIVE-12135 https://issues.apache.org/jira/browse/HIVE-12135
+d4db62fb5f6779d9989f9a8153f1771895255982 llap       HIVE-12136 https://issues.apache.org/jira/browse/HIVE-12136
+a12191237578abbaafb35934d094dbf1278d1412 llap       HIVE-12137 https://issues.apache.org/jira/browse/HIVE-12137
+255b3a6fc621b96f32fad68437b3d8caf04823ec llap       HIVE-12138 https://issues.apache.org/jira/browse/HIVE-12138
+29cd5c8d7817eb0618e4f115329882d2c4a20417 llap       HIVE-12139 https://issues.apache.org/jira/browse/HIVE-12139
+f314e577c1f411ee5b434ba8d81e30e937e40c68 llap       HIVE-12140 https://issues.apache.org/jira/browse/HIVE-12140
+4bbaca8b33ed31cb67862f7c815ea7b6bbe5a2b4 llap       HIVE-12141 https://issues.apache.org/jira/browse/HIVE-12141
+289863694d242a16cdd5e8ed82bc8b4ef460bfdc llap       HIVE-12142 https://issues.apache.org/jira/browse/HIVE-12142
+c4b2a13a5e9bccadf1ca430c2a110cbe5d68a66b llap       HIVE-12143 https://issues.apache.org/jira/browse/HIVE-12143
+8b6c34eaa02f87806e6567a27baeb56c74f94926 llap       HIVE-12144 https://issues.apache.org/jira/browse/HIVE-12144
+7b5f80f1273f82f864ff4a36c7d640021e7d3d6a llap       HIVE-12145 https://issues.apache.org/jira/browse/HIVE-12145
+7ebf999e2fdee5de00a145653a1e58de53650602 llap       HIVE-12146 https://issues.apache.org/jira/browse/HIVE-12146
+e6b1556e39f81dc2861f612733b2ba61c17ff698 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+b8acbb6ef7b97502b772569641917e6ef973b25e llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+35c18a3d9b67013bf8cb2185a27391390aacc1e4 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+a31d7c8a5346fe3f26ad241e4be17a09a41583dc llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+08969c8517861d820ed353db7b1e98e9f1799d64 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+7dc3cf966745feaffef742a2ea4d74c89d44e766 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+f11222583c2b62248832010fbb7181eee369fbca llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+a7f77a73d89ee6503d5671258f74f5d7183d805e llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+35a314bc1bb2e7e7b29232cb63d1c5adbe26234e llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+5feb58db2c99627cb41a747a097a0ec4b019d60c llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+2dadf56692fe33e1a67f162e57ba9d36bd26b84a llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+e6965be3df0c74061c44e0a6aee5f74ce9d7c113 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+76432fbe2e463c20b0230839366f8e35a0948f0f llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+42acf2b77b0f160629d9457774f5b109bc0b1fbe llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+0737a4a5aed16374e1ee504f147c96ecc6636f6a llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+d487f800a09936562dd41b6d8a039904c14dfaff llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+ccb63a31cb5f9003221341bad592080918627565 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+74c4bdfeb03f2119e01a439cc86e384ddd2bfcde llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+371c2ba38cfd90feca4be2878daf030cf8a85bfb llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+52589882b8a69577f38dbe64a1b64e51bb5f6b52 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+f9bb03441c2e50c31d29582083d467e32bc5e088 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+e5bea303829174a4999b03bbcee5b0ad57a3bcf3 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+94f3b0590ac749b2f13c2841d0b3c47c16c5d8b7 llap       HIVE-12147 https://issues.apache.org/jira/browse/HIVE-12147
+69886819281100327ffb9527a001a7956ffc8daf llap       HIVE-12148 https://issues.apache.org/jira/browse/HIVE-12148
+055ed8dc679d0f59645f2cf1b118ab125e24d4f5 llap       HIVE-12148 https://issues.apache.org/jira/browse/HIVE-12148
+05630792e1adf24e1ead0a3b03fcf0d4af689909 llap       HIVE-12149 https://issues.apache.org/jira/browse/HIVE-12149
+744dc9c36dd50d2c7ef8b54a76aba1d4109f1b23 llap       HIVE-12149 https://issues.apache.org/jira/browse/HIVE-12149
+7775f7cbad687ee39b78538c38bb0a5c0329e076 llap       HIVE-12150 https://issues.apache.org/jira/browse/HIVE-12150
+541fcbe720df8c62e3bd4e00311c9a8c95bb12a4 llap       HIVE-12150 https://issues.apache.org/jira/browse/HIVE-12150
+53094ba7190b326d32be5e43ed4d992823c5dd4e llap       HIVE-12150 https://issues.apache.org/jira/browse/HIVE-12150
+0f556fe3723ebb67dc22793fbfa4cc0e2e248f35 llap       HIVE-12150 https://issues.apache.org/jira/browse/HIVE-12150
+4104b2c35eaac2669e862f6703dc003e94aba0f6 llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+ac4baea04ffc801bd2c972d7628deba0eb9ae4a8 llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+200749619c929474333c5d540eadd3751d7ecb19 llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+f27bcd9ca3a4296625079e2caf7408e855a197db llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+9b3756902e5d70f36540d11b50234c3d9a2adb39 llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+7d1ea695819ccdcaa86efe8d095323b5007df7f1 llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+2dfd8457b7ee415f1b28c5de2650b3f2457f20ea llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+fc6be8faf5c97901ccad33edca8f8f80023b308a llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+20acdb661a12f1bc472633d89428917275b6364d llap       HIVE-12151 https://issues.apache.org/jira/browse/HIVE-12151
+3a2e8ee7e47bd31745dfc5f6a29c602e09747f24 llap       HIVE-12152 https://issues.apache.org/jira/browse/HIVE-12152
+8ed270cb9d8a9c49cccf99402ca92e3df3304d9f llap       HIVE-12152 https://issues.apache.org/jira/browse/HIVE-12152
+c6565f5d65da9ed5cb452db7e313d0ce7abc1105 llap       HIVE-9729  https://issues.apache.org/jira/browse/HIVE-9729
+d8298e1c85a515150562b0df68af89c18c468638 llap       HIVE-9418  https://issues.apache.org/jira/browse/HIVE-9729
+


[5/5] hive git commit: HIVE-12096 : LLAP: merge master into branch (Sergey Shelukhin) ADDENDUM merge

Posted by se...@apache.org.
HIVE-12096 : LLAP: merge master into branch (Sergey Shelukhin) ADDENDUM merge


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eb28deb6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eb28deb6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eb28deb6

Branch: refs/heads/llap
Commit: eb28deb61d927b43bd82d06aee2802b0571c6832
Parents: b6de889 9b4826e
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Oct 12 16:53:35 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Oct 12 16:53:35 2015 -0700

----------------------------------------------------------------------
 .../hive/ql/stats/DummyStatsAggregator.java     |  12 +-
 .../hive/ql/stats/DummyStatsPublisher.java      |  15 +-
 .../ql/stats/KeyVerifyingStatsAggregator.java   |  10 +-
 pom.xml                                         |   1 -
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |   8 +-
 .../apache/hadoop/hive/ql/exec/StatsTask.java   |  31 ++-
 .../hadoop/hive/ql/exec/TableScanOperator.java  |   7 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  31 ++-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |  14 +-
 .../hive/ql/exec/spark/SparkPlanGenerator.java  |  16 +-
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |   5 +-
 .../hive/ql/index/AggregateIndexHandler.java    |   1 -
 .../hive/ql/index/TableBasedIndexHandler.java   |   7 -
 .../ql/index/bitmap/BitmapIndexHandler.java     |   1 -
 .../ql/index/compact/CompactIndexHandler.java   |   1 -
 .../ql/io/rcfile/stats/PartialScanMapper.java   |   7 +-
 .../ql/io/rcfile/stats/PartialScanTask.java     |  11 +-
 .../ql/io/rcfile/stats/PartialScanWork.java     |  14 ++
 .../hive/ql/optimizer/GenMRTableScan1.java      |   3 +
 .../hive/ql/optimizer/GenMapRedUtils.java       |   2 +-
 .../hive/ql/parse/ProcessAnalyzeTable.java      |   4 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   8 +-
 .../parse/spark/SparkProcessAnalyzeTable.java   |   2 +
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |  16 +-
 .../apache/hadoop/hive/ql/plan/StatsWork.java   |  15 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      |  12 +-
 .../hive/ql/stats/CounterStatsAggregator.java   |   8 +-
 .../ql/stats/CounterStatsAggregatorSpark.java   |   6 +-
 .../ql/stats/CounterStatsAggregatorTez.java     |  10 +-
 .../hive/ql/stats/CounterStatsPublisher.java    |   7 +-
 .../hadoop/hive/ql/stats/StatsAggregator.java   |   7 +-
 .../hive/ql/stats/StatsCollectionContext.java   |  63 ++++++
 .../hadoop/hive/ql/stats/StatsPublisher.java    |   8 +-
 .../hive/ql/stats/fs/FSStatsAggregator.java     |  23 +-
 .../hive/ql/stats/fs/FSStatsPublisher.java      |  32 +--
 .../hive/ql/stats/jdbc/JDBCStatsAggregator.java |  18 +-
 .../hive/ql/stats/jdbc/JDBCStatsPublisher.java  |  22 +-
 .../hive/ql/exec/TestFileSinkOperator.java      |  13 +-
 .../ql/exec/TestStatsPublisherEnhanced.java     |  61 +++---
 .../infer_bucket_sort_multi_insert.q            |   1 +
 .../test/queries/clientpositive/multi_insert.q  |   2 +-
 .../queries/clientpositive/multi_insert_gby2.q  |   2 +-
 .../queries/clientpositive/multi_insert_gby3.q  |   2 +-
 .../clientpositive/multi_insert_lateral_view.q  |   1 +
 .../queries/clientpositive/multi_insert_mixed.q |   2 +-
 ...multi_insert_move_tasks_share_dependencies.q |   2 +-
 .../clientpositive/multi_insert_union_src.q     |   2 +-
 .../spark/column_access_stats.q.out             |  46 ++--
 .../test/results/clientpositive/spark/pcr.q.out |  16 +-
 .../clientpositive/spark/ppd_join5.q.out        |  58 ++---
 .../clientpositive/spark/smb_mapjoin_12.q.out   |   6 +-
 .../clientpositive/spark/smb_mapjoin_13.q.out   |  36 ++--
 .../clientpositive/spark/smb_mapjoin_15.q.out   |  12 +-
 .../clientpositive/spark/smb_mapjoin_16.q.out   |   2 +-
 .../results/clientpositive/spark/union34.q.out  |  68 +++---
 service/pom.xml                                 |   6 -
 .../auth/TestLdapAtnProviderWithLdapServer.java | 215 -------------------
 .../org/apache/hive/service/auth/ldapdata.ldif  |  59 -----
 58 files changed, 486 insertions(+), 584 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/eb28deb6/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/eb28deb6/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/eb28deb6/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/eb28deb6/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/eb28deb6/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/eb28deb6/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/eb28deb6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------