Posted to commits@hive.apache.org by xu...@apache.org on 2015/09/16 21:18:33 UTC

[15/50] [abbrv] hive git commit: HIVE-10980 : Merge of dynamic partitions loads all data to default partition (Illya Yalovyy via Ashutosh Chauhan)

HIVE-10980 : Merge of dynamic partitions loads all data to default partition (Illya Yalovyy via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ff1f5b1a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ff1f5b1a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ff1f5b1a

Branch: refs/heads/spark
Commit: ff1f5b1a7afc4c934bad2a39da217513760d0ba2
Parents: 1e97b16
Author: Illya Yalovyy <ya...@amazon.com>
Authored: Thu Sep 10 00:17:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Sep 12 19:50:08 2015 -0700
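
Background on the bug: the table descriptor keeps partition column metadata in
two index-aligned, delimited properties, '/'-delimited names under
META_TABLE_PARTITION_COLUMNS and ':'-delimited types under
META_TABLE_PARTITION_COLUMN_TYPES. Before this change, the merge path for
dynamically partitioned inserts rewrote only the names property from the
dynamic-partition context and left the types property holding the full
static+dynamic list, so the two fell out of alignment and merged data ended
up in the default partition. A minimal, self-contained sketch of the mismatch
(the literal keys stand in for the hive_metastoreConstants fields):

    import java.util.Properties;

    public class PartColsMismatchSketch {
      public static void main(String[] args) {
        Properties p = new Properties();
        // As populated for a table partitioned by (ds string, hr string):
        p.setProperty("partition_columns", "ds/hr");                // '/'-delimited names
        p.setProperty("partition_columns.types", "string:string");  // ':'-delimited types

        // Pre-fix merge path: only the names were rewritten to the dynamic
        // columns; the types were left untouched.
        p.setProperty("partition_columns", "hr");

        System.out.println(p.getProperty("partition_columns"));        // hr
        System.out.println(p.getProperty("partition_columns.types"));  // string:string (stale)
      }
    }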

----------------------------------------------------------------------
 data/files/dynpartdata1.txt                     |  5 +
 data/files/dynpartdata2.txt                     |  6 ++
 .../hive/ql/optimizer/GenMapRedUtils.java       | 57 +++++++++--
 ...nMapRedUtilsUsePartitionColumnsNegative.java | 73 +++++++++++++++
 ...nMapRedUtilsUsePartitionColumnsPositive.java | 61 ++++++++++++
 .../test/queries/clientpositive/dynpart_merge.q | 28 ++++++
 .../results/clientpositive/dynpart_merge.q.out  | 99 ++++++++++++++++++++
 .../list_bucket_dml_6.q.java1.7.out             | 12 +--
 .../list_bucket_dml_6.q.java1.8.out             | 12 +--
 .../clientpositive/list_bucket_dml_7.q.out      | 12 +--
 10 files changed, 341 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/data/files/dynpartdata1.txt
----------------------------------------------------------------------
diff --git a/data/files/dynpartdata1.txt b/data/files/dynpartdata1.txt
new file mode 100644
index 0000000..aefb87f
--- /dev/null
+++ b/data/files/dynpartdata1.txt
@@ -0,0 +1,5 @@
+20150316,16,reqA,clusterIdA,cacheId1
+20150316,16,reqB,clusterIdB,cacheId2
+20150316,16,reqA,clusterIdC,cacheId3
+20150316,16,reqD,clusterIdD,cacheId4
+20150316,16,reqA,clusterIdA,cacheId5

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/data/files/dynpartdata2.txt
----------------------------------------------------------------------
diff --git a/data/files/dynpartdata2.txt b/data/files/dynpartdata2.txt
new file mode 100644
index 0000000..4afdb7f
--- /dev/null
+++ b/data/files/dynpartdata2.txt
@@ -0,0 +1,6 @@
+20150317,16,reqB,clusterIdB,cacheId6
+20150318,16,reqA,clusterIdC,cacheId6
+20150317,15,reqD,clusterIdD,cacheId7
+20150316,16,reqA,clusterIdD,cacheId8
+20150316,16,reqD,clusterIdB,cacheId9
+20150316,16,reqB,clusterIdA,cacheId1

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 4a325fb..02fbdfe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.optimizer;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -112,6 +113,7 @@ import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.mapred.InputFormat;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Interner;
 
 /**
@@ -1234,16 +1236,13 @@ public final class GenMapRedUtils {
       ArrayList<ColumnInfo> signature = inputRS.getSignature();
       String tblAlias = fsInputDesc.getTableInfo().getTableName();
       LinkedHashMap<String, String> colMap = new LinkedHashMap<String, String>();
-      StringBuilder partCols = new StringBuilder();
       for (String dpCol : dpCtx.getDPColNames()) {
         ColumnInfo colInfo = new ColumnInfo(dpCol,
             TypeInfoFactory.stringTypeInfo, // all partition column type should be string
             tblAlias, true); // partition column is virtual column
         signature.add(colInfo);
         colMap.put(dpCol, dpCol); // input and output have the same column name
-        partCols.append(dpCol).append('/');
       }
-      partCols.setLength(partCols.length() - 1); // remove the last '/'
       inputRS.setSignature(signature);
 
       // create another DynamicPartitionCtx, which has a different input-to-DP column mapping
@@ -1252,9 +1251,7 @@ public final class GenMapRedUtils {
       fsOutputDesc.setDynPartCtx(dpCtx2);
 
       // update the FileSinkOperator to include partition columns
-      fsInputDesc.getTableInfo().getProperties().setProperty(
-        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS,
-        partCols.toString()); // list of dynamic partition column names
+      usePartitionColumns(fsInputDesc.getTableInfo().getProperties(), dpCtx.getDPColNames());
     } else {
       // non-partitioned table
       fsInputDesc.getTableInfo().getProperties().remove(
@@ -1877,7 +1874,55 @@ public final class GenMapRedUtils {
     }
     return null;
   }
+  /**
+   * Retains only the specified partition columns.
+   * The provided properties must be pre-populated with the partition column
+   * names and types; all metadata for columns not in the list is dropped.
+   * @param properties properties to update
+   * @param partColNames list of partition columns to retain
+   */
+  static void usePartitionColumns(Properties properties, List<String> partColNames) {
+    Preconditions.checkArgument(!partColNames.isEmpty(), "No partition columns provided to use");
+    Preconditions.checkArgument(new HashSet<String>(partColNames).size() == partColNames.size(),
+        "Partition columns should be unique: " + partColNames);
+
+    String[] partNames = properties.getProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS)
+        .split("/");
+    String[] partTypes = properties.getProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES)
+        .split(":");
+    Preconditions.checkArgument(partNames.length == partTypes.length,
+        "Partition names " + Arrays.toString(partNames) + " don't match partition types "
+        + Arrays.toString(partTypes));
+
+    Map<String, String> typeMap = new HashMap<String, String>();
+    for (int i = 0; i < partNames.length; i++) {
+      String previousValue = typeMap.put(partNames[i], partTypes[i]);
+      Preconditions.checkArgument(previousValue == null, "Partition columns configuration is inconsistent. "
+          + "There are duplicates in partition column names: " + Arrays.toString(partNames));
+    }
 
+    StringBuilder partNamesBuf = new StringBuilder();
+    StringBuilder partTypesBuf = new StringBuilder();
+    for (String partName : partColNames) {
+      partNamesBuf.append(partName).append('/');
+      String partType = typeMap.get(partName);
+      if (partType == null) {
+        throw new RuntimeException("Type information for partition column " + partName + " is missing.");
+      }
+      partTypesBuf.append(partType).append(':');
+    }
+    partNamesBuf.setLength(partNamesBuf.length() - 1);
+    partTypesBuf.setLength(partTypesBuf.length() - 1);
+
+    properties.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS,
+        partNamesBuf.toString());
+    properties.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES,
+        partTypesBuf.toString());
+  }
   private GenMapRedUtils() {
     // prevent instantiation
   }
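
For reference, a minimal sketch of the new helper's effect, mirroring the
unit tests below; it assumes compilation into the
org.apache.hadoop.hive.ql.optimizer package, since the method is
package-private:

    package org.apache.hadoop.hive.ql.optimizer;

    import java.util.Arrays;
    import java.util.Properties;
    import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

    public class UsePartitionColumnsSketch {
      public static void main(String[] args) {
        Properties p = new Properties();
        p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "ds/hr");
        p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "string:string");

        // Keep only the dynamic column; names and types are filtered together.
        GenMapRedUtils.usePartitionColumns(p, Arrays.asList("hr"));

        System.out.println(p.getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS));      // hr
        System.out.println(p.getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES)); // string
      }
    }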

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsNegative.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsNegative.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsNegative.java
new file mode 100644
index 0000000..153061f
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsNegative.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Properties;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.junit.Test;
+
+public class TestGenMapRedUtilsUsePartitionColumnsNegative {
+
+  @Test(expected = NullPointerException.class)
+  public void testUsePartitionColumnsNoPartColNames() {
+    Properties p = new Properties();
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p1"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testUsePartitionColumnsNamesTypesMismatch() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1/p2");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1");
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p1"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testUsePartitionColumnsNoPartitionsToRetain() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1");
+    GenMapRedUtils.usePartitionColumns(p, Collections.<String>emptyList());
+  }
+
+  @Test(expected = RuntimeException.class)
+  public void testUsePartitionColumnsWrongPartColName() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1");
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p2"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testUsePartitionColumnsDuplicatePartColNameInArgument() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1/p2");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1:t2");
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p1","p2","p1"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testUsePartitionColumnsDuplicatePartColNameInConfiguration() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1/p2/p1");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1:t2:t3");
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p1"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsPositive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsPositive.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsPositive.java
new file mode 100644
index 0000000..9bcca66
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsPositive.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.util.Arrays;
+import java.util.Properties;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import static org.junit.Assert.*;
+
+@RunWith(Parameterized.class)
+public class TestGenMapRedUtilsUsePartitionColumnsPositive {
+
+  @Parameterized.Parameters(name = "{index}: usePartitionColumns({2})")
+  public static Iterable<Object[]> testCases() {
+    return Arrays.asList(new Object[][]{
+      {"p1/p2/p3","t1:t2:t3","p2","p2","t2"},
+      {"p1/p2/p3","t1:t2:t3","p2,p3","p2/p3","t2:t3"},
+      {"p1/p2/p3","t1:t2:t3","p1,p2,p3","p1/p2/p3","t1:t2:t3"},
+      {"p1/p2/p3","t1:t2:t3","p1,p3","p1/p3","t1:t3"},
+      {"p1","t1","p1","p1","t1"},
+      {"p1/p2/p3","t1:t2:t3","p3,p2,p1","p3/p2/p1","t3:t2:t1"}
+    });
+  }
+
+  @Parameterized.Parameter(0) public String inPartColNames;
+  @Parameterized.Parameter(1) public String inPartColTypes;
+  @Parameterized.Parameter(2) public String partNamesToRetain;
+  @Parameterized.Parameter(3) public String expectedPartColNames;
+  @Parameterized.Parameter(4) public String expectedPartColTypes;
+
+  @Test
+  public void testUsePartitionColumns() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, inPartColNames);
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, inPartColTypes);
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList(partNamesToRetain.split(",")));
+    String actualNames = p.getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS);
+    String actualTypes = p.getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES);
+    assertEquals(expectedPartColNames, actualNames);
+    assertEquals(expectedPartColTypes, actualTypes);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/queries/clientpositive/dynpart_merge.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_merge.q b/ql/src/test/queries/clientpositive/dynpart_merge.q
new file mode 100644
index 0000000..26f4de7
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/dynpart_merge.q
@@ -0,0 +1,28 @@
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=strict;
+set hive.optimize.sort.dynamic.partition=false;
+set hive.merge.mapfiles=true;
+set hive.merge.mapredfiles=true;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+create external table sdp (
+  dataint bigint,
+  hour int,
+  req string,
+  cid string,
+  caid string
+)
+row format delimited
+fields terminated by ',';
+
+load data local inpath '../../data/files/dynpartdata1.txt' into table sdp;
+load data local inpath '../../data/files/dynpartdata2.txt' into table sdp;
+
+create table tdp (cid string, caid string)
+partitioned by (dataint bigint, hour int, req string);
+
+insert overwrite table tdp partition (dataint=20150316, hour=16, req)
+select cid, caid, req from sdp where dataint=20150316 and hour=16;
+
+select * from tdp order by caid;
+show partitions tdp;

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/results/clientpositive/dynpart_merge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_merge.q.out b/ql/src/test/results/clientpositive/dynpart_merge.q.out
new file mode 100644
index 0000000..1c6f556
--- /dev/null
+++ b/ql/src/test/results/clientpositive/dynpart_merge.q.out
@@ -0,0 +1,99 @@
+PREHOOK: query: create external table sdp (
+  dataint bigint,
+  hour int,
+  req string,
+  cid string,
+  caid string
+)
+row format delimited
+fields terminated by ','
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@sdp
+POSTHOOK: query: create external table sdp (
+  dataint bigint,
+  hour int,
+  req string,
+  cid string,
+  caid string
+)
+row format delimited
+fields terminated by ','
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@sdp
+PREHOOK: query: load data local inpath '../../data/files/dynpartdata1.txt' into table sdp
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@sdp
+POSTHOOK: query: load data local inpath '../../data/files/dynpartdata1.txt' into table sdp
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@sdp
+PREHOOK: query: load data local inpath '../../data/files/dynpartdata2.txt' into table sdp
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@sdp
+POSTHOOK: query: load data local inpath '../../data/files/dynpartdata2.txt' into table sdp
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@sdp
+PREHOOK: query: create table tdp (cid string, caid string)
+partitioned by (dataint bigint, hour int, req string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tdp
+POSTHOOK: query: create table tdp (cid string, caid string)
+partitioned by (dataint bigint, hour int, req string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tdp
+PREHOOK: query: insert overwrite table tdp partition (dataint=20150316, hour=16, req)
+select cid, caid, req from sdp where dataint=20150316 and hour=16
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sdp
+PREHOOK: Output: default@tdp@dataint=20150316/hour=16
+POSTHOOK: query: insert overwrite table tdp partition (dataint=20150316, hour=16, req)
+select cid, caid, req from sdp where dataint=20150316 and hour=16
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sdp
+POSTHOOK: Output: default@tdp@dataint=20150316/hour=16/req=reqA
+POSTHOOK: Output: default@tdp@dataint=20150316/hour=16/req=reqB
+POSTHOOK: Output: default@tdp@dataint=20150316/hour=16/req=reqD
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqA).caid SIMPLE [(sdp)sdp.FieldSchema(name:caid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqA).cid SIMPLE [(sdp)sdp.FieldSchema(name:cid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqB).caid SIMPLE [(sdp)sdp.FieldSchema(name:caid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqB).cid SIMPLE [(sdp)sdp.FieldSchema(name:cid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqD).caid SIMPLE [(sdp)sdp.FieldSchema(name:caid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqD).cid SIMPLE [(sdp)sdp.FieldSchema(name:cid, type:string, comment:null), ]
+PREHOOK: query: select * from tdp order by caid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tdp
+PREHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqA
+PREHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqB
+PREHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqD
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tdp order by caid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tdp
+POSTHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqA
+POSTHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqB
+POSTHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqD
+#### A masked pattern was here ####
+clusterIdA	cacheId1	20150316	16	reqB
+clusterIdA	cacheId1	20150316	16	reqA
+clusterIdB	cacheId2	20150316	16	reqB
+clusterIdC	cacheId3	20150316	16	reqA
+clusterIdD	cacheId4	20150316	16	reqD
+clusterIdA	cacheId5	20150316	16	reqA
+clusterIdD	cacheId8	20150316	16	reqA
+clusterIdB	cacheId9	20150316	16	reqD
+PREHOOK: query: show partitions tdp
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@tdp
+POSTHOOK: query: show partitions tdp
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@tdp
+dataint=20150316/hour=16/req=reqA
+dataint=20150316/hour=16/req=reqB
+dataint=20150316/hour=16/req=reqD

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
index d223234..c3ede05 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
@@ -540,7 +540,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.list_bucketing_dynamic_part
                       partition_columns hr
-                      partition_columns.types string:string
+                      partition_columns.types string
                       serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -677,7 +677,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -709,7 +709,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -726,7 +726,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -758,7 +758,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -775,7 +775,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
index f884ace..16a6e72 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
@@ -540,7 +540,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.list_bucketing_dynamic_part
                       partition_columns hr
-                      partition_columns.types string:string
+                      partition_columns.types string
                       serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -677,7 +677,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -709,7 +709,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -726,7 +726,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -758,7 +758,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -775,7 +775,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
index 541944d..7bf4a21 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
@@ -486,7 +486,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.list_bucketing_dynamic_part
                       partition_columns hr
-                      partition_columns.types string:string
+                      partition_columns.types string
                       serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -623,7 +623,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -655,7 +655,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -672,7 +672,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -704,7 +704,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -721,7 +721,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe