Posted to commits@hive.apache.org by na...@apache.org on 2009/11/09 03:22:29 UTC

svn commit: r833965 - in /hadoop/hive/trunk: CHANGES.txt ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/test/queries/clientpositive/drop_multi_partitions.q ql/src/test/results/clientpositive/drop_multi_partitions.q.out

Author: namit
Date: Mon Nov  9 02:22:28 2009
New Revision: 833965

URL: http://svn.apache.org/viewvc?rev=833965&view=rev
Log:
HIVE-804. Support deletion of partitions based on a prefix partition specification
(Zheng Shao via namit)
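
As a hedged illustration of the semantics this change introduces: a partial
(prefix) partition specification such as (b='1') now matches every partition
whose full spec agrees on the listed keys, for example b=1/c=1 and b=1/c=2,
and all matching partitions are dropped. The standalone Java sketch below
mirrors the matching test added to DDLTask.java; the class and method names
(PrefixSpecMatchSketch, matchesPrefix) are illustrative only and are not part
of this patch.

    import java.util.HashMap;
    import java.util.Map;

    // Sketch only: returns true when every key/value pair in the partial
    // spec agrees with the corresponding entry of the full partition spec.
    public class PrefixSpecMatchSketch {
      static boolean matchesPrefix(Map<String, String> partialSpec,
                                   Map<String, String> fullSpec) {
        for (Map.Entry<String, String> item : partialSpec.entrySet()) {
          if (!item.getValue().equals(fullSpec.get(item.getKey()))) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        Map<String, String> full = new HashMap<String, String>();
        full.put("b", "1");
        full.put("c", "2");               // full spec: b=1/c=2

        Map<String, String> prefix = new HashMap<String, String>();
        prefix.put("b", "1");             // partial spec: b=1

        System.out.println(matchesPrefix(prefix, full));  // prints "true"
      }
    }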


Added:
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/drop_multi_partitions.q
    hadoop/hive/trunk/ql/src/test/results/clientpositive/drop_multi_partitions.q.out
Modified:
    hadoop/hive/trunk/CHANGES.txt
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java

Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=833965&r1=833964&r2=833965&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Mon Nov  9 02:22:28 2009
@@ -247,6 +247,9 @@
     HIVE-910. NULL value is not correctly handled by ColumnarStruct
     (He Yongqiang via namit)
 
+    HIVE-804. Support deletion of partitions based on a prefix partition specification
+    (Zheng Shao via namit)
+
 Release 0.4.0 -  Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=833965&r1=833964&r2=833965&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Mon Nov  9 02:22:28 2009
@@ -27,6 +27,7 @@
 import java.io.Writer;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -1001,18 +1002,39 @@
       if (tbl != null)
         work.getOutputs().add(new WriteEntity(tbl));
     } else {
+      // get all partitions of the table
+      List<String> partitionNames = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl.getTableName(), (short)-1);
+      Set<Map<String, String>> partitions = new HashSet<Map<String, String>>();
+      for (int i = 0; i < partitionNames.size(); i++) {
+        try {
+          partitions.add(Warehouse.makeSpecFromName(partitionNames.get(i)));
+        } catch (MetaException e) {
+          LOG.warn("Unrecognized partition name from metastore: " + partitionNames.get(i)); 
+        }
+      }
       // drop partitions in the list
-      List<Partition> parts = new ArrayList<Partition>();
+      List<Partition> partsToDelete = new ArrayList<Partition>();
       for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
-        Partition part = db.getPartition(tbl, partSpec, false);
-        if (part == null) {
-          console.printInfo("Partition " + partSpec + " does not exist.");
-        } else {
-          parts.add(part);
+        Iterator<Map<String,String>> it = partitions.iterator();
+        while (it.hasNext()) {
+          Map<String, String> part = it.next();
+          // test if partSpec matches part
+          boolean match = true;
+          for (Map.Entry<String, String> item: partSpec.entrySet()) {
+            if (!item.getValue().equals(part.get(item.getKey()))) {
+              match = false;
+              break;
+            }
+          }
+          if (match) {
+            partsToDelete.add(db.getPartition(tbl, part, false));
+            it.remove();
+          }
         }
       }
+      
       // drop all existing partitions from the list
-      for (Partition partition : parts) {
+      for (Partition partition : partsToDelete) {
         console.printInfo("Dropping the partition " + partition.getName());
         db.dropPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, dropTbl
             .getTableName(), partition.getValues(), true); // drop data for the
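
For readers skimming the hunk above: the new code lists every partition of the
table, turns each name into a key/value spec via Warehouse.makeSpecFromName,
collects the partitions matched by each requested partial spec, and removes a
matched entry from the candidate set so that overlapping partial specs cannot
select the same partition twice. A self-contained sketch of that flow, with a
toy specFromName parser standing in for the metastore calls (all names below
are illustrative, not Hive APIs), is:

    import java.util.*;

    // Sketch only: toy stand-ins for the metastore lookup and
    // Warehouse.makeSpecFromName used in DDLTask above.
    public class DropPartitionFlowSketch {

      // Parse "b=1/c=2" into {b=1, c=2} (simplified; no escaping).
      static Map<String, String> specFromName(String name) {
        Map<String, String> spec = new LinkedHashMap<String, String>();
        for (String kv : name.split("/")) {
          String[] parts = kv.split("=", 2);
          spec.put(parts[0], parts[1]);
        }
        return spec;
      }

      public static void main(String[] args) {
        List<String> names = Arrays.asList("b=1/c=1", "b=1/c=2", "b=2/c=2");
        Set<Map<String, String>> candidates = new HashSet<Map<String, String>>();
        for (String n : names) {
          candidates.add(specFromName(n));
        }

        Map<String, String> partial = Collections.singletonMap("b", "1");
        List<Map<String, String>> toDrop = new ArrayList<Map<String, String>>();

        Iterator<Map<String, String>> it = candidates.iterator();
        while (it.hasNext()) {
          Map<String, String> full = it.next();
          boolean match = true;
          for (Map.Entry<String, String> e : partial.entrySet()) {
            if (!e.getValue().equals(full.get(e.getKey()))) {
              match = false;
              break;
            }
          }
          if (match) {
            toDrop.add(full);
            it.remove();  // an overlapping partial spec cannot re-select it
          }
        }

        System.out.println(toDrop);  // [{b=1, c=1}, {b=1, c=2}] in some order
      }
    }

This is the same scenario exercised by the drop_multi_partitions.q test added
below, where dropping partition (b='1') removes b=1/c=1 and b=1/c=2 and leaves
b=2/c=2 in place.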

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/drop_multi_partitions.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/drop_multi_partitions.q?rev=833965&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/drop_multi_partitions.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/drop_multi_partitions.q Mon Nov  9 02:22:28 2009
@@ -0,0 +1,15 @@
+create table mp (a string) partitioned by (b string, c string);
+
+alter table mp add partition (b='1', c='1');
+alter table mp add partition (b='1', c='2');
+alter table mp add partition (b='2', c='2');
+
+show partitions mp;
+
+explain extended alter table mp drop partition (b='1');
+alter table mp drop partition (b='1');
+
+show partitions mp;
+
+drop table mp;
+

Added: hadoop/hive/trunk/ql/src/test/results/clientpositive/drop_multi_partitions.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/drop_multi_partitions.q.out?rev=833965&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/drop_multi_partitions.q.out (added)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/drop_multi_partitions.q.out Mon Nov  9 02:22:28 2009
@@ -0,0 +1,60 @@
+PREHOOK: query: create table mp (a string) partitioned by (b string, c string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table mp (a string) partitioned by (b string, c string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@mp
+PREHOOK: query: alter table mp add partition (b='1', c='1')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: query: alter table mp add partition (b='1', c='1')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@mp@b=1/c=1
+PREHOOK: query: alter table mp add partition (b='1', c='2')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: query: alter table mp add partition (b='1', c='2')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@mp@b=1/c=2
+PREHOOK: query: alter table mp add partition (b='2', c='2')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: query: alter table mp add partition (b='2', c='2')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@mp@b=2/c=2
+PREHOOK: query: show partitions mp
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions mp
+POSTHOOK: type: SHOWPARTITIONS
+b=1/c=1
+b=1/c=2
+b=2/c=2
+PREHOOK: query: explain extended alter table mp drop partition (b='1')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: query: explain extended alter table mp drop partition (b='1')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+ABSTRACT SYNTAX TREE:
+  (TOK_ALTERTABLE_DROPPARTS mp (TOK_PARTSPEC (TOK_PARTVAL b '1')))
+
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+      Drop Table Operator:
+        Drop Table
+          table: mp
+
+
+PREHOOK: query: alter table mp drop partition (b='1')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: query: alter table mp drop partition (b='1')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Output: default@mp@b=1/c=1
+POSTHOOK: Output: default@mp@b=1/c=2
+PREHOOK: query: show partitions mp
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions mp
+POSTHOOK: type: SHOWPARTITIONS
+b=2/c=2
+PREHOOK: query: drop table mp
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table mp
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@mp