Posted to commits@hive.apache.org by ha...@apache.org on 2012/10/30 22:35:35 UTC

svn commit: r1403878 - in /hive/trunk: common/src/java/org/apache/hadoop/hive/common/ ql/src/test/org/apache/hadoop/hive/ql/ ql/src/test/queries/clientpositive/ ql/src/test/results/clientpositive/

Author: hashutosh
Date: Tue Oct 30 21:35:34 2012
New Revision: 1403878

URL: http://svn.apache.org/viewvc?rev=1403878&view=rev
Log:
HIVE-3441 : testcases escape1,escape2 fail on windows (Thejas Nair via Ashutosh Chauhan)

Added:
    hive/trunk/ql/src/test/queries/clientpositive/combine2_win.q
    hive/trunk/ql/src/test/queries/clientpositive/input_part10_win.q
    hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14_win.q
    hive/trunk/ql/src/test/results/clientpositive/combine2_win.q.out
    hive/trunk/ql/src/test/results/clientpositive/input_part10_win.q.out
    hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out
Modified:
    hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/trunk/ql/src/test/queries/clientpositive/combine2.q
    hive/trunk/ql/src/test/queries/clientpositive/escape1.q
    hive/trunk/ql/src/test/queries/clientpositive/escape2.q
    hive/trunk/ql/src/test/queries/clientpositive/input_part10.q
    hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q
    hive/trunk/ql/src/test/results/clientpositive/combine2.q.out
    hive/trunk/ql/src/test/results/clientpositive/escape1.q.out
    hive/trunk/ql/src/test/results/clientpositive/escape2.q.out
    hive/trunk/ql/src/test/results/clientpositive/input_part10.q.out
    hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14.q.out

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java Tue Oct 30 21:35:34 2012
@@ -22,12 +22,14 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.BitSet;
 import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
+import org.apache.hadoop.util.Shell;
+
 /**
  * Collection of file manipulation utilities common across Hive.
  */
@@ -127,6 +129,12 @@ public final class FileUtils {
   // won't be corrupt, because the full path name in metastore is stored.
   // In that case, Hive will continue to read the old data, but when it creates
   // new partitions, it will use new names.
+  // edit: There are some use cases for which adding new chars is not
+  // backward compatible. E.g. if a partition was created with a name containing
+  // a special char that you later want to start escaping, and you then try
+  // dropping the partition with a hive version that escapes the special char
+  // using the list below, the drop partition fails to work.
+
   static BitSet charToEscape = new BitSet(128);
   static {
     for (char c = 0; c < ' '; c++) {
@@ -144,9 +152,19 @@ public final class FileUtils {
         '\u001A', '\u001B', '\u001C', '\u001D', '\u001E', '\u001F',
         '"', '#', '%', '\'', '*', '/', ':', '=', '?', '\\', '\u007F', '{',
         '[', ']', '^'};
+
     for (char c : clist) {
       charToEscape.set(c);
     }
+
+    if (Shell.WINDOWS) {
+      // On Windows, the following chars need to be escaped as well
+      char[] winClist = {' ', '<', '>', '|'};
+      for (char c : winClist) {
+        charToEscape.set(c);
+      }
+    }
+
   }
 
   static boolean needsEscaping(char c) {

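For reference, a minimal, self-contained sketch of how a BitSet-driven escaping table like the one above gets applied. The class name, the os.name check (standing in for Shell.WINDOWS), and the escapePathName helper are illustrative assumptions, not the actual Hive source; the percent-encoding shown matches the "value=%7C" style partition directory names in the test output below.

    import java.util.BitSet;

    // Sketch only: class and helper names are illustrative, not Hive source.
    public class EscapeSketch {
      static BitSet charToEscape = new BitSet(128);
      static {
        // A subset of the characters escaped on all platforms (see diff above)
        char[] clist = {'"', '#', '%', '\'', '*', '/', ':', '=', '?', '\\'};
        for (char c : clist) {
          charToEscape.set(c);
        }
        // Stand-in for Shell.WINDOWS so the sketch has no Hadoop dependency
        boolean isWindows =
            System.getProperty("os.name").toLowerCase().contains("windows");
        if (isWindows) {
          // These characters are illegal or problematic in Windows paths
          char[] winClist = {' ', '<', '>', '|'};
          for (char c : winClist) {
            charToEscape.set(c);
          }
        }
      }

      static boolean needsEscaping(char c) {
        return c < charToEscape.size() && charToEscape.get(c);
      }

      // Percent-encode flagged characters, e.g. '|' -> "%7C", ' ' -> "%20"
      static String escapePathName(String path) {
        StringBuilder sb = new StringBuilder();
        for (char c : path.toCharArray()) {
          if (needsEscaping(c)) {
            sb.append('%').append(String.format("%02X", (int) c));
          } else {
            sb.append(c);
          }
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        // On Windows: 2010-04-21%2009%3A45%3A00 (space and colons escaped);
        // elsewhere the space is kept and only the colons become %3A
        System.out.println(escapePathName("2010-04-21 09:45:00"));
      }
    }
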
Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java Tue Oct 30 21:35:34 2012
@@ -317,11 +317,30 @@ public class QTestUtil {
   }
 
   public void addFile(File qf) throws Exception {
+
     FileInputStream fis = new FileInputStream(qf);
     BufferedInputStream bis = new BufferedInputStream(fis);
     BufferedReader br = new BufferedReader(new InputStreamReader(bis, "UTF8"));
     StringBuilder qsb = new StringBuilder();
 
+    // Read the entire query
+    String line;
+    while ((line = br.readLine()) != null) {
+      qsb.append(line + "\n");
+    }
+    String query = qsb.toString();
+
+    qMap.put(qf.getName(), query);
+
+    if (checkHadoopVersionExclude(qf.getName(), query)
+        || checkOSExclude(qf.getName(), query)) {
+      qSkipSet.add(qf.getName());
+    }
+    br.close();
+  }
+
+  private boolean checkHadoopVersionExclude(String fileName, String query) {
+
     // Look for a hint to not run a test on some Hadoop versions
     Pattern pattern = Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");
 
@@ -329,59 +348,86 @@ public class QTestUtil {
     boolean includeQuery = false;
     Set<String> versionSet = new HashSet<String>();
     String hadoopVer = ShimLoader.getMajorVersion();
-    String line;
 
-    // Read the entire query
-    while ((line = br.readLine()) != null) {
-
-      // Each qfile may include at most one INCLUDE or EXCLUDE directive.
-      //
-      // If a qfile contains an INCLUDE directive, and hadoopVer does
-      // not appear in the list of versions to include, then the qfile
-      // is skipped.
-      //
-      // If a qfile contains an EXCLUDE directive, and hadoopVer is
-      // listed in the list of versions to EXCLUDE, then the qfile is
-      // skipped.
-      //
-      // Otherwise, the qfile is included.
-      Matcher matcher = pattern.matcher(line);
-      if (matcher.find()) {
-        if (excludeQuery || includeQuery) {
-          String message = "QTestUtil: qfile " + qf.getName()
-            + " contains more than one reference to (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS";
-          throw new UnsupportedOperationException(message);
-        }
+    Matcher matcher = pattern.matcher(query);
 
-        String prefix = matcher.group(1);
-        if ("EX".equals(prefix)) {
-          excludeQuery = true;
-        } else {
-          includeQuery = true;
-        }
+    // Each qfile may include at most one INCLUDE or EXCLUDE directive.
+    //
+    // If a qfile contains an INCLUDE directive, and hadoopVer does
+    // not appear in the list of versions to include, then the qfile
+    // is skipped.
+    //
+    // If a qfile contains an EXCLUDE directive, and hadoopVer is
+    // listed in the list of versions to EXCLUDE, then the qfile is
+    // skipped.
+    //
+    // Otherwise, the qfile is included.
+
+    if (matcher.find()) {
+
+      String prefix = matcher.group(1);
+      if ("EX".equals(prefix)) {
+        excludeQuery = true;
+      } else {
+        includeQuery = true;
+      }
 
-        String versions = matcher.group(2);
-        for (String s : versions.split("\\,")) {
-          s = s.trim();
-          versionSet.add(s);
-        }
+      String versions = matcher.group(2);
+      for (String s : versions.split("\\,")) {
+        s = s.trim();
+        versionSet.add(s);
       }
-      qsb.append(line + "\n");
     }
-    qMap.put(qf.getName(), qsb.toString());
+
+    if (matcher.find()) {
+      // A second match is not supposed to be there
+      String message = "QTestUtil: qfile " + fileName
+          + " contains more than one reference to (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS";
+      throw new UnsupportedOperationException(message);
+    }
 
     if (excludeQuery && versionSet.contains(hadoopVer)) {
-      System.out.println("QTestUtil: " + qf.getName()
+      System.out.println("QTestUtil: " + fileName
         + " EXCLUDE list contains Hadoop Version " + hadoopVer + ". Skipping...");
-      qSkipSet.add(qf.getName());
+      return true;
     } else if (includeQuery && !versionSet.contains(hadoopVer)) {
-      System.out.println("QTestUtil: " + qf.getName()
+      System.out.println("QTestUtil: " + fileName
         + " INCLUDE list does not contain Hadoop Version " + hadoopVer + ". Skipping...");
-      qSkipSet.add(qf.getName());
+      return true;
     }
-    br.close();
+    return false;
   }
 
+  private boolean checkOSExclude(String fileName, String query) {
+    // Look for a hint to include or exclude a test on Windows
+    Pattern pattern = Pattern.compile("-- (EX|IN)CLUDE_OS_WINDOWS");
+
+    // detect whether this query wants to be excluded or included
+    // on windows
+    Matcher matcher = pattern.matcher(query);
+    if (matcher.find()) {
+      String prefix = matcher.group(1);
+      if ("EX".equals(prefix)) {
+        // windows is to be excluded
+        if (Shell.WINDOWS) {
+          System.out.println("Due to the OS being windows, " +
+            "adding the query " + fileName +
+            " to the set of tests to skip");
+          return true;
+        }
+      } else if (!Shell.WINDOWS) {
+        // non-windows is to be excluded
+        System.out.println("Due to the OS not being windows, " +
+            "adding the query " + fileName +
+            " to the set of tests to skip");
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Clear out any side effects of running tests
    */

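For reference, a small standalone sketch of the directive matching that the refactored methods perform: one regex scan over the whole query text instead of the old line-by-line loop. The class name and the sample query strings are made up for illustration; the two Pattern literals are taken verbatim from the diff above.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class DirectiveSketch {
      public static void main(String[] args) {
        String query = "set hive.exec.dynamic.partition=true;\n"
            + "-- EXCLUDE_OS_WINDOWS\n"
            + "DROP TABLE escape1;\n";

        // OS directive: group(1) is "EX" or "IN"; combined with whether the
        // current OS is Windows, this decides if the qfile is skipped
        Pattern osPattern = Pattern.compile("-- (EX|IN)CLUDE_OS_WINDOWS");
        Matcher m = osPattern.matcher(query);
        if (m.find()) {
          System.out.println("OS directive: " + m.group(1) + "CLUDE");
        }

        // Hadoop version directive: group(2) is the comma-separated list
        Pattern verPattern =
            Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");
        Matcher vm = verPattern.matcher(
            "-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.23)");
        if (vm.find()) {
          for (String s : vm.group(2).split(",")) {
            System.out.println("version: " + s.trim());
          }
        }
      }
    }
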
Modified: hive/trunk/ql/src/test/queries/clientpositive/combine2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/combine2.q?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/combine2.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/combine2.q Tue Oct 30 21:35:34 2012
@@ -10,6 +10,9 @@ set hive.exec.dynamic.partition.mode=non
 set mapred.cache.shared.enabled=false;
 set hive.merge.smallfiles.avgsize=0;
 
+-- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
 
 
 create table combine2(key string) partitioned by (value string);

Added: hive/trunk/ql/src/test/queries/clientpositive/combine2_win.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/combine2_win.q?rev=1403878&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/combine2_win.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/combine2_win.q Tue Oct 30 21:35:34 2012
@@ -0,0 +1,39 @@
+set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
+set mapred.min.split.size=256;
+set mapred.min.split.size.per.node=256;
+set mapred.min.split.size.per.rack=256;
+set mapred.max.split.size=256;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set mapred.cache.shared.enabled=false;
+set hive.merge.smallfiles.avgsize=0;
+
+-- INCLUDE_OS_WINDOWS
+-- included only on windows because of difference in file name encoding logic
+
+create table combine2(key string) partitioned by (value string);
+
+insert overwrite table combine2 partition(value) 
+select * from (
+   select key, value from src where key < 10
+   union all 
+   select key, '|' as value from src where key = 11
+   union all
+   select key, '2010-04-21 09:45:00' value from src where key = 19) s;
+
+show partitions combine2;
+
+explain
+select key, value from combine2 where value is not null order by key;
+
+select key, value from combine2 where value is not null order by key;
+
+explain extended
+select count(1) from combine2 where value is not null;
+
+select count(1) from combine2 where value is not null;
+
+explain
+select ds, count(1) from srcpart where ds is not null group by ds;
+
+select ds, count(1) from srcpart where ds is not null group by ds;

Modified: hive/trunk/ql/src/test/queries/clientpositive/escape1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/escape1.q?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/escape1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/escape1.q Tue Oct 30 21:35:34 2012
@@ -1,5 +1,9 @@
 set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions.pernode=200;
+
+-- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
 DROP TABLE escape1;
 DROP TABLE escape_raw;
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/escape2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/escape2.q?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/escape2.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/escape2.q Tue Oct 30 21:35:34 2012
@@ -2,6 +2,10 @@ set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions.pernode=200;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 set hive.default.fileformat=RCFILE;
+
+-- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
 DROP TABLE IF EXISTS escape2;
 DROP TABLE IF EXISTS escape_raw;
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/input_part10.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/input_part10.q?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/input_part10.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/input_part10.q Tue Oct 30 21:35:34 2012
@@ -1,3 +1,6 @@
+-- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
 CREATE TABLE part_special (
   a STRING,
   b STRING

Added: hive/trunk/ql/src/test/queries/clientpositive/input_part10_win.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/input_part10_win.q?rev=1403878&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/input_part10_win.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/input_part10_win.q Tue Oct 30 21:35:34 2012
@@ -0,0 +1,23 @@
+-- INCLUDE_OS_WINDOWS
+-- included only on windows because of difference in file name encoding logic
+
+CREATE TABLE part_special (
+  a STRING,
+  b STRING
+) PARTITIONED BY (
+  ds STRING,
+  ts STRING
+);
+
+EXPLAIN
+INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
+SELECT 1, 2 FROM src LIMIT 1;
+
+INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
+SELECT 1, 2 FROM src LIMIT 1;
+
+DESCRIBE EXTENDED part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455');
+
+SELECT * FROM part_special WHERE ds='2008 04 08' AND ts = '10:11:12=455';
+
+

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q Tue Oct 30 21:35:34 2012
@@ -1,3 +1,5 @@
+-- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
 
 create table if not exists nzhang_part14 (key string) 
   partitioned by (value string);

Added: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14_win.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14_win.q?rev=1403878&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14_win.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14_win.q Tue Oct 30 21:35:34 2012
@@ -0,0 +1,38 @@
+-- INCLUDE_OS_WINDOWS
+-- included only on windows because of difference in file name encoding logic
+
+
+create table if not exists nzhang_part14 (key string) 
+  partitioned by (value string);
+
+describe extended nzhang_part14;
+
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+explain
+insert overwrite table nzhang_part14 partition(value) 
+select key, value from (
+  select 'k1' as key, cast(null as string) as value from src limit 2
+  union all
+  select 'k2' as key, '' as value from src limit 2
+  union all 
+  select 'k3' as key, ' ' as value from src limit 2
+) T;
+
+insert overwrite table nzhang_part14 partition(value) 
+select key, value from (
+  select 'k1' as key, cast(null as string) as value from src limit 2
+  union all
+  select 'k2' as key, '' as value from src limit 2
+  union all 
+  select 'k3' as key, ' ' as value from src limit 2
+) T;
+
+
+show partitions nzhang_part14;
+
+select * from nzhang_part14 where value <> 'a'
+order by key, value;
+
+

Modified: hive/trunk/ql/src/test/results/clientpositive/combine2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/combine2.q.out?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/combine2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/combine2.q.out Tue Oct 30 21:35:34 2012
@@ -2,9 +2,19 @@ PREHOOK: query: USE default
 PREHOOK: type: SWITCHDATABASE
 POSTHOOK: query: USE default
 POSTHOOK: type: SWITCHDATABASE
-PREHOOK: query: create table combine2(key string) partitioned by (value string)
+PREHOOK: query: -- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
+
+
+create table combine2(key string) partitioned by (value string)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table combine2(key string) partitioned by (value string)
+POSTHOOK: query: -- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
+
+
+create table combine2(key string) partitioned by (value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@combine2
 PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.22)

Added: hive/trunk/ql/src/test/results/clientpositive/combine2_win.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/combine2_win.q.out?rev=1403878&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/combine2_win.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/combine2_win.q.out Tue Oct 30 21:35:34 2012
@@ -0,0 +1,753 @@
+PREHOOK: query: -- INCLUDE_OS_WINDOWS
+
+create table combine2(key string) partitioned by (value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: -- INCLUDE_OS_WINDOWS
+
+create table combine2(key string) partitioned by (value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@combine2
+PREHOOK: query: insert overwrite table combine2 partition(value) 
+select * from (
+   select key, value from src where key < 10
+   union all 
+   select key, '|' as value from src where key = 11
+   union all
+   select key, '2010-04-21 09:45:00' value from src where key = 19) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@combine2
+POSTHOOK: query: insert overwrite table combine2 partition(value) 
+select * from (
+   select key, value from src where key < 10
+   union all 
+   select key, '|' as value from src where key = 11
+   union all
+   select key, '2010-04-21 09:45:00' value from src where key = 19) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@combine2@value=%7C
+POSTHOOK: Output: default@combine2@value=2010-04-21%2009%3A45%3A00
+POSTHOOK: Output: default@combine2@value=val_0
+POSTHOOK: Output: default@combine2@value=val_2
+POSTHOOK: Output: default@combine2@value=val_4
+POSTHOOK: Output: default@combine2@value=val_5
+POSTHOOK: Output: default@combine2@value=val_8
+POSTHOOK: Output: default@combine2@value=val_9
+POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: show partitions combine2
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions combine2
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+value=%7C
+value=2010-04-21%2009%3A45%3A00
+value=val_0
+value=val_2
+value=val_4
+value=val_5
+value=val_8
+value=val_9
+PREHOOK: query: explain
+select key, value from combine2 where value is not null order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, value from combine2 where value is not null order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME combine2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        combine2 
+          TableScan
+            alias: combine2
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Reduce Output Operator
+                key expressions:
+                      expr: _col0
+                      type: string
+                sort order: +
+                tag: -1
+                value expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+      Reduce Operator Tree:
+        Extract
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select key, value from combine2 where value is not null order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@combine2@value=%7C
+PREHOOK: Input: default@combine2@value=2010-04-21%2009%3A45%3A00
+PREHOOK: Input: default@combine2@value=val_0
+PREHOOK: Input: default@combine2@value=val_2
+PREHOOK: Input: default@combine2@value=val_4
+PREHOOK: Input: default@combine2@value=val_5
+PREHOOK: Input: default@combine2@value=val_8
+PREHOOK: Input: default@combine2@value=val_9
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value from combine2 where value is not null order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@combine2@value=%7C
+POSTHOOK: Input: default@combine2@value=2010-04-21%2009%3A45%3A00
+POSTHOOK: Input: default@combine2@value=val_0
+POSTHOOK: Input: default@combine2@value=val_2
+POSTHOOK: Input: default@combine2@value=val_4
+POSTHOOK: Input: default@combine2@value=val_5
+POSTHOOK: Input: default@combine2@value=val_8
+POSTHOOK: Input: default@combine2@value=val_9
+#### A masked pattern was here ####
+POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+0	val_0
+0	val_0
+0	val_0
+11	|
+19	2010-04-21 09:45:00
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: explain extended
+select count(1) from combine2 where value is not null
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select count(1) from combine2 where value is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME combine2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL value)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        combine2 
+          TableScan
+            alias: combine2
+            GatherStats: false
+            Select Operator
+              Group By Operator
+                aggregations:
+                      expr: count(1)
+                bucketGroup: false
+                mode: hash
+                outputColumnNames: _col0
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: bigint
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: value=%7C
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              value |
+            properties:
+              bucket_count -1
+              columns key
+              columns.types string
+#### A masked pattern was here ####
+              name default.combine2
+              numFiles 1
+              numPartitions 8
+              numRows 1
+              partition_columns value
+              rawDataSize 2
+              serialization.ddl struct combine2 { string key}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 3
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key
+                columns.types string
+#### A masked pattern was here ####
+                name default.combine2
+                numFiles 8
+                numPartitions 8
+                numRows 12
+                partition_columns value
+                rawDataSize 14
+                serialization.ddl struct combine2 { string key}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 26
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.combine2
+            name: default.combine2
+#### A masked pattern was here ####
+          Partition
+            base file name: value=2010-04-21%2009%3A45%3A00
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              value 2010-04-21 09:45:00
+            properties:
+              bucket_count -1
+              columns key
+              columns.types string
+#### A masked pattern was here ####
+              name default.combine2
+              numFiles 1
+              numPartitions 8
+              numRows 1
+              partition_columns value
+              rawDataSize 2
+              serialization.ddl struct combine2 { string key}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 3
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key
+                columns.types string
+#### A masked pattern was here ####
+                name default.combine2
+                numFiles 8
+                numPartitions 8
+                numRows 12
+                partition_columns value
+                rawDataSize 14
+                serialization.ddl struct combine2 { string key}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 26
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.combine2
+            name: default.combine2
+#### A masked pattern was here ####
+          Partition
+            base file name: value=val_0
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              value val_0
+            properties:
+              bucket_count -1
+              columns key
+              columns.types string
+#### A masked pattern was here ####
+              name default.combine2
+              numFiles 1
+              numPartitions 8
+              numRows 3
+              partition_columns value
+              rawDataSize 3
+              serialization.ddl struct combine2 { string key}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 6
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key
+                columns.types string
+#### A masked pattern was here ####
+                name default.combine2
+                numFiles 8
+                numPartitions 8
+                numRows 12
+                partition_columns value
+                rawDataSize 14
+                serialization.ddl struct combine2 { string key}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 26
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.combine2
+            name: default.combine2
+#### A masked pattern was here ####
+          Partition
+            base file name: value=val_2
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              value val_2
+            properties:
+              bucket_count -1
+              columns key
+              columns.types string
+#### A masked pattern was here ####
+              name default.combine2
+              numFiles 1
+              numPartitions 8
+              numRows 1
+              partition_columns value
+              rawDataSize 1
+              serialization.ddl struct combine2 { string key}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 2
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key
+                columns.types string
+#### A masked pattern was here ####
+                name default.combine2
+                numFiles 8
+                numPartitions 8
+                numRows 12
+                partition_columns value
+                rawDataSize 14
+                serialization.ddl struct combine2 { string key}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 26
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.combine2
+            name: default.combine2
+#### A masked pattern was here ####
+          Partition
+            base file name: value=val_4
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              value val_4
+            properties:
+              bucket_count -1
+              columns key
+              columns.types string
+#### A masked pattern was here ####
+              name default.combine2
+              numFiles 1
+              numPartitions 8
+              numRows 1
+              partition_columns value
+              rawDataSize 1
+              serialization.ddl struct combine2 { string key}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 2
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key
+                columns.types string
+#### A masked pattern was here ####
+                name default.combine2
+                numFiles 8
+                numPartitions 8
+                numRows 12
+                partition_columns value
+                rawDataSize 14
+                serialization.ddl struct combine2 { string key}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 26
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.combine2
+            name: default.combine2
+#### A masked pattern was here ####
+          Partition
+            base file name: value=val_5
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              value val_5
+            properties:
+              bucket_count -1
+              columns key
+              columns.types string
+#### A masked pattern was here ####
+              name default.combine2
+              numFiles 1
+              numPartitions 8
+              numRows 3
+              partition_columns value
+              rawDataSize 3
+              serialization.ddl struct combine2 { string key}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 6
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key
+                columns.types string
+#### A masked pattern was here ####
+                name default.combine2
+                numFiles 8
+                numPartitions 8
+                numRows 12
+                partition_columns value
+                rawDataSize 14
+                serialization.ddl struct combine2 { string key}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 26
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.combine2
+            name: default.combine2
+#### A masked pattern was here ####
+          Partition
+            base file name: value=val_8
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              value val_8
+            properties:
+              bucket_count -1
+              columns key
+              columns.types string
+#### A masked pattern was here ####
+              name default.combine2
+              numFiles 1
+              numPartitions 8
+              numRows 1
+              partition_columns value
+              rawDataSize 1
+              serialization.ddl struct combine2 { string key}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 2
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key
+                columns.types string
+#### A masked pattern was here ####
+                name default.combine2
+                numFiles 8
+                numPartitions 8
+                numRows 12
+                partition_columns value
+                rawDataSize 14
+                serialization.ddl struct combine2 { string key}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 26
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.combine2
+            name: default.combine2
+#### A masked pattern was here ####
+          Partition
+            base file name: value=val_9
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              value val_9
+            properties:
+              bucket_count -1
+              columns key
+              columns.types string
+#### A masked pattern was here ####
+              name default.combine2
+              numFiles 1
+              numPartitions 8
+              numRows 1
+              partition_columns value
+              rawDataSize 1
+              serialization.ddl struct combine2 { string key}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 2
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key
+                columns.types string
+#### A masked pattern was here ####
+                name default.combine2
+                numFiles 8
+                numPartitions 8
+                numRows 12
+                partition_columns value
+                rawDataSize 14
+                serialization.ddl struct combine2 { string key}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 26
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.combine2
+            name: default.combine2
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(VALUE._col0)
+          bucketGroup: false
+          mode: mergepartial
+          outputColumnNames: _col0
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: bigint
+            outputColumnNames: _col0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    columns _col0
+                    columns.types bigint
+                    escape.delim \
+                    serialization.format 1
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select count(1) from combine2 where value is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@combine2@value=%7C
+PREHOOK: Input: default@combine2@value=2010-04-21%2009%3A45%3A00
+PREHOOK: Input: default@combine2@value=val_0
+PREHOOK: Input: default@combine2@value=val_2
+PREHOOK: Input: default@combine2@value=val_4
+PREHOOK: Input: default@combine2@value=val_5
+PREHOOK: Input: default@combine2@value=val_8
+PREHOOK: Input: default@combine2@value=val_9
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from combine2 where value is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@combine2@value=%7C
+POSTHOOK: Input: default@combine2@value=2010-04-21%2009%3A45%3A00
+POSTHOOK: Input: default@combine2@value=val_0
+POSTHOOK: Input: default@combine2@value=val_2
+POSTHOOK: Input: default@combine2@value=val_4
+POSTHOOK: Input: default@combine2@value=val_5
+POSTHOOK: Input: default@combine2@value=val_8
+POSTHOOK: Input: default@combine2@value=val_9
+#### A masked pattern was here ####
+POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+12
+PREHOOK: query: explain
+select ds, count(1) from srcpart where ds is not null group by ds
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select ds, count(1) from srcpart where ds is not null group by ds
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL ds)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL ds))) (TOK_GROUPBY (TOK_TABLE_OR_COL ds))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        srcpart 
+          TableScan
+            alias: srcpart
+            Select Operator
+              expressions:
+                    expr: ds
+                    type: string
+              outputColumnNames: ds
+              Group By Operator
+                aggregations:
+                      expr: count(1)
+                bucketGroup: false
+                keys:
+                      expr: ds
+                      type: string
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions:
+                        expr: _col0
+                        type: string
+                  sort order: +
+                  Map-reduce partition columns:
+                        expr: _col0
+                        type: string
+                  tag: -1
+                  value expressions:
+                        expr: _col1
+                        type: bigint
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(VALUE._col0)
+          bucketGroup: false
+          keys:
+                expr: KEY._col0
+                type: string
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: bigint
+            outputColumnNames: _col0, _col1
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select ds, count(1) from srcpart where ds is not null group by ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select ds, count(1) from srcpart where ds is not null group by ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
+2008-04-08	1000
+2008-04-09	1000

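A note for readers decoding the combine2 plan above: the Group By Operator appears twice because Hive splits count(1) into a map-side partial aggregation (mode: hash) and a reduce-side merge of the partials (mode: mergepartial). A stand-alone sketch of that split, in plain Java with hypothetical method names (mapSidePartials and mergePartials are illustrative, not Hive API):

import java.util.HashMap;
import java.util.Map;

public class TwoPhaseGroupBySketch {
  // Map side ("mode: hash"): build partial counts per ds key; these are
  // what the Reduce Output Operator ships across the shuffle.
  static Map<String, Long> mapSidePartials(Iterable<String> dsValues) {
    Map<String, Long> partial = new HashMap<>();
    for (String ds : dsValues) {
      if (ds != null) {                    // WHERE ds IS NOT NULL
        partial.merge(ds, 1L, Long::sum);
      }
    }
    return partial;
  }

  // Reduce side ("mode: mergepartial"): sum the partials that arrive
  // for a single key, yielding the final count for that ds.
  static long mergePartials(Iterable<Long> partialCounts) {
    long total = 0;
    for (long c : partialCounts) {
      total += c;
    }
    return total;
  }
}
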
Modified: hive/trunk/ql/src/test/results/clientpositive/escape1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/escape1.q.out?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
Files hive/trunk/ql/src/test/results/clientpositive/escape1.q.out (original) and hive/trunk/ql/src/test/results/clientpositive/escape1.q.out Tue Oct 30 21:35:34 2012 differ

Modified: hive/trunk/ql/src/test/results/clientpositive/escape2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/escape2.q.out?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
Files hive/trunk/ql/src/test/results/clientpositive/escape2.q.out (original) and hive/trunk/ql/src/test/results/clientpositive/escape2.q.out Tue Oct 30 21:35:34 2012 differ

Modified: hive/trunk/ql/src/test/results/clientpositive/input_part10.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/input_part10.q.out?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/input_part10.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/input_part10.q.out Tue Oct 30 21:35:34 2012
@@ -1,4 +1,7 @@
-PREHOOK: query: CREATE TABLE part_special (
+PREHOOK: query: -- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
+CREATE TABLE part_special (
   a STRING,
   b STRING
 ) PARTITIONED BY (
@@ -6,7 +9,10 @@ PREHOOK: query: CREATE TABLE part_specia
   ts STRING
 )
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE part_special (
+POSTHOOK: query: -- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
+CREATE TABLE part_special (
   a STRING,
   b STRING
 ) PARTITIONED BY (

Added: hive/trunk/ql/src/test/results/clientpositive/input_part10_win.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/input_part10_win.q.out?rev=1403878&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/input_part10_win.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/input_part10_win.q.out Tue Oct 30 21:35:34 2012
@@ -0,0 +1,124 @@
+PREHOOK: query: -- INCLUDE_OS_WINDOWS
+
+CREATE TABLE part_special (
+  a STRING,
+  b STRING
+) PARTITIONED BY (
+  ds STRING,
+  ts STRING
+)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: -- INCLUDE_OS_WINDOWS
+
+CREATE TABLE part_special (
+  a STRING,
+  b STRING
+) PARTITIONED BY (
+  ds STRING,
+  ts STRING
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@part_special
+PREHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
+SELECT 1, 2 FROM src LIMIT 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
+SELECT 1, 2 FROM src LIMIT 1
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME part_special) (TOK_PARTSPEC (TOK_PARTVAL ds '2008 04 08') (TOK_PARTVAL ts '10:11:12=455')))) (TOK_SELECT (TOK_SELEXPR 1) (TOK_SELEXPR 2)) (TOK_LIMIT 1)))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: 1
+                    type: int
+                    expr: 2
+                    type: int
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: int
+                        expr: _col1
+                        type: int
+      Reduce Operator Tree:
+        Extract
+          Limit
+            File Output Operator
+              compressed: false
+              GlobalTableId: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.part_special
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008 04 08
+            ts 10:11:12=455
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.part_special
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
+SELECT 1, 2 FROM src LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455
+POSTHOOK: query: INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
+SELECT 1, 2 FROM src LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455
+POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).a SIMPLE []
+POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).b SIMPLE []
+PREHOOK: query: DESCRIBE EXTENDED part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE EXTENDED part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).a SIMPLE []
+POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).b SIMPLE []
+a	string	
+b	string	
+ds	string	
+ts	string	
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: SELECT * FROM part_special WHERE ds='2008 04 08' AND ts = '10:11:12=455'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM part_special WHERE ds='2008 04 08' AND ts = '10:11:12=455'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455
+#### A masked pattern was here ####
+POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).a SIMPLE []
+POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).b SIMPLE []
+1	2	2008 04 08	10:11:12=455

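The %20, %3A and %3D sequences in the partition paths above (ds=2008%2004%2008/ts=10%3A11%3A12%3D455) are Hive's percent-escaping of characters that are unsafe in directory names: each escaped character is replaced by '%' plus its two-digit hex code. A minimal self-contained sketch of the scheme (the class name and the abridged character set are illustrative; the real character set in Hive is larger and platform-dependent, which is exactly why these tests fork into _win variants):

import java.util.BitSet;

public class PartitionNameEscapeSketch {
  private static final BitSet CHAR_TO_ESCAPE = new BitSet(128);
  static {
    for (char c = 0; c < ' '; c++) {      // all control characters
      CHAR_TO_ESCAPE.set(c);
    }
    // Abridged, illustrative set of shell/filesystem-special characters.
    for (char c : new char[] {'"', '#', '%', '\'', '*', '/', ':', '=',
                              '?', '\\', ' ', '\u007F'}) {
      CHAR_TO_ESCAPE.set(c);
    }
  }

  static String escapePathName(String path) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < path.length(); i++) {
      char c = path.charAt(i);
      if (c < 128 && CHAR_TO_ESCAPE.get(c)) {
        sb.append('%').append(String.format("%02X", (int) c));
      } else {
        sb.append(c);
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(escapePathName("2008 04 08"));    // 2008%2004%2008
    System.out.println(escapePathName("10:11:12=455"));  // 10%3A11%3A12%3D455
  }
}
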
Modified: hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14.q.out?rev=1403878&r1=1403877&r2=1403878&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14.q.out Tue Oct 30 21:35:34 2012
@@ -1,7 +1,13 @@
-PREHOOK: query: create table if not exists nzhang_part14 (key string) 
+PREHOOK: query: -- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
+create table if not exists nzhang_part14 (key string) 
   partitioned by (value string)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table if not exists nzhang_part14 (key string) 
+POSTHOOK: query: -- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
+create table if not exists nzhang_part14 (key string) 
   partitioned by (value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@nzhang_part14

Added: hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out?rev=1403878&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out Tue Oct 30 21:35:34 2012
@@ -0,0 +1,299 @@
+PREHOOK: query: -- INCLUDE_OS_WINDOWS
+
+create table if not exists nzhang_part14 (key string) 
+  partitioned by (value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: -- INCLUDE_OS_WINDOWS
+
+create table if not exists nzhang_part14 (key string) 
+  partitioned by (value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: describe extended nzhang_part14
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended nzhang_part14
+POSTHOOK: type: DESCTABLE
+key	string	
+value	string	
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: explain
+insert overwrite table nzhang_part14 partition(value) 
+select key, value from (
+  select 'k1' as key, cast(null as string) as value from src limit 2
+  union all
+  select 'k2' as key, '' as value from src limit 2
+  union all 
+  select 'k3' as key, ' ' as value from src limit 2
+) T
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table nzhang_part14 partition(value) 
+select key, value from (
+  select 'k1' as key, cast(null as string) as value from src limit 2
+  union all
+  select 'k2' as key, '' as value from src limit 2
+  union all 
+  select 'k3' as key, ' ' as value from src limit 2
+) T
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'k1' key) (TOK_SELEXPR (TOK_FUNCTION TOK_STRING TOK_NULL) value)) (TOK_LIMIT 2))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'k2' key) (TOK_SELEXPR '' value)) (TOK_LIMIT 2)))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'k3' key) (TOK_SELEXPR ' ' value)) (TOK_LIMIT 2)))) T)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME nzhang_part14) (TOK_PARTSPEC (TOK_PARTVAL value)))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-7, Stage-8
+  Stage-6 depends on stages: Stage-2 , consists of Stage-5, Stage-4
+  Stage-5
+  Stage-0 depends on stages: Stage-5, Stage-4
+  Stage-3 depends on stages: Stage-0
+  Stage-4
+  Stage-7 is a root stage
+  Stage-8 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        null-subquery1-subquery2:t-subquery1-subquery2:src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: 'k2'
+                    type: string
+                    expr: ''
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+#### A masked pattern was here ####
+          TableScan
+            Union
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                outputColumnNames: _col0, _col1
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part14
+#### A masked pattern was here ####
+          TableScan
+            Union
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                outputColumnNames: _col0, _col1
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part14
+#### A masked pattern was here ####
+          TableScan
+            Union
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                outputColumnNames: _col0, _col1
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part14
+
+  Stage: Stage-6
+    Conditional Operator
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            value 
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.nzhang_part14
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+  Stage: Stage-4
+    Map Reduce
+      Alias -> Map Operator Tree:
+#### A masked pattern was here ####
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part14
+
+  Stage: Stage-7
+    Map Reduce
+      Alias -> Map Operator Tree:
+        null-subquery2:t-subquery2:src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: 'k3'
+                    type: string
+                    expr: ' '
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-8
+    Map Reduce
+      Alias -> Map Operator Tree:
+        null-subquery1-subquery1:t-subquery1-subquery1:src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: 'k1'
+                    type: string
+                    expr: UDFToString(null)
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+
+PREHOOK: query: insert overwrite table nzhang_part14 partition(value) 
+select key, value from (
+  select 'k1' as key, cast(null as string) as value from src limit 2
+  union all
+  select 'k2' as key, '' as value from src limit 2
+  union all 
+  select 'k3' as key, ' ' as value from src limit 2
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert overwrite table nzhang_part14 partition(value) 
+select key, value from (
+  select 'k1' as key, cast(null as string) as value from src limit 2
+  union all
+  select 'k2' as key, '' as value from src limit 2
+  union all 
+  select 'k3' as key, ' ' as value from src limit 2
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@value=%20
+POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
+PREHOOK: query: show partitions nzhang_part14
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions nzhang_part14
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
+value=%20
+value=__HIVE_DEFAULT_PARTITION__
+PREHOOK: query: select * from nzhang_part14 where value <> 'a'
+order by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part14@value=%20
+PREHOOK: Input: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_part14 where value <> 'a'
+order by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part14@value=%20
+POSTHOOK: Input: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
+k1	__HIVE_DEFAULT_PARTITION__
+k1	__HIVE_DEFAULT_PARTITION__
+k2	__HIVE_DEFAULT_PARTITION__
+k2	__HIVE_DEFAULT_PARTITION__
+k3	 
+k3
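
A closing note on the output just above: the k1 rows (null partition value) and the k2 rows (empty-string value) both land in __HIVE_DEFAULT_PARTITION__, while k3's single-space value survives as a real partition whose directory name is escaped to value=%20. A self-contained sketch of that mapping (partitionDirName and the abridged escape set are hypothetical, for illustration only):

public class DynPartNameSketch {
  static final String DEFAULT_PART_NAME = "__HIVE_DEFAULT_PARTITION__";

  // null and "" both collapse to the default partition; anything else is
  // kept and percent-escaped, so a single space becomes %20.
  static String partitionDirName(String value) {
    if (value == null || value.isEmpty()) {
      return "value=" + DEFAULT_PART_NAME;
    }
    StringBuilder sb = new StringBuilder("value=");
    for (char c : value.toCharArray()) {
      if (c == ' ' || c == ':' || c == '=' || c == '%') {  // abridged set
        sb.append('%').append(String.format("%02X", (int) c));
      } else {
        sb.append(c);
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(partitionDirName(null)); // value=__HIVE_DEFAULT_PARTITION__
    System.out.println(partitionDirName(""));   // value=__HIVE_DEFAULT_PARTITION__
    System.out.println(partitionDirName(" "));  // value=%20
  }
}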