Posted to commits@hive.apache.org by ha...@apache.org on 2014/05/19 08:44:33 UTC

svn commit: r1595755 - in /hive/trunk/ql/src: java/org/apache/hadoop/hive/ql/exec/ java/org/apache/hadoop/hive/ql/parse/ java/org/apache/hadoop/hive/ql/udf/ptf/ test/queries/clientpositive/ test/results/clientpositive/

Author: hashutosh
Date: Mon May 19 06:44:33 2014
New Revision: 1595755

URL: http://svn.apache.org/r1595755
Log:
HIVE-6999 : Add streaming mode to PTFs (Harish Butani via Ashutosh Chauhan)

Added:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopStreaming.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMapStreaming.java
    hive/trunk/ql/src/test/queries/clientpositive/ptf_streaming.q
    hive/trunk/ql/src/test/results/clientpositive/ptf_streaming.q.out
Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
    hive/trunk/ql/src/test/results/clientpositive/show_functions.q.out

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java?rev=1595755&r1=1595754&r2=1595755&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java Mon May 19 06:44:33 2014
@@ -116,7 +116,9 @@ import org.apache.hadoop.hive.ql.udf.UDF
 import org.apache.hadoop.hive.ql.udf.generic.*;
 import org.apache.hadoop.hive.ql.udf.ptf.MatchPath.MatchPathResolver;
 import org.apache.hadoop.hive.ql.udf.ptf.Noop.NoopResolver;
+import org.apache.hadoop.hive.ql.udf.ptf.NoopStreaming.NoopStreamingResolver;
 import org.apache.hadoop.hive.ql.udf.ptf.NoopWithMap.NoopWithMapResolver;
+import org.apache.hadoop.hive.ql.udf.ptf.NoopWithMapStreaming.NoopWithMapStreamingResolver;
 import org.apache.hadoop.hive.ql.udf.ptf.TableFunctionResolver;
 import org.apache.hadoop.hive.ql.udf.ptf.WindowingTableFunction.WindowingTableFunctionResolver;
 import org.apache.hadoop.hive.ql.udf.xml.GenericUDFXPath;
@@ -170,8 +172,10 @@ public final class FunctionRegistry {
 
 
   public static final String WINDOWING_TABLE_FUNCTION = "windowingtablefunction";
-  public static final String NOOP_TABLE_FUNCTION = "noop";
-  public static final String NOOP_MAP_TABLE_FUNCTION = "noopwithmap";
+  private static final String NOOP_TABLE_FUNCTION = "noop";
+  private static final String NOOP_MAP_TABLE_FUNCTION = "noopwithmap";
+  private static final String NOOP_STREAMING_TABLE_FUNCTION = "noopstreaming";
+  private static final String NOOP_STREAMING_MAP_TABLE_FUNCTION = "noopwithmapstreaming";
 
   static Map<String, WindowFunctionInfo> windowFunctions = Collections.synchronizedMap(new LinkedHashMap<String, WindowFunctionInfo>());
 
@@ -442,6 +446,8 @@ public final class FunctionRegistry {
 
     registerTableFunction(NOOP_TABLE_FUNCTION, NoopResolver.class);
     registerTableFunction(NOOP_MAP_TABLE_FUNCTION, NoopWithMapResolver.class);
+    registerTableFunction(NOOP_STREAMING_TABLE_FUNCTION, NoopStreamingResolver.class);
+    registerTableFunction(NOOP_STREAMING_MAP_TABLE_FUNCTION, NoopWithMapStreamingResolver.class);
     registerTableFunction(WINDOWING_TABLE_FUNCTION,  WindowingTableFunctionResolver.class);
     registerTableFunction("matchpath", MatchPathResolver.class);
   }
@@ -1927,10 +1933,13 @@ public final class FunctionRegistry {
   {
     return getTableFunctionResolver(WINDOWING_TABLE_FUNCTION);
   }
-
-  public static TableFunctionResolver getNoopTableFunction()
-  {
-    return getTableFunctionResolver(NOOP_TABLE_FUNCTION);
+  
+  public static boolean isNoopFunction(String fnName) {
+    fnName = fnName.toLowerCase();
+    return fnName.equals(NOOP_MAP_TABLE_FUNCTION) ||
+        fnName.equals(NOOP_STREAMING_MAP_TABLE_FUNCTION) ||
+        fnName.equals(NOOP_TABLE_FUNCTION) ||
+        fnName.equals(NOOP_STREAMING_TABLE_FUNCTION);
   }
 
   public static void registerTableFunction(String name, Class<? extends TableFunctionResolver> tFnCls)
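
For context, the two new resolvers are wired through the same public registration hook shown in this hunk. Below is a minimal sketch, not part of this commit, of registering one of the new resolvers under an additional alias (the alias name is illustrative); at the query level the same resolver can instead be exposed via CREATE TEMPORARY FUNCTION, as the new ptf_streaming.q test further down does.

    import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
    import org.apache.hadoop.hive.ql.udf.ptf.NoopStreaming.NoopStreamingResolver;

    public class RegisterStreamingAlias {
      public static void main(String[] args) {
        // Uses the same public static registerTableFunction(name, resolverClass) entry point
        // exercised by the built-in registrations above; "noopstreaming_alias" is illustrative.
        FunctionRegistry.registerTableFunction("noopstreaming_alias", NoopStreamingResolver.class);
      }
    }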

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java?rev=1595755&r1=1595754&r2=1595755&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java Mon May 19 06:44:33 2014
@@ -19,8 +19,6 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
-import java.util.ArrayDeque;
-import java.util.Deque;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Stack;
@@ -30,7 +28,6 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.ql.exec.PTFPartition.PTFPartitionIterator;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.PTFDesc;
 import org.apache.hadoop.hive.ql.plan.PTFDeserializer;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -49,14 +46,18 @@ import org.apache.hadoop.hive.serde2.obj
 public class PTFOperator extends Operator<PTFDesc> implements Serializable {
 
 	private static final long serialVersionUID = 1L;
-	PTFPartition inputPart;
 	boolean isMapOperator;
 
 	transient KeyWrapperFactory keyWrapperFactory;
 	protected transient KeyWrapper currentKeys;
 	protected transient KeyWrapper newKeys;
+	/*
+	 * For map-side invocation of PTFs, we cannot rely on the currentKeys null check
+	 * to decide when to invoke startPartition in streaming mode. Hence this extra flag.
+	 */
+	transient boolean firstMapRow;
 	transient Configuration hiveConf;
-
+	transient PTFInvocation ptfInvocation;
 
 	/*
 	 * 1. Find out if the operator is invoked at Map-Side or Reduce-side
@@ -67,12 +68,10 @@ public class PTFOperator extends Operato
 	@Override
 	protected void initializeOp(Configuration jobConf) throws HiveException {
 		hiveConf = jobConf;
-    // if the parent is ExtractOperator, this invocation is from reduce-side
-		Operator<? extends OperatorDesc> parentOp = getParentOperators().get(0);
+		// if the parent is ExtractOperator, this invocation is from reduce-side
 		isMapOperator = conf.isMapSide();
 
 		reconstructQueryDef(hiveConf);
-    inputPart = createFirstPartitionForChain(inputObjInspectors[0], isMapOperator);
 
 		if (isMapOperator) {
 			PartitionedTableFunctionDef tDef = conf.getStartOfChain();
@@ -82,6 +81,9 @@ public class PTFOperator extends Operato
 		}
 
 		setupKeysWrapper(inputObjInspectors[0]);
+		
+		ptfInvocation = setupChain();
+		firstMapRow = true;
 
 		super.initializeOp(jobConf);
 	}
@@ -89,21 +91,8 @@ public class PTFOperator extends Operato
 	@Override
 	protected void closeOp(boolean abort) throws HiveException {
 		super.closeOp(abort);
-    if(inputPart.size() != 0){
-      if (isMapOperator) {
-        processMapFunction();
-      } else {
-        processInputPartition();
-      }
-    }
-    inputPart.close();
-    inputPart = null;
-
-    for (PTFInputDef iDef = conf.getFuncDef(); iDef != null; iDef = iDef.getInput()) {
-      if (iDef instanceof PartitionedTableFunctionDef) {
-        ((PartitionedTableFunctionDef)iDef).getTFunction().close();
-      }
-    }
+    ptfInvocation.finishPartition();
+    ptfInvocation.close();
   }
 
 	@Override
@@ -117,26 +106,28 @@ public class PTFOperator extends Operato
        *  - reset input Partition
        * - set currentKey to the newKey if it is null or has changed.
        */
-      newKeys.getNewKey(row, inputPart.getInputOI());
+      newKeys.getNewKey(row, inputObjInspectors[0]);
       boolean keysAreEqual = (currentKeys != null && newKeys != null)?
               newKeys.equals(currentKeys) : false;
 
       if (currentKeys != null && !keysAreEqual) {
-        processInputPartition();
-        inputPart.reset();
+        ptfInvocation.finishPartition();
       }
 
       if (currentKeys == null || !keysAreEqual) {
+        ptfInvocation.startPartition();
         if (currentKeys == null) {
           currentKeys = newKeys.copyKey();
         } else {
           currentKeys.copyKey(newKeys);
         }
       }
+    } else if ( firstMapRow ) {
+      ptfInvocation.startPartition();
+      firstMapRow = false;
     }
 
-    // add row to current Partition.
-    inputPart.append(row);
+    ptfInvocation.processRow(row);
 	}
 
 	/**
@@ -179,28 +170,6 @@ public class PTFOperator extends Operato
 	  newKeys = keyWrapperFactory.getKeyWrapper();
 	}
 
-	protected void processInputPartition() throws HiveException {
-    Iterator<Object> pItr = executeChain(inputPart);
-
-    while (pItr.hasNext()) {
-      Object oRow = pItr.next();
-      forward(oRow, outputObjInspector);
-    }
-	}
-
-	protected void processMapFunction() throws HiveException {
-	  PartitionedTableFunctionDef tDef = conf.getStartOfChain();
-
-    Iterator<Object> pItr = tDef.getTFunction().canIterateOutput() ?
-        tDef.getTFunction().transformRawInputIterator(inputPart.iterator()) :
-          tDef.getTFunction().transformRawInput(inputPart).iterator();
-
-    while (pItr.hasNext()) {
-      Object oRow = pItr.next();
-      forward(oRow, outputObjInspector);
-    }
-	}
-
 	/**
 	 * @return the name of the operator
 	 */
@@ -218,19 +187,8 @@ public class PTFOperator extends Operato
 	public OperatorType getType() {
 		return OperatorType.PTF;
 	}
-
-	 /**
-   * For all the table functions to be applied to the input
-   * hive table or query, push them on a stack.
-   * For each table function popped out of the stack,
-   * execute the function on the input partition
-   * and return an output partition.
-   * @param part
-   * @return
-   * @throws HiveException
-   */
-  private Iterator<Object> executeChain(PTFPartition part)
-      throws HiveException {
+  
+  private PTFInvocation setupChain() {
     Stack<PartitionedTableFunctionDef> fnDefs = new Stack<PartitionedTableFunctionDef>();
     PTFInputDef iDef = conf.getFuncDef();
 
@@ -238,62 +196,17 @@ public class PTFOperator extends Operato
       fnDefs.push((PartitionedTableFunctionDef) iDef);
       iDef = ((PartitionedTableFunctionDef) iDef).getInput();
     }
-
-    PartitionedTableFunctionDef currFnDef;
-    int i = fnDefs.size();
-    while (i > 1) {
-      currFnDef = fnDefs.pop();
-      part = currFnDef.getTFunction().execute(part);
-      i--;
-    }
-
-    currFnDef = fnDefs.pop();
-    if (!currFnDef.getTFunction().canIterateOutput()) {
-      part = currFnDef.getTFunction().execute(part);
-      return part.iterator();
-    } else {
-      return currFnDef.getTFunction().iterator(part.iterator());
+    
+    PTFInvocation curr = null, first = null;
+    
+    while(!fnDefs.isEmpty()) {
+      PartitionedTableFunctionDef currFn = fnDefs.pop();
+      curr = new PTFInvocation(curr, currFn.getTFunction());
+      if ( first == null ) {
+        first = curr;
+      }
     }
-
-  }
-
-
-  /**
-   * Create a new Partition.
-   * A partition has 2 OIs: the OI for the rows being put in and the OI for the rows
-   * coming out. You specify the output OI by giving the Serde to use to Serialize.
-   * Typically these 2 OIs are the same; but not always. For the
-   * first PTF in a chain the OI of the incoming rows is dictated by the Parent Op
-   * to this PTFOp. The output OI from the Partition is typically LazyBinaryStruct, but
-   * not always. In the case of Noop/NoopMap we keep the Strcuture the same as
-   * what is given to us.
-   * <p>
-   * The Partition we want to create here is for feeding the First table function in the chain.
-   * So for map-side processing use the Serde from the output Shape its InputDef.
-   * For reduce-side processing use the Serde from its RawInputShape(the shape
-   * after map-side processing).
-   * @param oi
-   * @param hiveConf
-   * @param isMapSide
-   * @return
-   * @throws HiveException
-   */
-  public PTFPartition createFirstPartitionForChain(ObjectInspector oi,
-    boolean isMapSide) throws HiveException {
-    PartitionedTableFunctionDef tabDef = conf.getStartOfChain();
-
-    PTFPartition part = null;
-    SerDe serde = isMapSide ? tabDef.getInput().getOutputShape().getSerde() :
-      tabDef.getRawInputShape().getSerde();
-    StructObjectInspector outputOI = isMapSide ? tabDef.getInput().getOutputShape().getOI() :
-      tabDef.getRawInputShape().getOI();
-    part = PTFPartition.create(conf.getCfg(),
-        serde,
-        (StructObjectInspector) oi,
-        outputOI);
-
-    return part;
-
+    return first;
   }
 
   public static void connectLeadLagFunctionsToPartition(PTFDesc ptfDesc,
@@ -308,5 +221,190 @@ public class PTFOperator extends Operato
       llFn.setpItr(pItr);
     }
   }
-
+  
+  /*
+   * Responsible for the flow of rows through the PTF Chain.
+   * An Invocation wraps a TableFunction. 
+   * The PTFOp hands the chain each row through the processRow call. 
+   * It also notifies the chain of when a Partition starts/finishes.
+   * 
+   * There are several combinations depending on
+   * whether the TableFunction and its successor support Streaming or Batch mode.
+   * 
+   * Combination 1: Streaming + Streaming
+   * - Start Partition: invoke startPartition on tabFn.
+   * - Process Row: invoke process Row on tabFn. 
+   *   Any output rows hand to next tabFn in chain or forward to next Operator.
+   * - Finish Partition: invoke finishPartition on tabFn.
+   *   Any output rows hand to next tabFn in chain or forward to next Operator.
+   *   
+   * Combination 2: Streaming + Batch
+   * same as Combination 1
+   * 
+   * Combination 3: Batch + Batch
+   * - Start Partition: create or reset the Input Partition for the tabFn.
+   *   The caveat is: if prev is also batch and is not providing an Output Iterator,
+   *   then we can just use its Output Partition.
+   * - Process Row: collect row in Input Partition
+   * - Finish Partition : invoke evaluate on tabFn on Input Partition
+   *   If function gives an Output Partition: set it on next Invocation's Input Partition
+   *   If function gives an Output Iterator: iterate and call processRow on next Invocation.
+   *   For last Invocation in chain: forward rows to next Operator.
+   *   
+   * Combination 4: Batch + Streaming
+   * Similar to Combination 3, except the Finish Partition behavior is slightly different:
+   * - Finish Partition : invoke evaluate on tabFn on Input Partition
+   *   iterate output rows: hand to next tabFn in chain or forward to next Operator.
+   * 
+   */
+  class PTFInvocation {
+    
+    PTFInvocation prev;
+    PTFInvocation next;
+    TableFunctionEvaluator tabFn;
+    PTFPartition inputPart;
+    PTFPartition outputPart;
+    Iterator<Object> outputPartRowsItr;
+    
+    public PTFInvocation(PTFInvocation prev, TableFunctionEvaluator tabFn) {
+      this.prev = prev;
+      this.tabFn = tabFn;
+      if ( prev != null ) {
+        prev.next = this;
+      }
+    }
+    
+    boolean isOutputIterator() {
+      return tabFn.canAcceptInputAsStream() || tabFn.canIterateOutput();
+    }
+    
+    boolean isStreaming() {
+      return tabFn.canAcceptInputAsStream();
+    }
+    
+    void startPartition() throws HiveException {
+      if ( isStreaming() ) {
+        tabFn.startPartition();
+      } else {
+        if ( prev == null || prev.isOutputIterator() ) {
+          if ( inputPart == null ) {
+            createInputPartition();
+          } else {
+            inputPart.reset();
+          }
+        }
+      }
+      if ( next != null ) {
+        next.startPartition();
+      }
+    }
+    
+    void processRow(Object row) throws HiveException {
+      if ( isStreaming() ) {
+        if ( prev == null ) {
+          /*
+           * this is needed because during Translation we are still assuming that rows
+           * are collected into a PTFPartition.
+           * @Todo make translation handle the case when the first PTF is Streaming.
+           */
+          row = ObjectInspectorUtils.copyToStandardObject(row, inputObjInspectors[0], 
+              ObjectInspectorCopyOption.WRITABLE);
+        }
+        handleOutputRows(tabFn.processRow(row));
+      } else {
+        inputPart.append(row);
+      }
+    }
+    
+    void handleOutputRows(List<Object> outRows) throws HiveException {
+      if ( outRows != null ) {
+        for (Object orow : outRows ) {
+          if ( next != null ) {
+            next.processRow(orow);
+          } else {
+            forward(orow, outputObjInspector);
+          }
+        }
+      }
+    }
+    
+    void finishPartition() throws HiveException {
+      if ( isStreaming() ) {
+        handleOutputRows(tabFn.finishPartition());
+      } else {
+        if ( tabFn.canIterateOutput() ) {
+          outputPartRowsItr = tabFn.iterator(inputPart.iterator());
+        } else {
+          outputPart = tabFn.execute(inputPart);
+          outputPartRowsItr = outputPart.iterator();
+        }
+        if ( next != null ) {
+          if (!next.isStreaming() && !isOutputIterator() ) {
+            next.inputPart = outputPart;
+          } else {
+            while(outputPartRowsItr.hasNext() ) {
+              next.processRow(outputPartRowsItr.next());
+            }
+          }
+        }
+      }
+      
+      if ( next != null ) {
+        next.finishPartition();
+      } else {
+        if (!isStreaming() ) {
+          while(outputPartRowsItr.hasNext() ) {
+            forward(outputPartRowsItr.next(), outputObjInspector);
+          }
+        }
+      }
+    }
+    
+    /**
+     * Create a new Partition.
+     * A partition has 2 OIs: the OI for the rows being put in and the OI for the rows
+     * coming out. You specify the output OI by giving the Serde to use to Serialize.
+     * Typically these 2 OIs are the same; but not always. For the
+     * first PTF in a chain the OI of the incoming rows is dictated by the Parent Op
+     * to this PTFOp. The output OI from the Partition is typically LazyBinaryStruct, but
+     * not always. In the case of Noop/NoopMap we keep the Structure the same as
+     * what is given to us.
+     * <p>
+     * The Partition we want to create here is for feeding the First table function in the chain.
+     * So for map-side processing use the Serde from the output Shape of its InputDef.
+     * For reduce-side processing use the Serde from its RawInputShape (the shape
+     * after map-side processing).
+     * @throws HiveException
+     */
+    private void createInputPartition() throws HiveException {
+      PartitionedTableFunctionDef tabDef = tabFn.getTableDef();
+      PTFInputDef inputDef = tabDef.getInput();
+      ObjectInspector inputOI = conf.getStartOfChain() == tabDef ? 
+          inputObjInspectors[0] : inputDef.getOutputShape().getOI();
+
+      SerDe serde = conf.isMapSide() ? tabDef.getInput().getOutputShape().getSerde() :
+        tabDef.getRawInputShape().getSerde();
+      StructObjectInspector outputOI = conf.isMapSide() ? tabDef.getInput().getOutputShape().getOI() :
+        tabDef.getRawInputShape().getOI();
+      inputPart = PTFPartition.create(conf.getCfg(),
+          serde,
+          (StructObjectInspector) inputOI,
+          outputOI);
+    }
+    
+    void close() {
+      if ( inputPart != null ) {
+        inputPart.close();
+      }
+      tabFn.close();
+      if ( next != null ) {
+        next.close();
+      }
+    }
+  }
+  
 }
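
To make the flow described in the PTFInvocation comment above concrete, the following sketch (illustration only, not code from this commit) shows the effective call sequence the operator issues for a single reduce-side partition; "chain" stands for the head PTFInvocation returned by setupChain(), and "partitionRows" for the rows of one partition.

    // Illustrative fragment only; PTFInvocation is an inner class of PTFOperator, so this
    // logic actually lives inside the operator (processOp/closeOp), not in a standalone method.
    void drainOnePartition(PTFInvocation chain, Iterable<Object> partitionRows)
        throws HiveException {
      chain.startPartition();          // recurses down the chain; batch fns create/reset inputPart
      for (Object row : partitionRows) {
        chain.processRow(row);         // streaming fns emit rows immediately, batch fns buffer them
      }
      chain.finishPartition();         // batch fns execute here; final rows are forwarded onward
    }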

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java?rev=1595755&r1=1595754&r2=1595755&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java Mon May 19 06:44:33 2014
@@ -630,9 +630,7 @@ public class PTFTranslator {
   private ShapeDetails setupTableFnShape(String fnName, ShapeDetails inpShape,
       StructObjectInspector OI, List<String> columnNames, RowResolver rr)
       throws SemanticException {
-    if (fnName.equals(FunctionRegistry.NOOP_TABLE_FUNCTION)
-        || fnName.equals(
-            FunctionRegistry.NOOP_MAP_TABLE_FUNCTION)) {
+    if (FunctionRegistry.isNoopFunction(fnName)) {
       return setupShapeForNoop(inpShape, OI, columnNames, rr);
     }
     return setupShape(OI, columnNames, rr);
@@ -882,8 +880,7 @@ public class PTFTranslator {
       StructObjectInspector rowObjectInspector,
       List<String> outputColNames, RowResolver inputRR) throws SemanticException {
 
-    if (tbFnName.equals(FunctionRegistry.NOOP_TABLE_FUNCTION) ||
-        tbFnName.equals(FunctionRegistry.NOOP_MAP_TABLE_FUNCTION)) {
+    if (FunctionRegistry.isNoopFunction(tbFnName)) {
       return buildRowResolverForNoop(tabAlias, rowObjectInspector, inputRR);
     }
 

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopStreaming.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopStreaming.java?rev=1595755&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopStreaming.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopStreaming.java Mon May 19 06:44:33 2014
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.ptf;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.PTFDesc;
+import org.apache.hadoop.hive.ql.plan.ptf.PartitionedTableFunctionDef;
+
+public class NoopStreaming extends Noop {
+  
+  List<Object> rows;
+  
+  NoopStreaming() {
+    rows = new ArrayList<Object>();
+  }
+  
+  public boolean canAcceptInputAsStream() {
+    return true;
+  } 
+  
+  public List<Object> processRow(Object row) throws HiveException {
+    if (!canAcceptInputAsStream() ) {
+      throw new HiveException(String.format(
+          "Internal error: PTF %s, doesn't support Streaming",
+          getClass().getName()));
+    }
+    rows.clear();
+    rows.add(row);
+    return rows;
+  }
+  
+  public static class NoopStreamingResolver extends NoopResolver {
+
+    @Override
+    protected TableFunctionEvaluator createEvaluator(PTFDesc ptfDesc, PartitionedTableFunctionDef tDef) {
+      return new NoopStreaming();
+    }
+  }
+}

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMapStreaming.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMapStreaming.java?rev=1595755&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMapStreaming.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NoopWithMapStreaming.java Mon May 19 06:44:33 2014
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.ptf;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.PTFDesc;
+import org.apache.hadoop.hive.ql.plan.ptf.PartitionedTableFunctionDef;
+
+public class NoopWithMapStreaming extends NoopWithMap {
+  List<Object> rows;
+  
+  NoopWithMapStreaming() {
+    rows = new ArrayList<Object>();
+  }
+  
+  public boolean canAcceptInputAsStream() {
+    return true;
+  } 
+  
+  public List<Object> processRow(Object row) throws HiveException {
+    if (!canAcceptInputAsStream() ) {
+      throw new HiveException(String.format(
+          "Internal error: PTF %s, doesn't support Streaming",
+          getClass().getName()));
+    }
+    rows.clear();
+    rows.add(row);
+    return rows;
+  }
+  
+  public static class NoopWithMapStreamingResolver extends NoopWithMapResolver {
+
+    @Override
+    protected TableFunctionEvaluator createEvaluator(PTFDesc ptfDesc, PartitionedTableFunctionDef tDef) {
+      return new NoopWithMapStreaming();
+    }
+  }
+}

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java?rev=1595755&r1=1595754&r2=1595755&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java Mon May 19 06:44:33 2014
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.udf.ptf;
 
 import java.util.Iterator;
+import java.util.List;
 
 import org.apache.hadoop.hive.ql.exec.PTFOperator;
 import org.apache.hadoop.hive.ql.exec.PTFPartition;
@@ -30,6 +31,32 @@ import org.apache.hadoop.hive.ql.plan.pt
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 
+/*
+ * Interface Design:
+ * A TableFunction provides 2 interfaces of execution 'Batch' and 'Streaming'.
+ * - In Batch mode the contract is Partition in - Partition Out
+ * - In Streaming mode the contract is a stream of processRow calls - each of which may return 0 or more rows.
+ * 
+ * A Partition is not just a batch of rows; it enables more than a single iteration over
+ * the input data: multiple passes, arbitrary access of input rows, and relative navigation between
+ * rows (e.g. for lead/lag fns). Most PTFs will work in batch mode.
+ *
+ * The Streaming mode gives up the capabilities of Partitions for the benefit of a smaller footprint
+ * and faster processing. Window Function processing is an example of this: when there are only Ranking
+ * functions, each row needs to be accessed once in the order it is provided, so there is no need
+ * to hold all input rows in a Partition. The pattern is: any time you only want to enhance/enrich
+ * an Input Row, Streaming mode is the right choice. This is the fundamental difference between Ranking
+ * fns and UDAFs: Ranking functions keep the original data intact whereas UDAFs only return aggregate
+ * information.
+ *
+ * Finally we have provided a 'mixed' mode where a non-Streaming TableFunction can provide its output
+ * as an Iterator. As far as we can tell, this is a special case for Windowing handling. If Windowing
+ * is the only or last TableFunction in a chain, it makes no sense to collect the output rows into an
+ * output Partition. We justify the pollution of the API by the observation that Windowing is a very
+ * common use case.
+ * 
+ */
+
 /**
  * Based on Hive {@link GenericUDAFEvaluator}. Break up the responsibility of the old AsbtractTableFunction
  * class into a Resolver and Evaluator.
@@ -66,7 +93,6 @@ public abstract class TableFunctionEvalu
   transient protected PTFPartition outputPartition;
 
   static {
-    //TODO is this a bug? The field is not named outputOI it is named OI
     PTFUtils.makeTransient(TableFunctionEvaluator.class, "outputOI", "rawInputOI");
   }
 
@@ -112,6 +138,9 @@ public abstract class TableFunctionEvalu
 
   public PTFPartition execute(PTFPartition iPart)
       throws HiveException {
+    if ( ptfDesc.isMapSide() ) {
+      return transformRawInput(iPart);
+    }
     PTFPartitionIterator<Object> pItr = iPart.iterator();
     PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, pItr);
 
@@ -129,7 +158,7 @@ public abstract class TableFunctionEvalu
 
   protected abstract void execute(PTFPartitionIterator<Object> pItr, PTFPartition oPart) throws HiveException;
 
-  public PTFPartition transformRawInput(PTFPartition iPart) throws HiveException {
+  protected PTFPartition transformRawInput(PTFPartition iPart) throws HiveException {
     if (!isTransformsRawInput()) {
       throw new HiveException(String.format("Internal Error: mapExecute called on function (%s)that has no Map Phase", tableDef.getName()));
     }
@@ -152,6 +181,11 @@ public abstract class TableFunctionEvalu
   }
 
   public Iterator<Object> iterator(PTFPartitionIterator<Object> pItr) throws HiveException {
+    
+    if ( ptfDesc.isMapSide() ) {
+      return transformRawInputIterator(pItr);
+    }
+    
     if (!canIterateOutput()) {
       throw new HiveException(
           "Internal error: iterator called on a PTF that cannot provide its output as an Iterator");
@@ -161,7 +195,7 @@ public abstract class TableFunctionEvalu
         getClass().getName()));
   }
   
-  public Iterator<Object> transformRawInputIterator(PTFPartitionIterator<Object> pItr) throws HiveException {
+  protected Iterator<Object> transformRawInputIterator(PTFPartitionIterator<Object> pItr) throws HiveException {
     if (!canIterateOutput()) {
       throw new HiveException(
           "Internal error: iterator called on a PTF that cannot provide its output as an Iterator");
@@ -170,6 +204,46 @@ public abstract class TableFunctionEvalu
         "Internal error: PTF %s, provides no iterator method",
         getClass().getName()));
   }
+  
+  /*
+   * A TableFunction may be able to accept its input as a stream.
+   * In this case the contract is:
+   * - startPartition must be invoked to give the PTF a chance to initialize stream processing.
+   * - each input row is passed in via a processRow (or processRows) invocation. processRow
+   *   can return 0 or more o/p rows.
+   * - finishPartition is invoked to give the PTF a chance to finish processing and return any 
+   *   remaining o/p rows.
+   */
+  public boolean canAcceptInputAsStream() {
+    return false;
+  }
+  
+  public void startPartition() throws HiveException {
+    if (!canAcceptInputAsStream() ) {
+      throw new HiveException(String.format(
+          "Internal error: PTF %s, doesn't support Streaming",
+          getClass().getName()));
+    }
+  }
+  
+  public List<Object> processRow(Object row) throws HiveException {
+    if (!canAcceptInputAsStream() ) {
+      throw new HiveException(String.format(
+          "Internal error: PTF %s, doesn't support Streaming",
+          getClass().getName()));
+    }
+    
+    return null;
+  }
+  
+  public List<Object> finishPartition() throws HiveException {
+    if (!canAcceptInputAsStream() ) {
+      throw new HiveException(String.format(
+          "Internal error: PTF %s, doesn't support Streaming",
+          getClass().getName()));
+    }
+    return null;
+  }
 
   public void close() {
     if (outputPartition != null) {
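
As a companion to the streaming contract described in the comment above, here is a minimal sketch (not part of this commit) of a streaming evaluator that forwards only the first N rows of each partition. It follows the same pattern as the NoopStreaming class added above; the Resolver wiring, which would mirror NoopStreamingResolver, is omitted, and the class name and limit are purely illustrative.

    package org.apache.hadoop.hive.ql.udf.ptf;

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.ql.metadata.HiveException;

    // Hypothetical example, modeled on NoopStreaming: emit at most N rows per partition.
    public class FirstNStreaming extends Noop {

      private static final int N = 10;                       // illustrative fixed limit
      private final List<Object> out = new ArrayList<Object>();
      private int seen;

      @Override
      public boolean canAcceptInputAsStream() {
        return true;
      }

      @Override
      public void startPartition() throws HiveException {
        seen = 0;                                            // reset per-partition state
      }

      @Override
      public List<Object> processRow(Object row) throws HiveException {
        out.clear();
        if (seen++ < N) {
          out.add(row);                                      // 0 or 1 output rows per input row
        }
        return out;
      }

      @Override
      public List<Object> finishPartition() throws HiveException {
        return null;                                         // nothing buffered at partition end
      }
    }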

Added: hive/trunk/ql/src/test/queries/clientpositive/ptf_streaming.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/ptf_streaming.q?rev=1595755&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/ptf_streaming.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/ptf_streaming.q Mon May 19 06:44:33 2014
@@ -0,0 +1,150 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part( 
+    p_partkey INT,
+    p_name STRING,
+    p_mfgr STRING,
+    p_brand STRING,
+    p_type STRING,
+    p_size INT,
+    p_container STRING,
+    p_retailprice DOUBLE,
+    p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+create temporary function noopstreaming as 'org.apache.hadoop.hive.ql.udf.ptf.NoopStreaming$NoopStreamingResolver';
+
+--1. test1
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on part 
+  partition by p_mfgr
+  order by p_name
+  );
+  
+  -- 2. testJoinWithNoop
+select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+;  
+
+-- 7. testJoin
+select abc.* 
+from noopstreaming(on part 
+partition by p_mfgr 
+order by p_name 
+) abc join part p1 on abc.p_partkey = p1.p_partkey;
+
+-- 9. testNoopWithMap
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmapstreaming(on part
+partition by p_mfgr
+order by p_name, p_size desc);
+
+-- 10. testNoopWithMapWithWindowing 
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmapstreaming(on part 
+  partition by p_mfgr
+  order by p_name);
+  
+-- 12. testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on noopwithmapstreaming(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)));
+
+-- 12.1 testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on noopwithmap(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)));
+
+-- 12.2 testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noop(on noopwithmapstreaming(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)));
+
+-- 14. testPTFJoinWithWindowingWithCount
+select abc.p_mfgr, abc.p_name, 
+rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, 
+dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, 
+count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, 
+abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, 
+abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz 
+from noopstreaming(on part 
+partition by p_mfgr 
+order by p_name 
+) abc join part p1 on abc.p_partkey = p1.p_partkey 
+;
+
+-- 18. testMulti2OperatorsFunctionChainWithMap
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr,p_name) as r, 
+dense_rank() over (partition by p_mfgr,p_name) as dr, 
+p_size, sum(p_size) over (partition by p_mfgr,p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on 
+        noopwithmap(on 
+          noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr 
+              order by p_mfgr) 
+            ) 
+          partition by p_mfgr,p_name 
+          order by p_mfgr,p_name) 
+        partition by p_mfgr,p_name  
+        order by p_mfgr,p_name) ;
+        
+-- 19. testMulti3OperatorsFunctionChain
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noop(on 
+        noopstreaming(on 
+          noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr 
+              order by p_mfgr) 
+            ) 
+          partition by p_mfgr,p_name 
+          order by p_mfgr,p_name) 
+        partition by p_mfgr  
+        order by p_mfgr ) ;
+        
+-- 23. testMultiOperatorChainWithDiffPartitionForWindow2
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+p_size, 
+sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s1, 
+sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row)  as s2
+from noopwithmapstreaming(on 
+        noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr, p_name 
+              order by p_mfgr, p_name) 
+          ));

Added: hive/trunk/ql/src/test/results/clientpositive/ptf_streaming.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/ptf_streaming.q.out?rev=1595755&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/ptf_streaming.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/ptf_streaming.q.out Mon May 19 06:44:33 2014
@@ -0,0 +1,673 @@
+PREHOOK: query: DROP TABLE part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: -- data setup
+CREATE TABLE part( 
+    p_partkey INT,
+    p_name STRING,
+    p_mfgr STRING,
+    p_brand STRING,
+    p_type STRING,
+    p_size INT,
+    p_container STRING,
+    p_retailprice DOUBLE,
+    p_comment STRING
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: -- data setup
+CREATE TABLE part( 
+    p_partkey INT,
+    p_name STRING,
+    p_mfgr STRING,
+    p_brand STRING,
+    p_type STRING,
+    p_size INT,
+    p_container STRING,
+    p_retailprice DOUBLE,
+    p_comment STRING
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@part
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@part
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@part
+PREHOOK: query: create temporary function noopstreaming as 'org.apache.hadoop.hive.ql.udf.ptf.NoopStreaming$NoopStreamingResolver'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: database:default
+POSTHOOK: query: create temporary function noopstreaming as 'org.apache.hadoop.hive.ql.udf.ptf.NoopStreaming$NoopStreamingResolver'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: database:default
+PREHOOK: query: --1. test1
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on part 
+  partition by p_mfgr
+  order by p_name
+  )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: --1. test1
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on part 
+  partition by p_mfgr
+  order by p_name
+  )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	1173.15
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	2346.3
+Manufacturer#1	almond antique chartreuse lavender yellow	34	3	2	4100.06
+Manufacturer#1	almond antique salmon chartreuse burlywood	6	4	3	5702.650000000001
+Manufacturer#1	almond aquamarine burnished black steel	28	5	4	7117.070000000001
+Manufacturer#1	almond aquamarine pink moccasin thistle	42	6	5	8749.730000000001
+Manufacturer#2	almond antique violet chocolate turquoise	14	1	1	1690.68
+Manufacturer#2	almond antique violet turquoise frosted	40	2	2	3491.38
+Manufacturer#2	almond aquamarine midnight light salmon	2	3	3	5523.360000000001
+Manufacturer#2	almond aquamarine rose maroon antique	25	4	4	7222.02
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	18	5	5	8923.62
+Manufacturer#3	almond antique chartreuse khaki white	17	1	1	1671.68
+Manufacturer#3	almond antique forest lavender goldenrod	14	2	2	2861.95
+Manufacturer#3	almond antique metallic orange dim	19	3	3	4272.34
+Manufacturer#3	almond antique misty red olive	1	4	4	6195.32
+Manufacturer#3	almond antique olive coral navajo	45	5	5	7532.61
+Manufacturer#4	almond antique gainsboro frosted violet	10	1	1	1620.67
+Manufacturer#4	almond antique violet mint lemon	39	2	2	2996.09
+Manufacturer#4	almond aquamarine floral ivory bisque	27	3	3	4202.35
+Manufacturer#4	almond aquamarine yellow dodger mint	7	4	4	6047.27
+Manufacturer#4	almond azure aquamarine papaya violet	12	5	5	7337.620000000001
+Manufacturer#5	almond antique blue firebrick mint	31	1	1	1789.69
+Manufacturer#5	almond antique medium spring khaki	6	2	2	3401.3500000000004
+Manufacturer#5	almond antique sky peru orange	2	3	3	5190.08
+Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
+Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
+PREHOOK: query: -- 2. testJoinWithNoop
+select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 2. testJoinWithNoop
+select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	2	0
+Manufacturer#1	almond antique burnished rose metallic	2	0
+Manufacturer#1	almond antique burnished rose metallic	2	0
+Manufacturer#1	almond antique burnished rose metallic	2	0
+Manufacturer#1	almond antique chartreuse lavender yellow	34	32
+Manufacturer#1	almond antique salmon chartreuse burlywood	6	-28
+Manufacturer#1	almond aquamarine burnished black steel	28	22
+Manufacturer#1	almond aquamarine pink moccasin thistle	42	14
+Manufacturer#2	almond antique violet chocolate turquoise	14	0
+Manufacturer#2	almond antique violet turquoise frosted	40	26
+Manufacturer#2	almond aquamarine midnight light salmon	2	-38
+Manufacturer#2	almond aquamarine rose maroon antique	25	23
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	18	-7
+Manufacturer#3	almond antique chartreuse khaki white	17	0
+Manufacturer#3	almond antique forest lavender goldenrod	14	-3
+Manufacturer#3	almond antique metallic orange dim	19	5
+Manufacturer#3	almond antique misty red olive	1	-18
+Manufacturer#3	almond antique olive coral navajo	45	44
+Manufacturer#4	almond antique gainsboro frosted violet	10	0
+Manufacturer#4	almond antique violet mint lemon	39	29
+Manufacturer#4	almond aquamarine floral ivory bisque	27	-12
+Manufacturer#4	almond aquamarine yellow dodger mint	7	-20
+Manufacturer#4	almond azure aquamarine papaya violet	12	5
+Manufacturer#5	almond antique blue firebrick mint	31	0
+Manufacturer#5	almond antique medium spring khaki	6	-25
+Manufacturer#5	almond antique sky peru orange	2	-4
+Manufacturer#5	almond aquamarine dodger light gainsboro	46	44
+Manufacturer#5	almond azure blanched chiffon midnight	23	-23
+PREHOOK: query: -- 7. testJoin
+select abc.* 
+from noopstreaming(on part 
+partition by p_mfgr 
+order by p_name 
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 7. testJoin
+select abc.* 
+from noopstreaming(on part 
+partition by p_mfgr 
+order by p_name 
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+15103	almond aquamarine dodger light gainsboro	Manufacturer#5	Brand#53	ECONOMY BURNISHED STEEL	46	LG PACK	1018.1	packages hinder carefu
+17273	almond antique forest lavender goldenrod	Manufacturer#3	Brand#35	PROMO ANODIZED TIN	14	JUMBO CASE	1190.27	along the
+17927	almond aquamarine yellow dodger mint	Manufacturer#4	Brand#41	ECONOMY BRUSHED COPPER	7	SM PKG	1844.92	ites. eve
+33357	almond azure aquamarine papaya violet	Manufacturer#4	Brand#41	STANDARD ANODIZED TIN	12	WRAP CASE	1290.35	reful
+40982	almond antique misty red olive	Manufacturer#3	Brand#32	ECONOMY PLATED COPPER	1	LG PKG	1922.98	c foxes can s
+42669	almond antique medium spring khaki	Manufacturer#5	Brand#51	STANDARD BURNISHED TIN	6	MED CAN	1611.66	sits haggl
+45261	almond aquamarine floral ivory bisque	Manufacturer#4	Brand#42	SMALL PLATED STEEL	27	WRAP CASE	1206.26	careful
+48427	almond antique violet mint lemon	Manufacturer#4	Brand#42	PROMO POLISHED STEEL	39	SM CASE	1375.42	hely ironic i
+49671	almond antique gainsboro frosted violet	Manufacturer#4	Brand#41	SMALL BRUSHED BRASS	10	SM BOX	1620.67	ccounts run quick
+65667	almond aquamarine pink moccasin thistle	Manufacturer#1	Brand#12	LARGE BURNISHED STEEL	42	JUMBO CASE	1632.66	e across the expr
+78486	almond azure blanched chiffon midnight	Manufacturer#5	Brand#52	LARGE BRUSHED BRASS	23	MED BAG	1464.48	hely blith
+85768	almond antique chartreuse lavender yellow	Manufacturer#1	Brand#12	LARGE BRUSHED STEEL	34	SM BAG	1753.76	refull
+86428	almond aquamarine burnished black steel	Manufacturer#1	Brand#12	STANDARD ANODIZED STEEL	28	WRAP BAG	1414.42	arefully 
+90681	almond antique chartreuse khaki white	Manufacturer#3	Brand#31	MEDIUM BURNISHED TIN	17	SM CASE	1671.68	are slyly after the sl
+105685	almond antique violet chocolate turquoise	Manufacturer#2	Brand#22	MEDIUM ANODIZED COPPER	14	MED CAN	1690.68	ly pending requ
+110592	almond antique salmon chartreuse burlywood	Manufacturer#1	Brand#15	PROMO BURNISHED NICKEL	6	JUMBO PKG	1602.59	 to the furiously
+112398	almond antique metallic orange dim	Manufacturer#3	Brand#32	MEDIUM BURNISHED BRASS	19	JUMBO JAR	1410.39	ole car
+121152	almond antique burnished rose metallic	Manufacturer#1	Brand#14	PROMO PLATED TIN	2	JUMBO BOX	1173.15	e pinto beans h
+121152	almond antique burnished rose metallic	Manufacturer#1	Brand#14	PROMO PLATED TIN	2	JUMBO BOX	1173.15	e pinto beans h
+121152	almond antique burnished rose metallic	Manufacturer#1	Brand#14	PROMO PLATED TIN	2	JUMBO BOX	1173.15	e pinto beans h
+121152	almond antique burnished rose metallic	Manufacturer#1	Brand#14	PROMO PLATED TIN	2	JUMBO BOX	1173.15	e pinto beans h
+132666	almond aquamarine rose maroon antique	Manufacturer#2	Brand#24	SMALL POLISHED NICKEL	25	MED BOX	1698.66	even 
+144293	almond antique olive coral navajo	Manufacturer#3	Brand#34	STANDARD POLISHED STEEL	45	JUMBO CAN	1337.29	ag furiously about 
+146985	almond aquamarine midnight light salmon	Manufacturer#2	Brand#23	MEDIUM BURNISHED COPPER	2	SM CASE	2031.98	s cajole caref
+155733	almond antique sky peru orange	Manufacturer#5	Brand#53	SMALL PLATED BRASS	2	WRAP DRUM	1788.73	furiously. bra
+191709	almond antique violet turquoise frosted	Manufacturer#2	Brand#22	ECONOMY POLISHED STEEL	40	MED BOX	1800.7	 haggle
+192697	almond antique blue firebrick mint	Manufacturer#5	Brand#52	MEDIUM BURNISHED TIN	31	LG DRUM	1789.69	ickly ir
+195606	almond aquamarine sandy cyan gainsboro	Manufacturer#2	Brand#25	STANDARD PLATED TIN	18	SM PKG	1701.6	ic de
+PREHOOK: query: -- 9. testNoopWithMap
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmapstreaming(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 9. testNoopWithMap
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmapstreaming(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	2	1
+Manufacturer#1	almond antique burnished rose metallic	2	1
+Manufacturer#1	almond antique chartreuse lavender yellow	34	3
+Manufacturer#1	almond antique salmon chartreuse burlywood	6	4
+Manufacturer#1	almond aquamarine burnished black steel	28	5
+Manufacturer#1	almond aquamarine pink moccasin thistle	42	6
+Manufacturer#2	almond antique violet chocolate turquoise	14	1
+Manufacturer#2	almond antique violet turquoise frosted	40	2
+Manufacturer#2	almond aquamarine midnight light salmon	2	3
+Manufacturer#2	almond aquamarine rose maroon antique	25	4
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	18	5
+Manufacturer#3	almond antique chartreuse khaki white	17	1
+Manufacturer#3	almond antique forest lavender goldenrod	14	2
+Manufacturer#3	almond antique metallic orange dim	19	3
+Manufacturer#3	almond antique misty red olive	1	4
+Manufacturer#3	almond antique olive coral navajo	45	5
+Manufacturer#4	almond antique gainsboro frosted violet	10	1
+Manufacturer#4	almond antique violet mint lemon	39	2
+Manufacturer#4	almond aquamarine floral ivory bisque	27	3
+Manufacturer#4	almond aquamarine yellow dodger mint	7	4
+Manufacturer#4	almond azure aquamarine papaya violet	12	5
+Manufacturer#5	almond antique blue firebrick mint	31	1
+Manufacturer#5	almond antique medium spring khaki	6	2
+Manufacturer#5	almond antique sky peru orange	2	3
+Manufacturer#5	almond aquamarine dodger light gainsboro	46	4
+Manufacturer#5	almond azure blanched chiffon midnight	23	5
+PREHOOK: query: -- 10. testNoopWithMapWithWindowing 
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmapstreaming(on part 
+  partition by p_mfgr
+  order by p_name)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 10. testNoopWithMapWithWindowing 
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmapstreaming(on part 
+  partition by p_mfgr
+  order by p_name)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	1173.15
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	2346.3
+Manufacturer#1	almond antique chartreuse lavender yellow	34	3	2	4100.06
+Manufacturer#1	almond antique salmon chartreuse burlywood	6	4	3	5702.650000000001
+Manufacturer#1	almond aquamarine burnished black steel	28	5	4	7117.070000000001
+Manufacturer#1	almond aquamarine pink moccasin thistle	42	6	5	8749.730000000001
+Manufacturer#2	almond antique violet chocolate turquoise	14	1	1	1690.68
+Manufacturer#2	almond antique violet turquoise frosted	40	2	2	3491.38
+Manufacturer#2	almond aquamarine midnight light salmon	2	3	3	5523.360000000001
+Manufacturer#2	almond aquamarine rose maroon antique	25	4	4	7222.02
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	18	5	5	8923.62
+Manufacturer#3	almond antique chartreuse khaki white	17	1	1	1671.68
+Manufacturer#3	almond antique forest lavender goldenrod	14	2	2	2861.95
+Manufacturer#3	almond antique metallic orange dim	19	3	3	4272.34
+Manufacturer#3	almond antique misty red olive	1	4	4	6195.32
+Manufacturer#3	almond antique olive coral navajo	45	5	5	7532.61
+Manufacturer#4	almond antique gainsboro frosted violet	10	1	1	1620.67
+Manufacturer#4	almond antique violet mint lemon	39	2	2	2996.09
+Manufacturer#4	almond aquamarine floral ivory bisque	27	3	3	4202.35
+Manufacturer#4	almond aquamarine yellow dodger mint	7	4	4	6047.27
+Manufacturer#4	almond azure aquamarine papaya violet	12	5	5	7337.620000000001
+Manufacturer#5	almond antique blue firebrick mint	31	1	1	1789.69
+Manufacturer#5	almond antique medium spring khaki	6	2	2	3401.3500000000004
+Manufacturer#5	almond antique sky peru orange	2	3	3	5190.08
+Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
+Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
+PREHOOK: query: -- 12. testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on noopwithmapstreaming(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 12. testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on noopwithmapstreaming(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	1173.15
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	2346.3
+Manufacturer#1	almond antique chartreuse lavender yellow	34	3	2	4100.06
+Manufacturer#1	almond antique salmon chartreuse burlywood	6	4	3	5702.650000000001
+Manufacturer#1	almond aquamarine burnished black steel	28	5	4	7117.070000000001
+Manufacturer#1	almond aquamarine pink moccasin thistle	42	6	5	8749.730000000001
+Manufacturer#2	almond antique violet chocolate turquoise	14	1	1	1690.68
+Manufacturer#2	almond antique violet turquoise frosted	40	2	2	3491.38
+Manufacturer#2	almond aquamarine midnight light salmon	2	3	3	5523.360000000001
+Manufacturer#2	almond aquamarine rose maroon antique	25	4	4	7222.02
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	18	5	5	8923.62
+Manufacturer#3	almond antique chartreuse khaki white	17	1	1	1671.68
+Manufacturer#3	almond antique forest lavender goldenrod	14	2	2	2861.95
+Manufacturer#3	almond antique metallic orange dim	19	3	3	4272.34
+Manufacturer#3	almond antique misty red olive	1	4	4	6195.32
+Manufacturer#3	almond antique olive coral navajo	45	5	5	7532.61
+Manufacturer#4	almond antique gainsboro frosted violet	10	1	1	1620.67
+Manufacturer#4	almond antique violet mint lemon	39	2	2	2996.09
+Manufacturer#4	almond aquamarine floral ivory bisque	27	3	3	4202.35
+Manufacturer#4	almond aquamarine yellow dodger mint	7	4	4	6047.27
+Manufacturer#4	almond azure aquamarine papaya violet	12	5	5	7337.620000000001
+Manufacturer#5	almond antique blue firebrick mint	31	1	1	1789.69
+Manufacturer#5	almond antique medium spring khaki	6	2	2	3401.3500000000004
+Manufacturer#5	almond antique sky peru orange	2	3	3	5190.08
+Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
+Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
+PREHOOK: query: -- 12.1 testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on noopwithmap(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 12.1 testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on noopwithmap(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	1173.15
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	2346.3
+Manufacturer#1	almond antique chartreuse lavender yellow	34	3	2	4100.06
+Manufacturer#1	almond antique salmon chartreuse burlywood	6	4	3	5702.650000000001
+Manufacturer#1	almond aquamarine burnished black steel	28	5	4	7117.070000000001
+Manufacturer#1	almond aquamarine pink moccasin thistle	42	6	5	8749.730000000001
+Manufacturer#2	almond antique violet chocolate turquoise	14	1	1	1690.68
+Manufacturer#2	almond antique violet turquoise frosted	40	2	2	3491.38
+Manufacturer#2	almond aquamarine midnight light salmon	2	3	3	5523.360000000001
+Manufacturer#2	almond aquamarine rose maroon antique	25	4	4	7222.02
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	18	5	5	8923.62
+Manufacturer#3	almond antique chartreuse khaki white	17	1	1	1671.68
+Manufacturer#3	almond antique forest lavender goldenrod	14	2	2	2861.95
+Manufacturer#3	almond antique metallic orange dim	19	3	3	4272.34
+Manufacturer#3	almond antique misty red olive	1	4	4	6195.32
+Manufacturer#3	almond antique olive coral navajo	45	5	5	7532.61
+Manufacturer#4	almond antique gainsboro frosted violet	10	1	1	1620.67
+Manufacturer#4	almond antique violet mint lemon	39	2	2	2996.09
+Manufacturer#4	almond aquamarine floral ivory bisque	27	3	3	4202.35
+Manufacturer#4	almond aquamarine yellow dodger mint	7	4	4	6047.27
+Manufacturer#4	almond azure aquamarine papaya violet	12	5	5	7337.620000000001
+Manufacturer#5	almond antique blue firebrick mint	31	1	1	1789.69
+Manufacturer#5	almond antique medium spring khaki	6	2	2	3401.3500000000004
+Manufacturer#5	almond antique sky peru orange	2	3	3	5190.08
+Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
+Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
+PREHOOK: query: -- 12.2 testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noop(on noopwithmapstreaming(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 12.2 testFunctionChain
+select p_mfgr, p_name, p_size, 
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noop(on noopwithmapstreaming(on noopstreaming(on part 
+partition by p_mfgr 
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	1173.15
+Manufacturer#1	almond antique burnished rose metallic	2	1	1	2346.3
+Manufacturer#1	almond antique chartreuse lavender yellow	34	3	2	4100.06
+Manufacturer#1	almond antique salmon chartreuse burlywood	6	4	3	5702.650000000001
+Manufacturer#1	almond aquamarine burnished black steel	28	5	4	7117.070000000001
+Manufacturer#1	almond aquamarine pink moccasin thistle	42	6	5	8749.730000000001
+Manufacturer#2	almond antique violet chocolate turquoise	14	1	1	1690.68
+Manufacturer#2	almond antique violet turquoise frosted	40	2	2	3491.38
+Manufacturer#2	almond aquamarine midnight light salmon	2	3	3	5523.360000000001
+Manufacturer#2	almond aquamarine rose maroon antique	25	4	4	7222.02
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	18	5	5	8923.62
+Manufacturer#3	almond antique chartreuse khaki white	17	1	1	1671.68
+Manufacturer#3	almond antique forest lavender goldenrod	14	2	2	2861.95
+Manufacturer#3	almond antique metallic orange dim	19	3	3	4272.34
+Manufacturer#3	almond antique misty red olive	1	4	4	6195.32
+Manufacturer#3	almond antique olive coral navajo	45	5	5	7532.61
+Manufacturer#4	almond antique gainsboro frosted violet	10	1	1	1620.67
+Manufacturer#4	almond antique violet mint lemon	39	2	2	2996.09
+Manufacturer#4	almond aquamarine floral ivory bisque	27	3	3	4202.35
+Manufacturer#4	almond aquamarine yellow dodger mint	7	4	4	6047.27
+Manufacturer#4	almond azure aquamarine papaya violet	12	5	5	7337.620000000001
+Manufacturer#5	almond antique blue firebrick mint	31	1	1	1789.69
+Manufacturer#5	almond antique medium spring khaki	6	2	2	3401.3500000000004
+Manufacturer#5	almond antique sky peru orange	2	3	3	5190.08
+Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
+Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
+PREHOOK: query: -- 14. testPTFJoinWithWindowingWithCount
+select abc.p_mfgr, abc.p_name, 
+rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, 
+dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, 
+count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, 
+abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, 
+abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz 
+from noopstreaming(on part 
+partition by p_mfgr 
+order by p_name 
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 14. testPTFJoinWithWindowingWithCount
+select abc.p_mfgr, abc.p_name, 
+rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, 
+dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, 
+count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, 
+abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, 
+abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz 
+from noopstreaming(on part 
+partition by p_mfgr 
+order by p_name 
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	1	1	4	1173.15	1173.15	2	0
+Manufacturer#1	almond antique burnished rose metallic	1	1	4	1173.15	2346.3	2	0
+Manufacturer#1	almond antique burnished rose metallic	1	1	4	1173.15	3519.4500000000003	2	0
+Manufacturer#1	almond antique burnished rose metallic	1	1	4	1173.15	4692.6	2	0
+Manufacturer#1	almond antique chartreuse lavender yellow	5	2	5	1753.76	6446.360000000001	34	32
+Manufacturer#1	almond antique salmon chartreuse burlywood	6	3	6	1602.59	8048.950000000001	6	-28
+Manufacturer#1	almond aquamarine burnished black steel	7	4	7	1414.42	9463.37	28	22
+Manufacturer#1	almond aquamarine pink moccasin thistle	8	5	8	1632.66	11096.03	42	14
+Manufacturer#2	almond antique violet chocolate turquoise	1	1	1	1690.68	1690.68	14	0
+Manufacturer#2	almond antique violet turquoise frosted	2	2	2	1800.7	3491.38	40	26
+Manufacturer#2	almond aquamarine midnight light salmon	3	3	3	2031.98	5523.360000000001	2	-38
+Manufacturer#2	almond aquamarine rose maroon antique	4	4	4	1698.66	7222.02	25	23
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	5	5	5	1701.6	8923.62	18	-7
+Manufacturer#3	almond antique chartreuse khaki white	1	1	1	1671.68	1671.68	17	0
+Manufacturer#3	almond antique forest lavender goldenrod	2	2	2	1190.27	2861.95	14	-3
+Manufacturer#3	almond antique metallic orange dim	3	3	3	1410.39	4272.34	19	5
+Manufacturer#3	almond antique misty red olive	4	4	4	1922.98	6195.32	1	-18
+Manufacturer#3	almond antique olive coral navajo	5	5	5	1337.29	7532.61	45	44
+Manufacturer#4	almond antique gainsboro frosted violet	1	1	1	1620.67	1620.67	10	0
+Manufacturer#4	almond antique violet mint lemon	2	2	2	1375.42	2996.09	39	29
+Manufacturer#4	almond aquamarine floral ivory bisque	3	3	3	1206.26	4202.35	27	-12
+Manufacturer#4	almond aquamarine yellow dodger mint	4	4	4	1844.92	6047.27	7	-20
+Manufacturer#4	almond azure aquamarine papaya violet	5	5	5	1290.35	7337.620000000001	12	5
+Manufacturer#5	almond antique blue firebrick mint	1	1	1	1789.69	1789.69	31	0
+Manufacturer#5	almond antique medium spring khaki	2	2	2	1611.66	3401.3500000000004	6	-25
+Manufacturer#5	almond antique sky peru orange	3	3	3	1788.73	5190.08	2	-4
+Manufacturer#5	almond aquamarine dodger light gainsboro	4	4	4	1018.1	6208.18	46	44
+Manufacturer#5	almond azure blanched chiffon midnight	5	5	5	1464.48	7672.66	23	-23
+PREHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr,p_name) as r, 
+dense_rank() over (partition by p_mfgr,p_name) as dr, 
+p_size, sum(p_size) over (partition by p_mfgr,p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on 
+        noopwithmap(on 
+          noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr 
+              order by p_mfgr) 
+            ) 
+          partition by p_mfgr,p_name 
+          order by p_mfgr,p_name) 
+        partition by p_mfgr,p_name  
+        order by p_mfgr,p_name)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr,p_name) as r, 
+dense_rank() over (partition by p_mfgr,p_name) as dr, 
+p_size, sum(p_size) over (partition by p_mfgr,p_name rows between unbounded preceding and current row)  as s1
+from noopstreaming(on 
+        noopwithmap(on 
+          noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr 
+              order by p_mfgr) 
+            ) 
+          partition by p_mfgr,p_name 
+          order by p_mfgr,p_name) 
+        partition by p_mfgr,p_name  
+        order by p_mfgr,p_name)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	1	1	2	2
+Manufacturer#1	almond antique burnished rose metallic	1	1	2	4
+Manufacturer#1	almond antique chartreuse lavender yellow	1	1	34	34
+Manufacturer#1	almond antique salmon chartreuse burlywood	1	1	6	6
+Manufacturer#1	almond aquamarine burnished black steel	1	1	28	28
+Manufacturer#1	almond aquamarine pink moccasin thistle	1	1	42	42
+Manufacturer#2	almond antique violet chocolate turquoise	1	1	14	14
+Manufacturer#2	almond antique violet turquoise frosted	1	1	40	40
+Manufacturer#2	almond aquamarine midnight light salmon	1	1	2	2
+Manufacturer#2	almond aquamarine rose maroon antique	1	1	25	25
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	1	1	18	18
+Manufacturer#3	almond antique chartreuse khaki white	1	1	17	17
+Manufacturer#3	almond antique forest lavender goldenrod	1	1	14	14
+Manufacturer#3	almond antique metallic orange dim	1	1	19	19
+Manufacturer#3	almond antique misty red olive	1	1	1	1
+Manufacturer#3	almond antique olive coral navajo	1	1	45	45
+Manufacturer#4	almond antique gainsboro frosted violet	1	1	10	10
+Manufacturer#4	almond antique violet mint lemon	1	1	39	39
+Manufacturer#4	almond aquamarine floral ivory bisque	1	1	27	27
+Manufacturer#4	almond aquamarine yellow dodger mint	1	1	7	7
+Manufacturer#4	almond azure aquamarine papaya violet	1	1	12	12
+Manufacturer#5	almond antique blue firebrick mint	1	1	31	31
+Manufacturer#5	almond antique medium spring khaki	1	1	6	6
+Manufacturer#5	almond antique sky peru orange	1	1	2	2
+Manufacturer#5	almond aquamarine dodger light gainsboro	1	1	46	46
+Manufacturer#5	almond azure blanched chiffon midnight	1	1	23	23
+PREHOOK: query: -- 19. testMulti3OperatorsFunctionChain
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noop(on 
+        noopstreaming(on 
+          noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr 
+              order by p_mfgr) 
+            ) 
+          partition by p_mfgr,p_name 
+          order by p_mfgr,p_name) 
+        partition by p_mfgr  
+        order by p_mfgr )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 19. testMulti3OperatorsFunctionChain
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
+from noop(on 
+        noopstreaming(on 
+          noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr 
+              order by p_mfgr) 
+            ) 
+          partition by p_mfgr,p_name 
+          order by p_mfgr,p_name) 
+        partition by p_mfgr  
+        order by p_mfgr )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	1	1	2	2
+Manufacturer#1	almond antique burnished rose metallic	1	1	2	4
+Manufacturer#1	almond antique chartreuse lavender yellow	3	2	34	38
+Manufacturer#1	almond antique salmon chartreuse burlywood	4	3	6	44
+Manufacturer#1	almond aquamarine burnished black steel	5	4	28	72
+Manufacturer#1	almond aquamarine pink moccasin thistle	6	5	42	114
+Manufacturer#2	almond antique violet chocolate turquoise	1	1	14	14
+Manufacturer#2	almond antique violet turquoise frosted	2	2	40	54
+Manufacturer#2	almond aquamarine midnight light salmon	3	3	2	56
+Manufacturer#2	almond aquamarine rose maroon antique	4	4	25	81
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	5	5	18	99
+Manufacturer#3	almond antique chartreuse khaki white	1	1	17	17
+Manufacturer#3	almond antique forest lavender goldenrod	2	2	14	31
+Manufacturer#3	almond antique metallic orange dim	3	3	19	50
+Manufacturer#3	almond antique misty red olive	4	4	1	51
+Manufacturer#3	almond antique olive coral navajo	5	5	45	96
+Manufacturer#4	almond antique gainsboro frosted violet	1	1	10	10
+Manufacturer#4	almond antique violet mint lemon	2	2	39	49
+Manufacturer#4	almond aquamarine floral ivory bisque	3	3	27	76
+Manufacturer#4	almond aquamarine yellow dodger mint	4	4	7	83
+Manufacturer#4	almond azure aquamarine papaya violet	5	5	12	95
+Manufacturer#5	almond antique blue firebrick mint	1	1	31	31
+Manufacturer#5	almond antique medium spring khaki	2	2	6	37
+Manufacturer#5	almond antique sky peru orange	3	3	2	39
+Manufacturer#5	almond aquamarine dodger light gainsboro	4	4	46	85
+Manufacturer#5	almond azure blanched chiffon midnight	5	5	23	108
+PREHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+p_size, 
+sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s1, 
+sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row)  as s2
+from noopwithmapstreaming(on 
+        noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr, p_name 
+              order by p_mfgr, p_name) 
+          ))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2
+select p_mfgr, p_name,  
+rank() over (partition by p_mfgr order by p_name) as r, 
+dense_rank() over (partition by p_mfgr order by p_name) as dr, 
+p_size, 
+sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row) as s1, 
+sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row)  as s2
+from noopwithmapstreaming(on 
+        noop(on 
+              noopstreaming(on part 
+              partition by p_mfgr, p_name 
+              order by p_mfgr, p_name) 
+          ))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	1	1	2	4	4
+Manufacturer#1	almond antique burnished rose metallic	1	1	2	4	4
+Manufacturer#1	almond antique chartreuse lavender yellow	3	2	34	38	38
+Manufacturer#1	almond antique salmon chartreuse burlywood	4	3	6	44	44
+Manufacturer#1	almond aquamarine burnished black steel	5	4	28	72	72
+Manufacturer#1	almond aquamarine pink moccasin thistle	6	5	42	114	114
+Manufacturer#2	almond antique violet chocolate turquoise	1	1	14	14	14
+Manufacturer#2	almond antique violet turquoise frosted	2	2	40	54	54
+Manufacturer#2	almond aquamarine midnight light salmon	3	3	2	56	56
+Manufacturer#2	almond aquamarine rose maroon antique	4	4	25	81	81
+Manufacturer#2	almond aquamarine sandy cyan gainsboro	5	5	18	99	99
+Manufacturer#3	almond antique chartreuse khaki white	1	1	17	17	17
+Manufacturer#3	almond antique forest lavender goldenrod	2	2	14	31	31
+Manufacturer#3	almond antique metallic orange dim	3	3	19	50	50
+Manufacturer#3	almond antique misty red olive	4	4	1	51	51
+Manufacturer#3	almond antique olive coral navajo	5	5	45	96	96
+Manufacturer#4	almond antique gainsboro frosted violet	1	1	10	10	10
+Manufacturer#4	almond antique violet mint lemon	2	2	39	49	49
+Manufacturer#4	almond aquamarine floral ivory bisque	3	3	27	76	76
+Manufacturer#4	almond aquamarine yellow dodger mint	4	4	7	83	83
+Manufacturer#4	almond azure aquamarine papaya violet	5	5	12	95	95
+Manufacturer#5	almond antique blue firebrick mint	1	1	31	31	31
+Manufacturer#5	almond antique medium spring khaki	2	2	6	37	37
+Manufacturer#5	almond antique sky peru orange	3	3	2	39	39
+Manufacturer#5	almond aquamarine dodger light gainsboro	4	4	46	85	85
+Manufacturer#5	almond azure blanched chiffon midnight	5	5	23	108	108

Modified: hive/trunk/ql/src/test/results/clientpositive/show_functions.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/show_functions.q.out?rev=1595755&r1=1595754&r2=1595755&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/show_functions.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/show_functions.q.out Mon May 19 06:44:33 2014
@@ -118,7 +118,9 @@ named_struct
 negative
 ngrams
 noop
+noopstreaming
 noopwithmap
+noopwithmapstreaming
 not
 ntile
 nvl