Posted to commits@pig.apache.org by ch...@apache.org on 2014/02/24 22:41:41 UTC

svn commit: r1571454 [5/5] - in /pig/branches/tez: ./ conf/ contrib/piggybank/java/src/main/java/org/apache/pig/piggybank/evaluation/ contrib/piggybank/java/src/main/java/org/apache/pig/piggybank/evaluation/string/ contrib/piggybank/java/src/main/java/...

Modified: pig/branches/tez/test/org/apache/pig/test/TestAssert.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestAssert.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestAssert.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestAssert.java Mon Feb 24 21:41:38 2014
@@ -21,11 +21,15 @@ import static junit.framework.Assert.ass
 import static org.apache.pig.builtin.mock.Storage.resetData;
 import static org.apache.pig.builtin.mock.Storage.tuple;
 
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
 import java.util.List;
+import java.util.Properties;
 
 import junit.framework.Assert;
 
 import org.apache.pig.ExecType;
+import org.apache.pig.PigConfiguration;
 import org.apache.pig.PigServer;
 import org.apache.pig.builtin.mock.Storage.Data;
 import org.apache.pig.data.Tuple;
@@ -53,7 +57,7 @@ public class TestAssert {
       pigServer.registerQuery("A = LOAD 'foo' USING mock.Storage() AS (i:int);");
       pigServer.registerQuery("ASSERT A BY i > 0;");
       pigServer.registerQuery("STORE A INTO 'bar' USING mock.Storage();");
-      
+
       pigServer.executeBatch();
 
       List<Tuple> out = data.get("bar");
@@ -62,7 +66,38 @@ public class TestAssert {
       assertEquals(tuple(2), out.get(1));
       assertEquals(tuple(3), out.get(2));
   }
-  
+
+  /**
+   * Verify that ASSERT operator works in a Pig script
+   * See PIG-3670
+   * @throws Exception
+   */
+  @Test
+  public void testInScript() throws Exception {
+      PigServer pigServer = new PigServer(ExecType.LOCAL);
+      Data data = resetData(pigServer);
+
+      data.set("foo",
+              tuple(1),
+              tuple(2),
+              tuple(3)
+              );
+
+      StringBuffer query = new StringBuffer();
+      query.append("A = LOAD 'foo' USING mock.Storage() AS (i:int);\n");
+      query.append("ASSERT A BY i > 0;\n");
+      query.append("STORE A INTO 'bar' USING mock.Storage();");
+
+      InputStream is = new ByteArrayInputStream(query.toString().getBytes());
+      pigServer.registerScript(is);
+
+      List<Tuple> out = data.get("bar");
+      assertEquals(3, out.size());
+      assertEquals(tuple(1), out.get(0));
+      assertEquals(tuple(2), out.get(1));
+      assertEquals(tuple(3), out.get(2));
+  }
+
   /**
    * Verify that ASSERT operator works
    * @throws Exception
@@ -84,9 +119,60 @@ public class TestAssert {
       try {
           pigServer.openIterator("A");
       } catch (FrontendException fe) {
+            Assert.assertTrue(fe.getCause().getCause().getMessage().contains("Assertion violated"));
+      }
+  }
+
+  /**
+   * Verify that ASSERT operator works. Disable fetch for this testcase.
+   * @throws Exception
+   */
+  @Test
+  public void testNegativeWithoutFetch() throws Exception {
+      PigServer pigServer = new PigServer(ExecType.LOCAL);
+      Data data = resetData(pigServer);
+
+      data.set("foo",
+              tuple(1),
+              tuple(2),
+              tuple(3)
+              );
+
+      pigServer.registerQuery("A = LOAD 'foo' USING mock.Storage() AS (i:int);");
+      pigServer.registerQuery("ASSERT A BY i > 1 , 'i should be greater than 1';");
+
+      Properties props = pigServer.getPigContext().getProperties();
+      props.setProperty(PigConfiguration.OPT_FETCH, "false");
+      try {
+          pigServer.openIterator("A");
+      } catch (FrontendException fe) {
           Assert.assertTrue(fe.getCause().getMessage().contains(
                   "Job terminated with anomalous status FAILED"));
       }
-       
   }
+  
+  /**
+   * Verify that alias is not assignable to the ASSERT operator
+   * @throws Exception
+   */
+  @Test(expected=FrontendException.class)
+  public void testNegativeWithAlias() throws Exception {
+      PigServer pigServer = new PigServer(ExecType.LOCAL);
+      Data data = resetData(pigServer);
+
+      data.set("foo",
+              tuple(1),
+              tuple(2),
+              tuple(3)
+              );
+      try {
+          pigServer.registerQuery("A = LOAD 'foo' USING mock.Storage() AS (i:int);");
+          pigServer.registerQuery("B = ASSERT A BY i > 1 , 'i should be greater than 1';");
+      }
+      catch (FrontendException fe) {
+          Util.checkMessageInException(fe, "Syntax error, unexpected symbol at or near 'B'");
+          throw fe;
+      }
+  }
+
 }

Modified: pig/branches/tez/test/org/apache/pig/test/TestEvalPipeline2.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestEvalPipeline2.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestEvalPipeline2.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestEvalPipeline2.java Mon Feb 24 21:41:38 2014
@@ -29,10 +29,12 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Properties;
 import java.util.Random;
 
 import org.apache.pig.EvalFunc;
 import org.apache.pig.ExecType;
+import org.apache.pig.PigConfiguration;
 import org.apache.pig.PigException;
 import org.apache.pig.PigServer;
 import org.apache.pig.builtin.BinStorage;
@@ -825,17 +827,22 @@ public class TestEvalPipeline2 {
         
         Iterator<Tuple> iter = pigServer.openIterator("e");
         
+        Map<Object, Object> expected = new HashMap<Object, Object>(3);
+        expected.put(1, null);
+        expected.put(2, null);
+        expected.put(4, null);
+
         Tuple t = iter.next();
         Assert.assertTrue(t.size()==1);
-        Assert.assertTrue((Integer)t.get(0)==1);
+        Assert.assertTrue(expected.containsKey(t.get(0)));
         
         t = iter.next();
         Assert.assertTrue(t.size()==1);
-        Assert.assertTrue((Integer)t.get(0)==4);
+        Assert.assertTrue(expected.containsKey(t.get(0)));
         
         t = iter.next();
         Assert.assertTrue(t.size()==1);
-        Assert.assertTrue((Integer)t.get(0)==2);
+        Assert.assertTrue(expected.containsKey(t.get(0)));
         
         Assert.assertFalse(iter.hasNext());
     }
@@ -1409,10 +1416,36 @@ public class TestEvalPipeline2 {
             pigServer.openIterator("b");
             Assert.fail();
         } catch (Exception e) {
-            Assert.assertTrue(e.getMessage().contains(ArrayList.class.getName()));
+            String message = e.getCause().getCause().getMessage();
+            Assert.assertTrue(message.contains(ArrayList.class.getName()));
         }
     }
     
+    // See PIG-1826
+    @Test
+    public void testNonStandardDataWithoutFetch() throws Exception{
+        Properties props = pigServer.getPigContext().getProperties();
+        props.setProperty(PigConfiguration.OPT_FETCH, "false");
+        String[] input1 = {
+                "0",
+        };
+        try {
+            Util.createInputFile(cluster, "table_testNonStandardDataWithoutFetch", input1);
+            pigServer.registerQuery("a = load 'table_testNonStandardDataWithoutFetch' as (a0);");
+            pigServer.registerQuery("b = foreach a generate " + UDFWithNonStandardType.class.getName() + "(a0);");
+
+            try {
+                pigServer.openIterator("b");
+                Assert.fail();
+            } catch (Exception e) {
+                Assert.assertTrue(e.getMessage().contains(ArrayList.class.getName()));
+            }
+        }
+        finally {
+            props.setProperty(PigConfiguration.OPT_FETCH, "true");
+        }
+    }
+
     // See PIG-2078
     @Test
     public void testProjectNullBag() throws Exception{

Modified: pig/branches/tez/test/org/apache/pig/test/TestJsonLoaderStorage.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestJsonLoaderStorage.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestJsonLoaderStorage.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestJsonLoaderStorage.java Mon Feb 24 21:41:38 2014
@@ -113,6 +113,9 @@ public class TestJsonLoaderStorage {
     "\"m\":null" +
     "}";
 
+  private static final String jsonOutput =
+    "{\"f1\":\"18\",\"count\":3}";
+
   private Iterator<Tuple> loadJson(String input) throws IOException {
     File tempFile = File.createTempFile("json", null);
     tempFile.deleteOnExit();
@@ -321,4 +324,53 @@ public class TestJsonLoaderStorage {
 
     br.close();
   }
+
+  @Test
+  public void testSimpleMapSideStreaming() throws Exception {
+    PigServer pigServer = new PigServer(ExecType.LOCAL);
+    File input = Util.createInputFile("tmp", "", new String [] {"1,2,3;4,5,6,7,8",
+        "1,2,3;4,5,6,7,9",
+        "1,2,3;4,5,6,7,18"});
+    File tempJsonFile = File.createTempFile("json", "");
+    tempJsonFile.delete();
+
+    // Pig query to run
+    pigServer.registerQuery("IP = load '"+  Util.generateURI(Util.encodeEscape(input.toString()), pigServer.getPigContext())
+        +"' using PigStorage (';') as (ID:chararray,DETAILS:chararray);");
+    pigServer.registerQuery(
+        "id_details = FOREACH IP GENERATE " +
+            "FLATTEN" +
+            "(STRSPLIT" +
+            "(ID,',',3)) AS (drop, code, transaction) ," +
+            "FLATTEN" +
+            "(STRSPLIT" +
+            "(DETAILS,',',5)) AS (lname, fname, date, price, product);");
+    pigServer.registerQuery(
+        "transactions = FOREACH id_details GENERATE $0 .. ;");
+    pigServer.registerQuery(
+        "transactionsG = group transactions by code;");
+    pigServer.registerQuery(
+        "uniqcnt  = foreach transactionsG {"+
+            "sym = transactions.product ;"+
+            "dsym =  distinct sym ;"+
+            "generate flatten(dsym.product) as f1, COUNT(dsym) as count ;" +
+            "};");
+    pigServer.store("uniqcnt", tempJsonFile.getAbsolutePath(), "JsonStorage");
+
+    BufferedReader br = new BufferedReader(new FileReader(tempJsonFile.getAbsolutePath()+ "/part-r-00000"));
+    String data = br.readLine();
+
+    assertEquals(jsonOutput, data);
+
+    String line = data;
+    int count = 0;
+    while (line != null) {
+      line = br.readLine();
+      count++;
+    }
+    assertEquals(3, count);
+
+    br.close();
+    tempJsonFile.deleteOnExit();
+  }
 }

Modified: pig/branches/tez/test/org/apache/pig/test/TestMultiQueryLocal.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestMultiQueryLocal.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestMultiQueryLocal.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestMultiQueryLocal.java Mon Feb 24 21:41:38 2014
@@ -69,7 +69,7 @@ public class TestMultiQueryLocal {
         context.getProperties().setProperty(PigConfiguration.OPT_MULTIQUERY, ""+true);
         myPig = new PigServer(context);
         myPig.getPigContext().getProperties().setProperty("pig.usenewlogicalplan", "false");
-        myPig.getPigContext().getProperties().setProperty("pig.temp.dir", "build/test/tmp/");
+        myPig.getPigContext().getProperties().setProperty(PigConfiguration.PIG_TEMP_DIR, "build/test/tmp/");
         TMP_DIR = FileLocalizer.getTemporaryPath(myPig.getPigContext()).toUri().getPath();
         deleteOutputFiles();
     }

Modified: pig/branches/tez/test/org/apache/pig/test/TestNewPartitionFilterPushDown.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestNewPartitionFilterPushDown.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestNewPartitionFilterPushDown.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestNewPartitionFilterPushDown.java Mon Feb 24 21:41:38 2014
@@ -61,6 +61,7 @@ import org.apache.pig.newplan.logical.ex
 import org.apache.pig.newplan.logical.expression.ProjectExpression;
 import org.apache.pig.newplan.logical.optimizer.LogicalPlanOptimizer;
 import org.apache.pig.newplan.logical.relational.LOFilter;
+import org.apache.pig.newplan.logical.relational.LogToPhyTranslationVisitor;
 import org.apache.pig.newplan.logical.relational.LogicalPlan;
 import org.apache.pig.newplan.logical.rules.LoadTypeCastInserter;
 import org.apache.pig.newplan.logical.rules.PartitionFilterOptimizer;
@@ -466,6 +467,10 @@ public class TestNewPartitionFilterPushD
                         (it.next() instanceof LOFilter));
             }
         }
+
+        // Test that the filtered plan can be translated to physical plan (PIG-3657)
+        LogToPhyTranslationVisitor translator = new LogToPhyTranslationVisitor(newLogicalPlan);
+        translator.visit();
     }
 
     /**
@@ -671,6 +676,16 @@ public class TestNewPartitionFilterPushD
         negativeTest(q, Arrays.asList("srcid", "mrkt", "dstid"));
     }
 
+    // PIG-3657
+    @Test
+    public void testFilteredPlanWithLogToPhyTranslator() throws Exception {
+        String q = "a = load 'foo' using " + TestLoader.class.getName() +
+                "('srcid:int, mrkt:chararray', 'srcid') as (f1, f2);" +
+                "b = filter a by (f1 < 5 or (f1 == 10 and f2 == 'UK'));" +
+                "store b into 'out';";
+        testFull(q, "((srcid < 5) or (srcid == 10))", "((f1 < 5) or (f2 == 'UK'))", false);
+    }
+
     //// helper methods ///////
     private FilterExtractor test(String query, List<String> partitionCols,
             String expPartFilterString, String expFilterString)

Modified: pig/branches/tez/test/org/apache/pig/test/TestNewPlanFilterRule.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestNewPlanFilterRule.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestNewPlanFilterRule.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestNewPlanFilterRule.java Mon Feb 24 21:41:38 2014
@@ -51,6 +51,7 @@ import org.apache.pig.newplan.logical.re
 import org.apache.pig.newplan.logical.relational.LogicalPlan;
 import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
 import org.apache.pig.newplan.logical.relational.LogicalSchema;
+import org.apache.pig.newplan.logical.rules.FilterAboveForeach;
 import org.apache.pig.newplan.logical.rules.LoadTypeCastInserter;
 import org.apache.pig.newplan.logical.rules.MergeFilter;
 import org.apache.pig.newplan.logical.rules.PushUpFilter;
@@ -570,6 +571,110 @@ public class TestNewPlanFilterRule {
         
     }
 
+    /**
+     * Test that filter cannot get pushed up over nested Distinct (see PIG-3347)
+     */
+    @Test
+    public void testFilterAfterNestedDistinct() throws Exception {
+        String query = "a = LOAD 'file.txt';" +
+            "a_group = group a by $0;" +
+            "b = foreach a_group { a_distinct = distinct a.$0;generate group, a_distinct;}" +
+            "c = filter b by SIZE(a_distinct) == 1;" +
+            "store c into 'empty';";
+
+        // filter should not be pushed above nested distinct,
+        //ie expect - loload -> locogroup -> foreach -> filter
+        LogicalPlan newLogicalPlan = migrateAndOptimizePlan( query );
+        newLogicalPlan.explain(System.out, "text", true);
+
+        Operator load = newLogicalPlan.getSources().get( 0 );
+        Assert.assertTrue( load instanceof LOLoad );
+        Operator cogroup = newLogicalPlan.getSuccessors( load ).get( 0 );
+        Assert.assertTrue( cogroup instanceof LOCogroup );
+        Operator foreach = newLogicalPlan.getSuccessors(cogroup).get( 0 );
+        Assert.assertTrue( foreach instanceof LOForEach );
+        Operator filter = newLogicalPlan.getSuccessors(foreach).get( 0 );
+        Assert.assertTrue( filter instanceof LOFilter );
+    }
+
+    /**
+     * Test that filter cannot get pushed up over nested Limit (see PIG-3347)
+     */
+    @Test
+    public void testFilterAfterNestedLimit() throws Exception {
+        String query = "a = LOAD 'file.txt';" +
+            "a_group = group a by $0;" +
+            "b = foreach a_group { a_limit = limit a.$0 5;generate group, a_limit;}" +
+            "c = filter b by SIZE(a_limit) == 1;" +
+            "store c into 'empty';";
+
+        // filter should not be pushed above nested limit,
+        //ie expect - loload -> locogroup -> foreach -> filter
+        LogicalPlan newLogicalPlan = migrateAndOptimizePlan( query );
+        newLogicalPlan.explain(System.out, "text", true);
+
+        Operator load = newLogicalPlan.getSources().get( 0 );
+        Assert.assertTrue( load instanceof LOLoad );
+        Operator cogroup = newLogicalPlan.getSuccessors( load ).get( 0 );
+        Assert.assertTrue( cogroup instanceof LOCogroup );
+        Operator foreach = newLogicalPlan.getSuccessors(cogroup).get( 0 );
+        Assert.assertTrue( foreach instanceof LOForEach );
+        Operator filter = newLogicalPlan.getSuccessors(foreach).get( 0 );
+        Assert.assertTrue( filter instanceof LOFilter );
+    }
+
+    /**
+     * Test that filter cannot get pushed up over nested Filter (see PIG-3347)
+     */
+    @Test
+    public void testFilterAfterNestedFilter() throws Exception {
+        String query = "a = LOAD 'file.txt';" +
+            "a_group = group a by $0;" +
+            "b = foreach a_group { a_filter = filter a by $0 == 1;generate group, a_filter;}" +
+            "c = filter b by SIZE(a_filter) == 1;" +
+            "store c into 'empty';";
+
+        // filter should not be pushed above nested filter,
+        //ie expect - loload -> locogroup -> foreach -> filter
+        LogicalPlan newLogicalPlan = migrateAndOptimizePlan( query );
+        newLogicalPlan.explain(System.out, "text", true);
+
+        Operator load = newLogicalPlan.getSources().get( 0 );
+        Assert.assertTrue( load instanceof LOLoad );
+        Operator cogroup = newLogicalPlan.getSuccessors( load ).get( 0 );
+        Assert.assertTrue( cogroup instanceof LOCogroup );
+        Operator foreach = newLogicalPlan.getSuccessors(cogroup).get( 0 );
+        Assert.assertTrue( foreach instanceof LOForEach );
+        Operator filter = newLogicalPlan.getSuccessors(foreach).get( 0 );
+        Assert.assertTrue( filter instanceof LOFilter );
+    }
+    
+    /**
+     * Test that filter does not get blocked for PushUpFilter/FilterAboveForeach
+     * by an unrelated nested filter (see PIG-3347)
+     */
+    @Test
+    public void testFilterAfterUnrelatedNestedFilter() throws Exception {
+        String query = "a = LOAD 'file.txt' as (a0:int, a1_bag:bag{(X:int)}, a2_bag:bag{(Y:int)});" +
+            "b = foreach a { a1_filter = filter a1_bag by X == 1; generate a0, a1_filter, a2_bag;}" +
+            "c = filter b by SIZE(a2_bag) == 1;" +
+            "store c into 'empty';";
+
+        // filter should be pushed above the foreach containing the unrelated nested filter,
+        //ie expect - loload -> foreach -> filter -> foreach
+        LogicalPlan newLogicalPlan = migrateAndOptimizePlan( query );
+        newLogicalPlan.explain(System.out, "text", true);
+
+        Operator load = newLogicalPlan.getSources().get( 0 );
+        Assert.assertTrue( load instanceof LOLoad );
+        Operator foreach1 = newLogicalPlan.getSuccessors(load).get( 0 );
+        Assert.assertTrue( foreach1 instanceof LOForEach );
+        Operator filter = newLogicalPlan.getSuccessors( foreach1 ).get( 0 );
+        Assert.assertTrue( filter instanceof LOFilter );
+        Operator foreach2 = newLogicalPlan.getSuccessors(filter).get( 0 );
+        Assert.assertTrue( foreach2 instanceof LOForEach );
+    }
+
     private LogicalPlan migrateAndOptimizePlan(String query) throws Exception {
     	PigServer pigServer = new PigServer(pc);
         LogicalPlan newLogicalPlan = Util.buildLp(pigServer, query);
@@ -602,6 +707,11 @@ public class TestNewPlanFilterRule {
             r = new PushUpFilter( "PushUpFilter" );
             s.add(r);            
             ls.add(s);
+
+            s = new HashSet<Rule>();
+            r = new FilterAboveForeach( "PushUpFilter" );
+            s.add(r);
+            ls.add(s);
             
             return ls;
         }

Modified: pig/branches/tez/test/org/apache/pig/test/TestPOPartialAgg.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestPOPartialAgg.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestPOPartialAgg.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestPOPartialAgg.java Mon Feb 24 21:41:38 2014
@@ -17,8 +17,7 @@
  */
 package org.apache.pig.test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -45,6 +44,8 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.base.Strings;
+
 /**
  * Test POPartialAgg runtime
  */
@@ -55,10 +56,10 @@ public class TestPOPartialAgg {
 
     @Before
     public void setUp() throws Exception {
-        createPOPartialPlan();
+        createPOPartialPlan(1);
     }
 
-    private void createPOPartialPlan() throws PlanException {
+    private void createPOPartialPlan(int valueCount) throws PlanException {
         parentPlan = new PhysicalPlan();
         partAggOp = GenPhyOp.topPOPartialAgg();
         partAggOp.setParentPlan(parentPlan);
@@ -70,24 +71,27 @@ public class TestPOPartialAgg {
         keyPlan.add(keyProj);
         partAggOp.setKeyPlan(keyPlan);
 
-        // setup value plan
-        // project arg for udf
-        PhysicalPlan valPlan1 = new PhysicalPlan();
-        POProject projVal1 = new POProject(GenPhyOp.getOK(), -1, 1);
-        projVal1.setResultType(DataType.BAG);
-        valPlan1.add(projVal1);
-
-        // setup udf
-        List<PhysicalOperator> udfInps = new ArrayList<PhysicalOperator>();
-        udfInps.add(projVal1);
-        FuncSpec sumSpec = new FuncSpec(IntSum.Intermediate.class.getName());
-        POUserFunc sumUdf = new POUserFunc(GenPhyOp.getOK(), -1, udfInps,
-                sumSpec);
-        valPlan1.add(sumUdf);
-        valPlan1.connect(projVal1, sumUdf);
-
+        // setup value plans
         List<PhysicalPlan> valuePlans = new ArrayList<PhysicalPlan>();
-        valuePlans.add(valPlan1);
+        
+        for (int i = 0; i < valueCount; i++) {
+            // project arg for udf
+            PhysicalPlan valPlan = new PhysicalPlan();
+            POProject projVal1 = new POProject(GenPhyOp.getOK(), -1, i + 1);
+            projVal1.setResultType(DataType.BAG);
+            valPlan.add(projVal1);
+    
+            // setup udf
+            List<PhysicalOperator> udfInps = new ArrayList<PhysicalOperator>();
+            udfInps.add(projVal1);
+            FuncSpec sumSpec = new FuncSpec(IntSum.Intermediate.class.getName());
+            POUserFunc sumUdf = new POUserFunc(GenPhyOp.getOK(), -1, udfInps,
+                    sumSpec);
+            valPlan.add(sumUdf);
+            valPlan.connect(projVal1, sumUdf);
+    
+            valuePlans.add(valPlan);
+        }
 
         partAggOp.setValuePlans(valuePlans);
     }
@@ -217,7 +221,57 @@ public class TestPOPartialAgg {
         checkInputAndOutput(inputTups, outputTups, false);
     }
 
+    @Test
+    public void testMultiVals() throws Exception {
+        // more than one value to be aggregated
+        createPOPartialPlan(2);
+        
+        // input tuple has key, and bag containing SUM.Init output
+        String[] inputTups = { "(1,(1L),(2L))", "(2,(2L),(1L))", "(1,(2L),(2L))" };
+        String[] outputTups = { "(1,(3L),(4L))", "(2,(2L),(1L))" };
+        checkInputAndOutput(inputTups, outputTups, false);
+    }
 
+    @Test
+    public void testMultiValCheckNotDisabled() throws Exception {
+        // "large" number of values per input to aggregate but good reduction 
+        // in size due to aggregation. 
+        // This case should result in a reduction from 10500 inputs to 500 
+        // outputs (factor of 20), so in-memory aggregation should not be 
+        // disabled in checkSize(). If it is disabled, too many output rows 
+        // will be generated.
+        
+        int numKeys = 500;
+        int numVals = 3;
+
+        createPOPartialPlan(numVals);
+        
+        // Build a string of values to use in all input tuples
+        String vals = Strings.repeat(",(1L)", numVals);
+        
+        // And input tuples.
+        // We need the next multiple of numKeys over 10,000 because we need to
+        // trigger the size check (at 10,000), and we want an even multiple of
+        // numKeys so result values end up even across keys
+        int numInputs = (10000 + numKeys * 2 - 1) / numKeys * numKeys;
+        String[] inputTups = new String[numInputs];
+        for (int i = 0; i < numInputs; i++) {
+            inputTups[i] = "(" + (i % numKeys) + vals + ")";
+        }
+        
+        // Build expected results
+        int expectedVal = numInputs / numKeys;
+        vals = Strings.repeat(",(" + expectedVal + "L)", numVals);
+        String[] outputTups = new String[numKeys];
+        for (int i = 0; i < numKeys; i++) {
+            outputTups[i] = "(" + i + vals + ")";
+        }
+        
+        // input tuple has key, and bag containing SUM.Init output
+        checkInputAndOutput(inputTups, outputTups, false);
+    }
+    
+    
     /**
      * run the plan on inputTups and check if output matches outputTups if
      * isMapMemEmpty is set to true, set memory available for the hash-map to

Modified: pig/branches/tez/test/org/apache/pig/test/TestPigRunner.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestPigRunner.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestPigRunner.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestPigRunner.java Mon Feb 24 21:41:38 2014
@@ -52,6 +52,7 @@ import org.apache.pig.tools.pigstats.Out
 import org.apache.pig.tools.pigstats.PigProgressNotificationListener;
 import org.apache.pig.tools.pigstats.PigStats;
 import org.apache.pig.tools.pigstats.PigStatsUtil;
+import org.apache.pig.tools.pigstats.SimpleFetchPigStats;
 import org.apache.pig.tools.pigstats.mapreduce.MRJobStats;
 import org.apache.pig.tools.pigstats.mapreduce.MRPigStatsUtil;
 import org.junit.AfterClass;
@@ -154,7 +155,7 @@ public class TestPigRunner {
         w.close();
 
         try {
-            String[] args = { "-Dstop.on.failure=true", "-Dopt.multiquery=false", "-Daggregate.warning=false", PIG_FILE };
+            String[] args = { "-Dstop.on.failure=true", "-Dopt.multiquery=false", "-Dopt.fetch=false", "-Daggregate.warning=false", PIG_FILE };
             PigStats stats = PigRunner.run(args, new TestNotificationListener());
 
             assertTrue(stats.isSuccessful());
@@ -172,6 +173,36 @@ public class TestPigRunner {
             assertTrue(conf.getBoolean("stop.on.failure", false));
             assertTrue(!conf.getBoolean("aggregate.warning", true));
             assertTrue(!conf.getBoolean(PigConfiguration.OPT_MULTIQUERY, true));
+            assertTrue(!conf.getBoolean("opt.fetch", true));
+        } finally {
+            new File(PIG_FILE).delete();
+            Util.deleteFile(cluster, OUTPUT_FILE);
+        }
+    }
+
+    @Test
+    public void simpleTest2() throws Exception {
+        PrintWriter w = new PrintWriter(new FileWriter(PIG_FILE));
+        w.println("A = load '" + INPUT_FILE + "' as (a0:int, a1:int, a2:int);");
+        w.println("B = filter A by a0 == 3;");
+        w.println("C = limit B 1;");
+        w.println("dump C;");
+        w.close();
+
+        try {
+            String[] args = { "-Dstop.on.failure=true", "-Dopt.multiquery=false", "-Daggregate.warning=false", PIG_FILE };
+            PigStats stats = PigRunner.run(args, new TestNotificationListener());
+
+            assertTrue(stats instanceof SimpleFetchPigStats);
+            assertTrue(stats.isSuccessful());
+            assertEquals(0, stats.getNumberJobs());
+            assertEquals(stats.getJobGraph().size(), 0);
+
+            Configuration conf = ConfigurationUtil.toConfiguration(stats.getPigProperties());
+            assertTrue(conf.getBoolean("stop.on.failure", false));
+            assertTrue(!conf.getBoolean("aggregate.warning", true));
+            assertTrue(!conf.getBoolean(PigConfiguration.OPT_MULTIQUERY, true));
+            assertTrue(conf.getBoolean("opt.fetch", true));
         } finally {
             new File(PIG_FILE).delete();
             Util.deleteFile(cluster, OUTPUT_FILE);

Modified: pig/branches/tez/test/org/apache/pig/test/TestPigServer.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestPigServer.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestPigServer.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestPigServer.java Mon Feb 24 21:41:38 2014
@@ -50,6 +50,7 @@ import java.util.Properties;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.Job;
@@ -58,6 +59,7 @@ import org.apache.pig.PigConfiguration;
 import org.apache.pig.PigServer;
 import org.apache.pig.ResourceSchema;
 import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
 import org.apache.pig.builtin.mock.Storage;
 import org.apache.pig.builtin.mock.Storage.Data;
 import org.apache.pig.data.DataType;
@@ -776,14 +778,20 @@ public class TestPigServer {
         File propertyFile = new File(tempDir, "pig.properties");
         propertyFile.deleteOnExit();
         PrintWriter out = new PrintWriter(new FileWriter(propertyFile));
-        out.println("pig.temp.dir=/opt/temp");
+        out.println(PigConfiguration.PIG_TEMP_DIR + "=/tmp/test");
         out.close();
         Properties properties = PropertiesUtil.loadDefaultProperties();
         PigContext pigContext=new PigContext(ExecType.LOCAL, properties);
         pigContext.connect();
         FileLocalizer.setInitialized(false);
         String tempPath= FileLocalizer.getTemporaryPath(pigContext).toString();
-        assertTrue(tempPath.startsWith("file:/opt/temp"));
+        assertTrue(tempPath.startsWith("file:/tmp/test/"));
+        Path path = new Path(tempPath);
+        FileSystem fs = FileSystem.get(path.toUri(),
+                ConfigurationUtil.toConfiguration(pigContext.getProperties()));
+        FileStatus status = fs.getFileStatus(path.getParent());
+        // Temporary root dir should have 700 as permission
+        assertEquals("rwx------", status.getPermission().toString());
         propertyFile.delete();
         FileLocalizer.setInitialized(false);
     }

Modified: pig/branches/tez/test/org/apache/pig/test/TestPigStorage.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestPigStorage.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestPigStorage.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestPigStorage.java Mon Feb 24 21:41:38 2014
@@ -20,11 +20,7 @@ package org.apache.pig.test;
 
 import static org.apache.pig.ExecType.MAPREDUCE;
 import static org.apache.pig.builtin.mock.Storage.tuple;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.FileWriter;
@@ -259,6 +255,27 @@ public class TestPigStorage  {
     }
 
     @Test
+    public void testPruneColumnsWithSchema() throws Exception {
+        pigContext.connect();
+        String query = "a = LOAD '" + datadir + "originput' using PigStorage(',') " +
+        "as (f1:chararray, f2:int);";
+        pig.registerQuery(query);
+        pig.store("a", datadir + "aout", "PigStorage('\\t', '-schema')");
+    
+        // aout now has a schema.
+    
+        // Verify that loaded data has the correct data type after the prune
+        pig.registerQuery("b = LOAD '" + datadir + "aout' using PigStorage('\\t'); c = FOREACH b GENERATE f2;");
+        
+        Iterator<Tuple> it = pig.openIterator("c");
+        Assert.assertTrue("results were produced", it.hasNext());
+        
+        Tuple t = it.next();
+        
+        Assert.assertTrue("data is correct type", t.get(0) instanceof Integer);
+    }
+
+    @Test
     public void testSchemaConversion() throws Exception {
 
         Util.createLocalInputFile(datadir + "originput2",
@@ -644,5 +661,54 @@ public class TestPigStorage  {
         assertTrue(it.hasNext());
         assertEquals(tuple(1,null,null), it.next());
         assertFalse(it.hasNext());
+        
+        // Now, test with prune
+        pig.registerQuery("a = load '"+Util.encodeEscape(inputDir.getAbsolutePath())+"'; b = foreach a generate y, z;");
+        it = pig.openIterator("b");
+        assertTrue(it.hasNext());
+        assertEquals(tuple(null,null), it.next());
+        assertFalse(it.hasNext());
+    }
+
+
+    @Test
+    public void testPigStorageSchemaWithOverwrite() throws Exception {
+        pigContext.connect();
+        String query = "a = LOAD '" + datadir
+                + "originput' using PigStorage(',') "
+                + "as (f1:chararray, f2:int);";
+
+        List<Tuple> expectedResults = Util
+                .getTuplesFromConstantTupleStrings(new String[] { "('A',1L)",
+                        "('B',2L)", "('C',3L)", "('D',2L)", "('A',5L)",
+                        "('B',5L)", "('C',8L)", "('A',8L)", "('D',8L)",
+                        "('A',9L)", });
+
+        pig.registerQuery(query);
+        pig.store("a", datadir + "aout", "PigStorage(',')");
+        // below shouldn't fail & we should get the same result in the end
+        pig.store("a", datadir + "aout", "PigStorage(',', '--overwrite true')");
+        pig.registerQuery("b = LOAD '" + datadir + "aout' using PigStorage(',');");
+        Iterator<Tuple> iter = pig.openIterator("b");
+        int counter = 0;
+        while (iter.hasNext()) {
+            String tuple = iter.next().toString();
+            Assert.assertEquals(expectedResults.get(counter++).toString(),
+                    tuple);
+        }
+        Assert.assertEquals(expectedResults.size(), counter);
+
+    }
+
+    @Test(expected = Exception.class)
+    public void testPigStorageSchemaFailureWithoutOverwrite() throws Exception {
+        pigContext.connect();
+        String query = "a = LOAD '" + datadir + "originput' using PigStorage(',') "
+                + "as (f1:chararray, f2:int);";
+        pig.registerQuery(query);
+        // should fail without the overwrite flag
+        pig.store("a", datadir + "aout", "PigStorage(',')");
+        pig.store("a", datadir + "aout", "PigStorage(',')");
     }
+    
 }

Modified: pig/branches/tez/test/org/apache/pig/test/TestPruneColumn.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestPruneColumn.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestPruneColumn.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestPruneColumn.java Mon Feb 24 21:41:38 2014
@@ -2143,7 +2143,7 @@ public class TestPruneColumn {
         pigServer.registerQuery("cm_data_raw = LOAD '" + Util.encodeEscape(input2.getAbsolutePath()) +
                 "' AS (s, m, l);");
         pigServer.registerQuery("cm_serve = FOREACH cm_data_raw GENERATE  s#'key3' AS f1,  s#'key4' AS f2, s#'key5' AS f3 ;");
-        pigServer.registerQuery("cm_serve_lowercase = stream cm_serve through `tr [:upper:] [:lower:]`;");
+        pigServer.registerQuery("cm_serve_lowercase = stream cm_serve through `tr '[:upper:]' '[:lower:]'`;");
         pigServer.registerQuery("cm_serve_final = FOREACH cm_serve_lowercase GENERATE  $0 AS cm_event_guid, $1 AS cm_receive_time, $2 AS cm_ctx_url;");
         pigServer.registerQuery("event_serve_project = FOREACH  event_serve GENERATE  s#'key3' AS event_guid, s#'key4' AS receive_time;");
         pigServer.registerQuery("event_serve_join = join cm_serve_final by (cm_event_guid), event_serve_project by (event_guid);");
@@ -2167,7 +2167,7 @@ public class TestPruneColumn {
             "serve_raw IF (( (chararray) (s#'type') == '0') AND ( (chararray) (s#'source') == '5'))," +
             "cm_click_raw IF (( (chararray) (s#'type') == '1') AND ( (chararray) (s#'source') == '5'));");
         pigServer.registerQuery("cm_serve = FOREACH serve_raw GENERATE  s#'cm_serve_id' AS cm_event_guid,  s#'cm_serve_timestamp_ms' AS cm_receive_time, s#'p_url' AS ctx ;");
-        pigServer.registerQuery("cm_serve_lowercase = stream cm_serve through `tr [:upper:] [:lower:]`;");
+        pigServer.registerQuery("cm_serve_lowercase = stream cm_serve through `tr '[:upper:]' '[:lower:]'`;");
         pigServer.registerQuery("cm_serve_final = FOREACH cm_serve_lowercase GENERATE  $0 AS cm_event_guid, $1 AS cm_receive_time, $2 AS ctx;");
         pigServer.registerQuery("filtered = FILTER event_serve BY (chararray) (s#'filter_key') neq 'xxxx' AND (chararray) (s#'filter_key') neq 'yyyy';");
         pigServer.registerQuery("event_serve_project = FOREACH filtered GENERATE s#'event_guid' AS event_guid, s#'receive_time' AS receive_time;");

Modified: pig/branches/tez/test/org/apache/pig/test/TestRank3.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestRank3.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestRank3.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestRank3.java Mon Feb 24 21:41:38 2014
@@ -52,6 +52,7 @@ public class TestRank3 {
             pigServer = new PigServer("local");
 
             data = resetData(pigServer);
+            data.set("empty");
             data.set(
                     "testcascade",
                     tuple(3,2,3),
@@ -142,6 +143,19 @@ public class TestRank3 {
         verifyExpected(data.get("result"), expected);
     }
 
+    // See PIG-3726
+    @Test
+    public void testRankEmptyRelation() throws Exception {
+      String query = "DATA = LOAD 'empty' USING mock.Storage();"
+        + "A = rank DATA;"
+        + "store A into 'empty_result' using mock.Storage();";
+
+      Util.registerMultiLineQuery(pigServer, query);
+
+      Set<Tuple> expected = ImmutableSet.of();
+      verifyExpected(data.get("empty_result"), expected);
+    }
+
     public void verifyExpected(List<Tuple> out, Set<Tuple> expected) {
         for (Tuple tup : out) {
             assertTrue(expected + " contains " + tup, expected.contains(tup));

Modified: pig/branches/tez/test/org/apache/pig/test/TestRegisteredJarVisibility.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestRegisteredJarVisibility.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestRegisteredJarVisibility.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestRegisteredJarVisibility.java Mon Feb 24 21:41:38 2014
@@ -37,23 +37,20 @@ import javax.tools.JavaFileObject;
 import javax.tools.StandardJavaFileManager;
 import javax.tools.ToolProvider;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.io.ByteStreams;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
 import org.apache.pig.impl.util.JarManager;
-import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.io.ByteStreams;
+
 /**
  * Ensure classes from a registered jar are available in the UDFContext.
  * Please see PIG-2532 for additional details.
@@ -99,6 +96,7 @@ public class TestRegisteredJarVisibility
         jar(filesToJar);
 
         cluster = MiniCluster.buildCluster();
+        Util.copyFromLocalToCluster(cluster, INPUT_FILE.getPath(), INPUT_FILE.getName());
     }
 
     @AfterClass()
@@ -116,14 +114,22 @@ public class TestRegisteredJarVisibility
         }
         Assert.assertTrue(exceptionThrown);
     }
-
-    @Test()
-    public void testRegisteredJarVisibility() throws IOException {
-        Util.copyFromLocalToCluster(cluster, INPUT_FILE.getPath(), INPUT_FILE.getName());
+    
+    @Test
+    public void testRegisterJarVisibilityMR() throws IOException {
         PigServer pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
+        testRegisteredJarVisibility(pigServer, INPUT_FILE.getName());
+    }
 
+    @Test
+    public void testRegisteredJarVisibilityLocal() throws IOException {
+        PigServer pigServer = new PigServer(ExecType.LOCAL, new Properties());
+        testRegisteredJarVisibility(pigServer, INPUT_FILE.getAbsolutePath());
+    }
+    
+    public void testRegisteredJarVisibility(PigServer pigServer, String inputPath) throws IOException {
         String query = "register " + jarFile.getAbsolutePath() + ";\n"
-                + "a = load '" + INPUT_FILE.getName()
+                + "a = load '" + inputPath
                 + "' using org.apache.pig.test.RegisteredJarVisibilityLoader();\n"
                 // register again to test classloader consistency
                 + "register " +  jarFile.getAbsolutePath() + ";\n"

Modified: pig/branches/tez/test/org/apache/pig/test/TestStore.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/org/apache/pig/test/TestStore.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/org/apache/pig/test/TestStore.java (original)
+++ pig/branches/tez/test/org/apache/pig/test/TestStore.java Mon Feb 24 21:41:38 2014
@@ -19,6 +19,7 @@ package org.apache.pig.test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
@@ -85,6 +86,7 @@ import org.apache.pig.test.utils.TestHel
 import org.joda.time.DateTimeZone;
 import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -692,6 +694,86 @@ public class TestStore {
             Util.deleteFile(ps.getPigContext(), TESTDIR);
         }
     }
+    
+    /**
+     * Test whether "part-m-00000" file is created on empty output when 
+     * {@link PigConfiguration#PIG_OUTPUT_LAZY} is set and if LazyOutputFormat is
+     * supported by Hadoop.
+     * The test covers multi store and single store case in local and mapreduce mode
+     *
+     * @throws IOException
+     */
+    @Test
+    public void testEmptyPartFileCreation() throws IOException {
+        
+        boolean isLazyOutputPresent = true;
+        try {
+            Class<?> clazz = PigContext
+                    .resolveClassName("org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat");
+            clazz.getMethod("setOutputFormatClass", Job.class, Class.class);
+        }
+        catch (Exception e) {
+            isLazyOutputPresent = false;
+        }
+        
+        //skip test if LazyOutputFormat is not supported (<= Hadoop 1.0.0)
+        Assume.assumeTrue("LazyOutputFormat couldn't be loaded, test is skipped", isLazyOutputPresent);
+        
+        PigServer ps = null;
+
+        try {
+            ExecType[] modes = new ExecType[] { ExecType.LOCAL, ExecType.MAPREDUCE};
+            String[] inputData = new String[]{"hello\tworld", "hi\tworld", "bye\tworld"};
+
+            String multiStoreScript = "a = load '"+ inputFileName + "';" +
+                    "b = filter a by $0 == 'hey';" +
+                    "c = filter a by $1 == 'globe';" +
+                    "d = limit a 2;" +
+                    "e = foreach d generate *, 'x';" +
+                    "f = filter e by $3 == 'y';" +
+                    "store b into '" + outputFileName + "_1';" +
+                    "store c into '" + outputFileName + "_2';" +
+                    "store f into '" + outputFileName + "_3';";
+
+            String singleStoreScript =  "a = load '"+ inputFileName + "';" +
+                    "b = filter a by $0 == 'hey';" +
+                    "store b into '" + outputFileName + "_1';" ;
+
+            for (ExecType execType : modes) {
+                for(boolean isMultiStore: new boolean[] { true, false}) {
+                    String script = (isMultiStore ? multiStoreScript :
+                        singleStoreScript);
+                    // since we will be switching between map red and local modes
+                    // we will need to make sure filelocalizer is reset before each
+                    // run.
+                    FileLocalizer.setInitialized(false);
+                    if(execType == ExecType.MAPREDUCE) {
+                        ps = new PigServer(ExecType.MAPREDUCE,
+                                cluster.getProperties());
+                    } else {
+                        Properties props = new Properties();
+                        props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
+                        ps = new PigServer(ExecType.LOCAL, props);
+                    }
+                    ps.getPigContext().getProperties().setProperty(
+                            PigConfiguration.PIG_OUTPUT_LAZY, "true");
+                    Util.deleteFile(ps.getPigContext(), TESTDIR);
+                    ps.setBatchOn();
+                    Util.createInputFile(ps.getPigContext(),
+                            inputFileName, inputData);
+                    Util.registerMultiLineQuery(ps, script);
+                    ps.executeBatch();
+                    for(int i = 1; i <= (isMultiStore ? 3 : 1); i++) {
+                        String output = "part-m-00000";
+                        assertFalse("For an empty output part-m-00000 should not exist in " +
+                                execType + " mode", Util.exists(ps.getPigContext(), output));
+                    }
+                }
+            }
+        } finally {
+            Util.deleteFile(ps.getPigContext(), TESTDIR);
+        }
+    }
 
     // A UDF which always throws an Exception so that the job can fail
     public static class FailUDF extends EvalFunc<String> {

Propchange: pig/branches/tez/test/org/apache/pig/test/data/bzipdir1.bz2/bzipdir2.bz2/recordLossblockHeaderEndsAt136500.txt.bz2
------------------------------------------------------------------------------
  Merged /pig/trunk/test/org/apache/pig/test/data/bzipdir1.bz2/bzipdir2.bz2/recordLossblockHeaderEndsAt136500.txt.bz2:r1554090-1571421

Modified: pig/branches/tez/test/perf/pigmix/src/java/org/apache/pig/test/pigmix/udf/PigPerformanceLoader.java
URL: http://svn.apache.org/viewvc/pig/branches/tez/test/perf/pigmix/src/java/org/apache/pig/test/pigmix/udf/PigPerformanceLoader.java?rev=1571454&r1=1571453&r2=1571454&view=diff
==============================================================================
--- pig/branches/tez/test/perf/pigmix/src/java/org/apache/pig/test/pigmix/udf/PigPerformanceLoader.java (original)
+++ pig/branches/tez/test/perf/pigmix/src/java/org/apache/pig/test/pigmix/udf/PigPerformanceLoader.java Mon Feb 24 21:41:38 2014
@@ -121,7 +121,7 @@ public class PigPerformanceLoader extend
 
                 byte[] copy = new byte[pos - start];
                 int i, j;
-                for (i = start + 1, j = 0; i < pos; i++, j++) copy[j] = b[i];
+                for (i = start, j = 0; i < pos; i++, j++) copy[j] = b[i];
                 String val = bytesToCharArray(copy);
                 m.put(key, val);
                 pos++; // move past ^C
@@ -172,7 +172,7 @@ public class PigPerformanceLoader extend
         @Override
         public Map<String, Object> bytesToMap(byte[] arg0,
                 ResourceFieldSchema fs) throws IOException {
-            return helper.bytesToMap(arg0, fs);
+            return bytesToMap(arg0);
         }
 
         @Override