Posted to commits@pig.apache.org by ch...@apache.org on 2012/11/07 06:58:06 UTC

svn commit: r1406469 [9/10] - in /pig/trunk: ./ test/org/apache/pig/test/

Modified: pig/trunk/test/org/apache/pig/test/TestRank1.java
URL: http://svn.apache.org/viewvc/pig/trunk/test/org/apache/pig/test/TestRank1.java?rev=1406469&r1=1406468&r2=1406469&view=diff
==============================================================================
--- pig/trunk/test/org/apache/pig/test/TestRank1.java (original)
+++ pig/trunk/test/org/apache/pig/test/TestRank1.java Wed Nov  7 05:58:04 2012
@@ -19,83 +19,54 @@ package org.apache.pig.test;
 
 import static org.apache.pig.builtin.mock.Storage.resetData;
 import static org.apache.pig.builtin.mock.Storage.tuple;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
 
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
-import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.builtin.mock.Storage.Data;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.data.TupleFactory;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 
-public class TestRank1 extends TestCase {
-
-    private final Log log = LogFactory.getLog(getClass());
+public class TestRank1 {
     private static TupleFactory tf = TupleFactory.getInstance();
     private static PigServer pigServer;
     private Data data;
 
-    @BeforeClass
-    public static void oneTimeSetUp() throws Exception {
-    }
-
-    @Override
     @Before
     public void setUp() throws Exception {
+        pigServer = new PigServer(ExecType.LOCAL);
 
-        try {
-            pigServer = new PigServer("local");
-
-            data = resetData(pigServer);
-            data.set("test01", tuple("A", 1, "N"), tuple("B", 2, "N"),
-                    tuple("C", 3, "M"), tuple("D", 4, "P"), tuple("E", 4, "Q"),
-                    tuple("E", 4, "Q"), tuple("F", 8, "Q"), tuple("F", 7, "Q"),
-                    tuple("F", 8, "T"), tuple("F", 8, "Q"), tuple("G", 10, "V"));
-
-            data.set(
-                    "test02",
-                    tuple("Michael", "Blythe", 1, 1, 1, 1, 4557045.046, 98027),
-                    tuple("Linda", "Mitchell", 2, 1, 1, 1, 5200475.231, 98027),
-                    tuple("Jillian", "Carson", 3, 1, 1, 1, 3857163.633, 98027),
-                    tuple("Garrett", "Vargas", 4, 1, 1, 1, 1764938.986, 98027),
-                    tuple("Tsvi", "Reiter", 5, 1, 1, 2, 2811012.715, 98027),
-                    tuple("Shu", "Ito", 6, 6, 2, 2, 3018725.486, 98055),
-                    tuple("Jose", "Saraiva", 7, 6, 2, 2, 3189356.247, 98055),
-                    tuple("David", "Campbell", 8, 6, 2, 3, 3587378.426, 98055),
-                    tuple("Tete", "Mensa-Annan", 9, 6, 2, 3, 1931620.184, 98055),
-                    tuple("Lynn", "Tsoflias", 10, 6, 2, 3, 1758385.926, 98055),
-                    tuple("Rachel", "Valdez", 11, 6, 2, 4, 2241204.042, 98055),
-                    tuple("Jae", "Pak", 12, 6, 2, 4, 5015682.375, 98055),
-                    tuple("Ranjit", "Varkey Chudukatil", 13, 6, 2, 4,
-                            3827950.238, 98055));
-
-        } catch (ExecException e) {
-            IOException ioe = new IOException("Failed to create Pig Server");
-            ioe.initCause(e);
-            throw ioe;
-        }
-    }
-
-    @Override
-    @After
-    public void tearDown() throws Exception {
-    }
-
-    @AfterClass
-    public static void oneTimeTearDown() throws Exception {
+        data = resetData(pigServer);
+        data.set("test01", tuple("A", 1, "N"), tuple("B", 2, "N"),
+                tuple("C", 3, "M"), tuple("D", 4, "P"), tuple("E", 4, "Q"),
+                tuple("E", 4, "Q"), tuple("F", 8, "Q"), tuple("F", 7, "Q"),
+                tuple("F", 8, "T"), tuple("F", 8, "Q"), tuple("G", 10, "V"));
+
+        data.set(
+                "test02",
+                tuple("Michael", "Blythe", 1, 1, 1, 1, 4557045.046, 98027),
+                tuple("Linda", "Mitchell", 2, 1, 1, 1, 5200475.231, 98027),
+                tuple("Jillian", "Carson", 3, 1, 1, 1, 3857163.633, 98027),
+                tuple("Garrett", "Vargas", 4, 1, 1, 1, 1764938.986, 98027),
+                tuple("Tsvi", "Reiter", 5, 1, 1, 2, 2811012.715, 98027),
+                tuple("Shu", "Ito", 6, 6, 2, 2, 3018725.486, 98055),
+                tuple("Jose", "Saraiva", 7, 6, 2, 2, 3189356.247, 98055),
+                tuple("David", "Campbell", 8, 6, 2, 3, 3587378.426, 98055),
+                tuple("Tete", "Mensa-Annan", 9, 6, 2, 3, 1931620.184, 98055),
+                tuple("Lynn", "Tsoflias", 10, 6, 2, 3, 1758385.926, 98055),
+                tuple("Rachel", "Valdez", 11, 6, 2, 4, 2241204.042, 98055),
+                tuple("Jae", "Pak", 12, 6, 2, 4, 5015682.375, 98055),
+                tuple("Ranjit", "Varkey Chudukatil", 13, 6, 2, 4,
+                        3827950.238, 98055));
     }
 
     @Test
@@ -323,7 +294,6 @@ public class TestRank1 extends TestCase 
     }
 
     public void verifyExpected(List<Tuple> out, Set<Tuple> expected) {
-
         for (Tuple tup : out) {
             assertTrue(expected + " contains " + tup, expected.contains(tup));
         }

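The TestRank1 hunk above is the template this commit applies to all of the rank tests: drop the junit.framework.TestCase base class, import org.junit.Assert.assertTrue statically, and let setUp throw directly instead of wrapping PigServer construction in a try/catch that rethrows ExecException as IOException. A minimal sketch of the resulting test shape, assuming Pig's RANK operator and only the mock.Storage helpers shown in the diff (the class and relation names here are illustrative, not part of the commit):

    import static org.apache.pig.builtin.mock.Storage.resetData;
    import static org.apache.pig.builtin.mock.Storage.tuple;
    import static org.junit.Assert.assertTrue;

    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;
    import org.apache.pig.builtin.mock.Storage.Data;
    import org.junit.Before;
    import org.junit.Test;

    // Illustrative class, not part of the commit.
    public class TestRankSketch {
        private PigServer pigServer;
        private Data data;

        @Before
        public void setUp() throws Exception {
            // Local mode needs no cluster; any exception simply fails the
            // test, so the old try/catch-and-rethrow block is unnecessary.
            pigServer = new PigServer(ExecType.LOCAL);
            data = resetData(pigServer);
            data.set("test01", tuple("A", 1, "N"), tuple("B", 2, "N"));
        }

        @Test
        public void testRank() throws Exception {
            pigServer.registerQuery(
                "A = LOAD 'test01' USING mock.Storage() AS (c1:chararray, c2:int, c3:chararray);"
                + "B = RANK A;"
                + "STORE B INTO 'result' USING mock.Storage();");
            assertTrue("rank produced no output", !data.get("result").isEmpty());
        }
    }
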
Modified: pig/trunk/test/org/apache/pig/test/TestRank2.java
URL: http://svn.apache.org/viewvc/pig/trunk/test/org/apache/pig/test/TestRank2.java?rev=1406469&r1=1406468&r2=1406469&view=diff
==============================================================================
--- pig/trunk/test/org/apache/pig/test/TestRank2.java (original)
+++ pig/trunk/test/org/apache/pig/test/TestRank2.java Wed Nov  7 05:58:04 2012
@@ -19,84 +19,53 @@ package org.apache.pig.test;
 
 import static org.apache.pig.builtin.mock.Storage.resetData;
 import static org.apache.pig.builtin.mock.Storage.tuple;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
 
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.pig.PigServer;
-import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.builtin.mock.Storage.Data;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.data.TupleFactory;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 
-public class TestRank2 extends TestCase {
-
-    private final Log log = LogFactory.getLog(getClass());
+public class TestRank2 {
     private static PigServer pigServer;
     private static TupleFactory tf = TupleFactory.getInstance();
     private Data data;
 
-    @BeforeClass
-    public static void oneTimeSetUp() throws Exception {
-    }
-
-    @Override
     @Before
     public void setUp() throws Exception {
+        pigServer = new PigServer("local");
 
-        try {
-            pigServer = new PigServer("local");
-
-            data = resetData(pigServer);
-            data.set("test01", tuple("A", 1, "N"), tuple("B", 2, "N"),
-                    tuple("C", 3, "M"), tuple("D", 4, "P"), tuple("E", 4, "Q"),
-                    tuple("E", 4, "Q"), tuple("F", 8, "Q"), tuple("F", 7, "Q"),
-                    tuple("F", 8, "T"), tuple("F", 8, "Q"), tuple("G", 10, "V"));
-
-            data.set(
-                    "test02",
-                    tuple("Michael", "Blythe", 1, 1, 1, 1, 4557045.046, 98027),
-                    tuple("Linda", "Mitchell", 2, 1, 1, 1, 5200475.231, 98027),
-                    tuple("Jillian", "Carson", 3, 1, 1, 1, 3857163.633, 98027),
-                    tuple("Garrett", "Vargas", 4, 1, 1, 1, 1764938.986, 98027),
-                    tuple("Tsvi", "Reiter", 5, 1, 1, 2, 2811012.715, 98027),
-                    tuple("Shu", "Ito", 6, 6, 2, 2, 3018725.486, 98055),
-                    tuple("Jose", "Saraiva", 7, 6, 2, 2, 3189356.247, 98055),
-                    tuple("David", "Campbell", 8, 6, 2, 3, 3587378.426, 98055),
-                    tuple("Tete", "Mensa-Annan", 9, 6, 2, 3, 1931620.184, 98055),
-                    tuple("Lynn", "Tsoflias", 10, 6, 2, 3, 1758385.926, 98055),
-                    tuple("Rachel", "Valdez", 11, 6, 2, 4, 2241204.042, 98055),
-                    tuple("Jae", "Pak", 12, 6, 2, 4, 5015682.375, 98055),
-                    tuple("Ranjit", "Varkey Chudukatil", 13, 6, 2, 4,
-                            3827950.238, 98055));
-
-        } catch (ExecException e) {
-            IOException ioe = new IOException("Failed to create Pig Server");
-            ioe.initCause(e);
-            throw ioe;
-        }
-    }
-
-    @Override
-    @After
-    public void tearDown() throws Exception {
-    }
-
-    @AfterClass
-    public static void oneTimeTearDown() throws Exception {
+        data = resetData(pigServer);
+        data.set("test01", tuple("A", 1, "N"), tuple("B", 2, "N"),
+                tuple("C", 3, "M"), tuple("D", 4, "P"), tuple("E", 4, "Q"),
+                tuple("E", 4, "Q"), tuple("F", 8, "Q"), tuple("F", 7, "Q"),
+                tuple("F", 8, "T"), tuple("F", 8, "Q"), tuple("G", 10, "V"));
+
+        data.set(
+                "test02",
+                tuple("Michael", "Blythe", 1, 1, 1, 1, 4557045.046, 98027),
+                tuple("Linda", "Mitchell", 2, 1, 1, 1, 5200475.231, 98027),
+                tuple("Jillian", "Carson", 3, 1, 1, 1, 3857163.633, 98027),
+                tuple("Garrett", "Vargas", 4, 1, 1, 1, 1764938.986, 98027),
+                tuple("Tsvi", "Reiter", 5, 1, 1, 2, 2811012.715, 98027),
+                tuple("Shu", "Ito", 6, 6, 2, 2, 3018725.486, 98055),
+                tuple("Jose", "Saraiva", 7, 6, 2, 2, 3189356.247, 98055),
+                tuple("David", "Campbell", 8, 6, 2, 3, 3587378.426, 98055),
+                tuple("Tete", "Mensa-Annan", 9, 6, 2, 3, 1931620.184, 98055),
+                tuple("Lynn", "Tsoflias", 10, 6, 2, 3, 1758385.926, 98055),
+                tuple("Rachel", "Valdez", 11, 6, 2, 4, 2241204.042, 98055),
+                tuple("Jae", "Pak", 12, 6, 2, 4, 5015682.375, 98055),
+                tuple("Ranjit", "Varkey Chudukatil", 13, 6, 2, 4,
+                        3827950.238, 98055));
     }
 
     @Test
@@ -145,7 +114,6 @@ public class TestRank2 extends TestCase 
                 tf.newTuple(ImmutableList.of((long) 7, "G", 10, "V")));
 
         verifyExpected(data.get("result"), expected);
-
     }
 
     @Test
@@ -197,10 +165,8 @@ public class TestRank2 extends TestCase 
     }
 
     public void verifyExpected(List<Tuple> out, Set<Tuple> expected) {
-
         for (Tuple tup : out) {
             assertTrue(expected + " contains " + tup, expected.contains(tup));
         }
     }
-
-}
+}
\ No newline at end of file

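TestRank2's setUp keeps the string-based constructor, new PigServer("local"), where TestRank1 switched to the ExecType.LOCAL enum; the two are interchangeable for local execution, the enum form merely being checked at compile time. A trivial sketch, assuming only the constructors already shown in this commit (class name illustrative):

    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;

    public class LocalServerSketch {
        public static void main(String[] args) throws Exception {
            // Equivalent ways to start a local-mode server: the string is
            // parsed to an ExecType, the enum skips the parsing step.
            PigServer byName = new PigServer("local");
            PigServer byEnum = new PigServer(ExecType.LOCAL);
            byName.shutdown();
            byEnum.shutdown();
        }
    }
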
Modified: pig/trunk/test/org/apache/pig/test/TestRank3.java
URL: http://svn.apache.org/viewvc/pig/trunk/test/org/apache/pig/test/TestRank3.java?rev=1406469&r1=1406468&r2=1406469&view=diff
==============================================================================
--- pig/trunk/test/org/apache/pig/test/TestRank3.java (original)
+++ pig/trunk/test/org/apache/pig/test/TestRank3.java Wed Nov  7 05:58:04 2012
@@ -19,22 +19,17 @@ package org.apache.pig.test;
 
 import static org.apache.pig.builtin.mock.Storage.resetData;
 import static org.apache.pig.builtin.mock.Storage.tuple;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
 
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.pig.PigServer;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.builtin.mock.Storage.Data;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.data.TupleFactory;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -42,9 +37,7 @@ import org.junit.Test;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 
-public class TestRank3 extends TestCase {
-
-    private final Log log = LogFactory.getLog(getClass());
+public class TestRank3 {
     private static PigServer pigServer;
     private static TupleFactory tf = TupleFactory.getInstance();
     private Data data;
@@ -53,10 +46,8 @@ public class TestRank3 extends TestCase 
     public static void oneTimeSetUp() throws Exception {
     }
 
-    @Override
     @Before
     public void setUp() throws Exception {
-
         try {
             pigServer = new PigServer("local");
 
@@ -101,15 +92,6 @@ public class TestRank3 extends TestCase 
         }
     }
 
-    @Override
-    @After
-    public void tearDown() throws Exception {
-    }
-
-    @AfterClass
-    public static void oneTimeTearDown() throws Exception {
-    }
-
     @Test
     public void testRankCascade() throws IOException {
         String query = "R1 = LOAD 'testcascade' USING mock.Storage() AS (a:long,b:long,c:long);"
@@ -125,43 +107,42 @@ public class TestRank3 extends TestCase 
         Util.registerMultiLineQuery(pigServer, query);
 
         Set<Tuple> expected = ImmutableSet.of(
-                tf.newTuple(ImmutableList.of((long)1,(long)21,(long)5,(long)7,(long)1,(long)1,(long)0,(long)8,(long)8)),
-                tf.newTuple(ImmutableList.of((long)2,(long)26,(long)2,(long)3,(long)2,(long)5,(long)1,(long)9,(long)10)),
-                tf.newTuple(ImmutableList.of((long)3,(long)30,(long)24,(long)21,(long)2,(long)3,(long)1,(long)3,(long)10)),
-                tf.newTuple(ImmutableList.of((long)4,(long)6,(long)10,(long)8,(long)3,(long)4,(long)1,(long)7,(long)2)),
-                tf.newTuple(ImmutableList.of((long)5,(long)8,(long)28,(long)25,(long)3,(long)2,(long)1,(long)0,(long)2)),
-                tf.newTuple(ImmutableList.of((long)6,(long)28,(long)11,(long)12,(long)4,(long)6,(long)2,(long)7,(long)10)),
-                tf.newTuple(ImmutableList.of((long)7,(long)9,(long)26,(long)22,(long)5,(long)7,(long)3,(long)2,(long)3)),
-                tf.newTuple(ImmutableList.of((long)8,(long)5,(long)6,(long)5,(long)6,(long)8,(long)3,(long)8,(long)1)),
-                tf.newTuple(ImmutableList.of((long)9,(long)29,(long)16,(long)15,(long)7,(long)9,(long)4,(long)6,(long)10)),
-                tf.newTuple(ImmutableList.of((long)10,(long)18,(long)12,(long)10,(long)8,(long)11,(long)5,(long)7,(long)6)),
-                tf.newTuple(ImmutableList.of((long)11,(long)14,(long)17,(long)14,(long)9,(long)10,(long)5,(long)6,(long)5)),
-                tf.newTuple(ImmutableList.of((long)12,(long)6,(long)12,(long)8,(long)10,(long)11,(long)5,(long)7,(long)2)),
-                tf.newTuple(ImmutableList.of((long)13,(long)2,(long)17,(long)13,(long)11,(long)10,(long)5,(long)6,(long)0)),
-                tf.newTuple(ImmutableList.of((long)14,(long)26,(long)3,(long)3,(long)12,(long)14,(long)6,(long)9,(long)10)),
-                tf.newTuple(ImmutableList.of((long)15,(long)15,(long)20,(long)18,(long)13,(long)13,(long)6,(long)4,(long)5)),
-                tf.newTuple(ImmutableList.of((long)16,(long)3,(long)29,(long)24,(long)14,(long)12,(long)6,(long)0,(long)0)),
-                tf.newTuple(ImmutableList.of((long)17,(long)23,(long)21,(long)19,(long)15,(long)16,(long)7,(long)4,(long)8)),
-                tf.newTuple(ImmutableList.of((long)18,(long)19,(long)19,(long)16,(long)16,(long)17,(long)7,(long)5,(long)6)),
-                tf.newTuple(ImmutableList.of((long)19,(long)20,(long)30,(long)26,(long)16,(long)15,(long)7,(long)0,(long)6)),
-                tf.newTuple(ImmutableList.of((long)20,(long)12,(long)21,(long)17,(long)17,(long)16,(long)7,(long)4,(long)4)),
-                tf.newTuple(ImmutableList.of((long)21,(long)4,(long)1,(long)1,(long)18,(long)19,(long)7,(long)10,(long)1)),
-                tf.newTuple(ImmutableList.of((long)22,(long)1,(long)7,(long)4,(long)19,(long)18,(long)7,(long)8,(long)0)),
-                tf.newTuple(ImmutableList.of((long)23,(long)24,(long)14,(long)11,(long)20,(long)21,(long)8,(long)7,(long)9)),
-                tf.newTuple(ImmutableList.of((long)24,(long)16,(long)25,(long)20,(long)21,(long)20,(long)8,(long)3,(long)5)),
-                tf.newTuple(ImmutableList.of((long)25,(long)25,(long)27,(long)23,(long)22,(long)22,(long)9,(long)1,(long)9)),
-                tf.newTuple(ImmutableList.of((long)26,(long)21,(long)8,(long)7,(long)23,(long)25,(long)9,(long)8,(long)8)),
-                tf.newTuple(ImmutableList.of((long)27,(long)17,(long)4,(long)2,(long)24,(long)26,(long)9,(long)9,(long)6)),
-                tf.newTuple(ImmutableList.of((long)28,(long)10,(long)8,(long)6,(long)25,(long)25,(long)9,(long)8,(long)4)),
-                tf.newTuple(ImmutableList.of((long)29,(long)11,(long)15,(long)9,(long)25,(long)24,(long)9,(long)7,(long)4)),
-                tf.newTuple(ImmutableList.of((long)30,(long)12,(long)23,(long)17,(long)25,(long)23,(long)9,(long)4,(long)4))
+                tf.newTuple(ImmutableList.of(1L,21L,5L,7L,1L,1L,0L,8L,8L)),
+                tf.newTuple(ImmutableList.of(2L,26L,2L,3L,2L,5L,1L,9L,10L)),
+                tf.newTuple(ImmutableList.of(3L,30L,24L,21L,2L,3L,1L,3L,10L)),
+                tf.newTuple(ImmutableList.of(4L,6L,10L,8L,3L,4L,1L,7L,2L)),
+                tf.newTuple(ImmutableList.of(5L,8L,28L,25L,3L,2L,1L,0L,2L)),
+                tf.newTuple(ImmutableList.of(6L,28L,11L,12L,4L,6L,2L,7L,10L)),
+                tf.newTuple(ImmutableList.of(7L,9L,26L,22L,5L,7L,3L,2L,3L)),
+                tf.newTuple(ImmutableList.of(8L,5L,6L,5L,6L,8L,3L,8L,1L)),
+                tf.newTuple(ImmutableList.of(9L,29L,16L,15L,7L,9L,4L,6L,10L)),
+                tf.newTuple(ImmutableList.of(10L,18L,12L,10L,8L,11L,5L,7L,6L)),
+                tf.newTuple(ImmutableList.of(11L,14L,17L,14L,9L,10L,5L,6L,5L)),
+                tf.newTuple(ImmutableList.of(12L,6L,12L,8L,10L,11L,5L,7L,2L)),
+                tf.newTuple(ImmutableList.of(13L,2L,17L,13L,11L,10L,5L,6L,0L)),
+                tf.newTuple(ImmutableList.of(14L,26L,3L,3L,12L,14L,6L,9L,10L)),
+                tf.newTuple(ImmutableList.of(15L,15L,20L,18L,13L,13L,6L,4L,5L)),
+                tf.newTuple(ImmutableList.of(16L,3L,29L,24L,14L,12L,6L,0L,0L)),
+                tf.newTuple(ImmutableList.of(17L,23L,21L,19L,15L,16L,7L,4L,8L)),
+                tf.newTuple(ImmutableList.of(18L,19L,19L,16L,16L,17L,7L,5L,6L)),
+                tf.newTuple(ImmutableList.of(19L,20L,30L,26L,16L,15L,7L,0L,6L)),
+                tf.newTuple(ImmutableList.of(20L,12L,21L,17L,17L,16L,7L,4L,4L)),
+                tf.newTuple(ImmutableList.of(21L,4L,1L,1L,18L,19L,7L,10L,1L)),
+                tf.newTuple(ImmutableList.of(22L,1L,7L,4L,19L,18L,7L,8L,0L)),
+                tf.newTuple(ImmutableList.of(23L,24L,14L,11L,20L,21L,8L,7L,9L)),
+                tf.newTuple(ImmutableList.of(24L,16L,25L,20L,21L,20L,8L,3L,5L)),
+                tf.newTuple(ImmutableList.of(25L,25L,27L,23L,22L,22L,9L,1L,9L)),
+                tf.newTuple(ImmutableList.of(26L,21L,8L,7L,23L,25L,9L,8L,8L)),
+                tf.newTuple(ImmutableList.of(27L,17L,4L,2L,24L,26L,9L,9L,6L)),
+                tf.newTuple(ImmutableList.of(28L,10L,8L,6L,25L,25L,9L,8L,4L)),
+                tf.newTuple(ImmutableList.of(29L,11L,15L,9L,25L,24L,9L,7L,4L)),
+                tf.newTuple(ImmutableList.of(30L,12L,23L,17L,25L,23L,9L,4L,4L))
         );
 
         verifyExpected(data.get("result"), expected);
     }
 
     public void verifyExpected(List<Tuple> out, Set<Tuple> expected) {
-
         for (Tuple tup : out) {
             assertTrue(expected + " contains " + tup, expected.contains(tup));
         }

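The expected-data rewrite in TestRank3 is purely notational: each (long)N cast becomes an L-suffixed literal. Both box to equal java.lang.Long values, so set membership in verifyExpected is unaffected. A small sketch, relying on the same tuple equality the tests already depend on (class name illustrative):

    import com.google.common.collect.ImmutableList;

    import org.apache.pig.data.Tuple;
    import org.apache.pig.data.TupleFactory;

    public class LongLiteralSketch {
        public static void main(String[] args) {
            TupleFactory tf = TupleFactory.getInstance();
            // A cast and an L-suffixed literal box to equal Long values.
            Tuple cast = tf.newTuple(ImmutableList.of((long) 1, (long) 21));
            Tuple literal = tf.newTuple(ImmutableList.of(1L, 21L));
            System.out.println(cast.equals(literal)); // prints: true
        }
    }
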
Modified: pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java
URL: http://svn.apache.org/viewvc/pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java?rev=1406469&r1=1406468&r2=1406469&view=diff
==============================================================================
--- pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java (original)
+++ pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java Wed Nov  7 05:58:04 2012
@@ -17,34 +17,35 @@
  */
 package org.apache.pig.test;
 
-import static org.apache.pig.ExecType.MAPREDUCE;
+import static org.apache.pig.builtin.mock.Storage.resetData;
+import static org.apache.pig.builtin.mock.Storage.tuple;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 
+import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
+import org.apache.pig.builtin.mock.Storage.Data;
 import org.apache.pig.data.BagFactory;
 import org.apache.pig.data.DefaultTuple;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.data.TupleFactory;
 import org.apache.pig.parser.ParserException;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-import junit.framework.TestCase;
 
 /**
  * Test PORelationToExprProject which is a special project
  * introduced to handle the following case:
  * This project is Project(*) introduced after a relational operator
  * to supply a bag as output (as an expression). This project is either
- * providing the bag as input to a successor expression operator or is 
+ * providing the bag as input to a successor expression operator or is
  * itself the leaf in a inner plan
  * If the predecessor relational operator sends an EOP
  * then send an empty bag first to signal "empty" output
@@ -56,21 +57,18 @@ import junit.framework.TestCase;
  * a = load 'baginp.txt' as (b:bag{t:tuple()}); b = foreach a generate $0; dump b;
  * will go through a regular project (without the following flag)
  */
-@RunWith(JUnit4.class)
-public class TestRelationToExprProject extends TestCase {
-
-    private static MiniCluster cluster = MiniCluster.buildCluster();
+public class TestRelationToExprProject {
     private PigServer pigServer;
-    private static final String TEST_FILTER_COUNT3_INPUT="test/org/apache/pig/test/data/TestRelationToExprProjectInput.txt"; 
-    
+    private static final String TEST_FILTER_COUNT3_INPUT="test/org/apache/pig/test/data/TestRelationToExprProjectInput.txt";
+
     /* (non-Javadoc)
      * @see junit.framework.TestCase#setUp()
      */
     @Before
     public void setUp() throws Exception {
-        pigServer = new PigServer(MAPREDUCE, cluster.getProperties());
+        pigServer = new PigServer(ExecType.LOCAL);
     }
-    
+
     /* (non-Javadoc)
      * @see junit.framework.TestCase#tearDown()
      */
@@ -78,28 +76,22 @@ public class TestRelationToExprProject e
     public void tearDown() throws Exception {
         pigServer.shutdown();
     }
-    
-    @AfterClass
-    public static void oneTimeTearDown() throws Exception {
-        cluster.shutDown();
-    }
-    
+
     // based on the script provided in the jira issue:PIG-514
     // tests that when a filter inside a foreach filters away all tuples
     // for a group, an empty bag is still provided to udfs whose
     // input is the filter
     @Test
     public void testFilterCount1() throws IOException, ParserException {
-        
-        String[] inputData = new String[] {"1\t1\t3","1\t2\t3", "2\t1\t3", "2\t1\t3"};
-        Util.createInputFile(cluster, "test.txt", inputData);
-        String script = "test   = load 'test.txt' as (col1: int, col2: int, col3: int);" +
-        		"test2 = group test by col1;" +
-        		"test3 = foreach test2 {" +
-        		"        filter_one    = filter test by (col2==1);" +
-        		"        filter_notone = filter test by (col2!=1);" +
-        		"        generate group as col1, COUNT(filter_one) as cnt_one, COUNT(filter_notone) as cnt_notone;};";
-        Util.registerMultiLineQuery(pigServer, script);
+        Data data = resetData(pigServer);
+        data.set("foo", tuple(1,1,3), tuple(1,2,3), tuple(2,1,3), tuple(2,1,3));
+        String script = "test   = load 'foo' using mock.Storage() as (col1: int, col2: int, col3: int);" +
+                "test2 = group test by col1;" +
+                "test3 = foreach test2 {" +
+                "        filter_one    = filter test by (col2==1);" +
+                "        filter_notone = filter test by (col2!=1);" +
+                "        generate group as col1, COUNT(filter_one) as cnt_one, COUNT(filter_notone) as cnt_notone;};";
+        pigServer.registerQuery(script);
         Iterator<Tuple> it = pigServer.openIterator("test3");
         Tuple[] expected = new DefaultTuple[2];
         expected[0] = (Tuple) Util.getPigConstant("(1,1L,1L)");
@@ -122,37 +114,37 @@ public class TestRelationToExprProject e
         for (int j = 0; j < expected.length; j++) {
             assertTrue(expected[j].equals(results[j]));
         }
-        Util.deleteFile(cluster, "test.txt");
     }
-    
+
     // based on jira PIG-710
     // tests that when a filter inside a foreach filters away all tuples
     // for a group, an empty bag is still provided to udfs whose
     // input is the filter
     @Test
     public void testFilterCount2() throws IOException, ParserException {
-        Util.createInputFile(cluster, "filterbug.data", new String[] {
-                "a\thello" ,
-                "a\tgoodbye" ,
-                "b\tgoodbye" ,
-                "c\thello" ,
-                "c\thello" ,
-                "c\thello" ,
-                "d\twhat"
-        });
-        String query = "A = load 'filterbug.data' using PigStorage() as ( id:chararray, str:chararray );" +
-        		"B = group A by ( id );" +
-        		"Cfiltered = foreach B {" +
-        		"        D = filter A by (" +
-        		"                str matches 'hello'" +
-        		"                );" +
-        		"        matchedcount = COUNT(D);" +
-        		"        generate" +
-        		"                group," +
-        		"                matchedcount as matchedcount," +
-        		"                A.str;" +
-        		"        };";  
-        Util.registerMultiLineQuery(pigServer, query);
+        Data data = resetData(pigServer);
+        data.set("foo",
+                tuple("a", "hello"),
+                tuple("a", "goodbye"),
+                tuple("b", "goodbye"),
+                tuple("c", "hello"),
+                tuple("c", "hello"),
+                tuple("c", "hello"),
+                tuple("d", "what")
+                );
+        String query = "A = load 'foo' using mock.Storage() as ( id:chararray, str:chararray );" +
+                "B = group A by ( id );" +
+                "Cfiltered = foreach B {" +
+                "        D = filter A by (" +
+                "                str matches 'hello'" +
+                "                );" +
+                "        matchedcount = COUNT(D);" +
+                "        generate" +
+                "                group," +
+                "                matchedcount as matchedcount," +
+                "                A.str;" +
+                "        };";
+        pigServer.registerQuery(query);
         Iterator<Tuple> it = pigServer.openIterator("Cfiltered");
         Map<String, Tuple> expected = new HashMap<String, Tuple>();
         expected.put("a", (Tuple) Util.getPigConstant("('a',1L,{('hello'),('goodbye')})"));
@@ -166,29 +158,27 @@ public class TestRelationToExprProject e
             i++;
         }
         assertEquals(4, i);
-        Util.deleteFile(cluster, "filterbug.data");
     }
-    
+
     // based on jira PIG-739
     // tests that when a filter inside a foreach filters away all tuples
     // for a group, an empty bag is still provided to udfs whose
     // input is the filter
     @Test
     public void testFilterCount3() throws IOException, ParserException {
-        Util.copyFromLocalToCluster(cluster, TEST_FILTER_COUNT3_INPUT, "testdata");
-        String query = "TESTDATA =  load 'testdata' using PigStorage() as (timestamp:chararray, testid:chararray, userid: chararray, sessionid:chararray, value:long, flag:int);" +
-        		"TESTDATA_FILTERED = filter TESTDATA by (timestamp gte '1230800400000' and timestamp lt '1230804000000' and value != 0);" +
-        		"TESTDATA_GROUP = group TESTDATA_FILTERED by testid;" +
-        		"TESTDATA_AGG = foreach TESTDATA_GROUP {" +
-        		"                        A = filter TESTDATA_FILTERED by (userid eq sessionid);" +
-        		"                        C = distinct A.userid;" +
-        		"                        generate group as testid, COUNT(TESTDATA_FILTERED) as counttestdata, COUNT(C) as distcount, SUM(TESTDATA_FILTERED.flag) as total_flags;" +
-        		"                }" +
-        		"TESTDATA_AGG_1 = group TESTDATA_AGG ALL;" +
-        		"TESTDATA_AGG_2 = foreach TESTDATA_AGG_1 generate COUNT(TESTDATA_AGG);" ;
-        Util.registerMultiLineQuery(pigServer, query);
+        String query = "TESTDATA =  load '"+TEST_FILTER_COUNT3_INPUT+"' using PigStorage() as (timestamp:chararray, testid:chararray, userid: chararray, sessionid:chararray, value:long, flag:int);" +
+                "TESTDATA_FILTERED = filter TESTDATA by (timestamp gte '1230800400000' and timestamp lt '1230804000000' and value != 0);" +
+                "TESTDATA_GROUP = group TESTDATA_FILTERED by testid;" +
+                "TESTDATA_AGG = foreach TESTDATA_GROUP {" +
+                "                        A = filter TESTDATA_FILTERED by (userid eq sessionid);" +
+                "                        C = distinct A.userid;" +
+                "                        generate group as testid, COUNT(TESTDATA_FILTERED) as counttestdata, COUNT(C) as distcount, SUM(TESTDATA_FILTERED.flag) as total_flags;" +
+                "                }" +
+                "TESTDATA_AGG_1 = group TESTDATA_AGG ALL;" +
+                "TESTDATA_AGG_2 = foreach TESTDATA_AGG_1 generate COUNT(TESTDATA_AGG);" ;
+        pigServer.registerQuery(query);
         Iterator<Tuple> it = pigServer.openIterator("TESTDATA_AGG_2");
-        
+
         int i = 0;
         while(it.hasNext()) {
             Tuple actual = it.next();
@@ -196,23 +186,22 @@ public class TestRelationToExprProject e
             i++;
         }
         assertEquals(1, i);
-        Util.deleteFile(cluster, "testdata");
     }
-    
+
     // test case where RelationToExprProject is present in the
     // single inner plan of foreach - this will test that it does
     // send an EOP eventually for each input of the foreach
     @Test
     public void testFilter1() throws IOException, ParserException {
-        
-        String[] inputData = new String[] {"1\t1\t3","1\t2\t3", "2\t1\t3", "2\t1\t3", "3\t4\t4"};
-        Util.createInputFile(cluster, "test.txt", inputData);
-        String script = "test   = load 'test.txt' as (col1: int, col2: int, col3: int);" +
+        Data data = resetData(pigServer);
+        data.set("foo", tuple(1,1,3), tuple(1,2,3), tuple(2,1,3), tuple(2,1,3), tuple(3,4,4));
+
+        String script = "test   = load 'foo' using mock.Storage() as (col1: int, col2: int, col3: int);" +
                 "test2 = group test by col1;" +
                 "test3 = foreach test2 {" +
                 "        filter_one    = filter test by (col2==1);" +
                 "        generate filter_one;};";
-        Util.registerMultiLineQuery(pigServer, script);
+        pigServer.registerQuery(script);
         Iterator<Tuple> it = pigServer.openIterator("test3");
         Map<Tuple, Integer> expected = new HashMap<Tuple, Integer>();
         expected.put((Tuple) Util.getPigConstant("({(1,1,3)})"), 0);
@@ -235,9 +224,8 @@ public class TestRelationToExprProject e
         for (Integer occurences : expected.values()) {
             assertEquals(new Integer(1), occurences);
         }
-        Util.deleteFile(cluster, "test.txt");
     }
-    
+
     // test case where RelationToExprProject is present in a
     // different inner plan along with another plan to project the group
     // in foreach - this will test that reset() correctly resets
@@ -245,10 +233,10 @@ public class TestRelationToExprProject e
     // input has been seen on a fresh input from foreach.
     @Test
     public void testFilter2() throws IOException, ParserException {
-        
-        String[] inputData = new String[] {"1\t1\t3","1\t2\t3", "2\t1\t3", "2\t1\t3", "3\t4\t4"};
-        Util.createInputFile(cluster, "test.txt", inputData);
-        String script = "test   = load 'test.txt' as (col1: int, col2: int, col3: int);" +
+        Data data = resetData(pigServer);
+        data.set("foo", tuple(1,1,3), tuple(1,2,3), tuple(2,1,3), tuple(2,1,3), tuple(3,4,4));
+
+        String script = "test   = load 'foo' using mock.Storage() as (col1: int, col2: int, col3: int);" +
                 "test2 = group test by col1;" +
                 "test3 = foreach test2 {" +
                 "        filter_one    = filter test by (col2==1);" +
@@ -275,10 +263,7 @@ public class TestRelationToExprProject e
             i++;
         }
         for (Integer occurences : expected.values()) {
-            assertEquals(new Integer(1), occurences);
+            assertEquals(Integer.valueOf(1), occurences);
         }
-        Util.deleteFile(cluster, "test.txt");
     }
-
-    
 }

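The recurring edit in TestRelationToExprProject replaces file fixtures on the MiniCluster (Util.createInputFile / Util.deleteFile) with in-memory relations: resetData registers mock.Storage and clears any earlier data, data.set seeds the input, and the script loads it by name with mock.Storage(). A condensed sketch of the round trip, using only calls that appear in this commit (relation names illustrative):

    import static org.apache.pig.builtin.mock.Storage.resetData;
    import static org.apache.pig.builtin.mock.Storage.tuple;

    import java.util.Iterator;

    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;
    import org.apache.pig.builtin.mock.Storage.Data;
    import org.apache.pig.data.Tuple;

    public class MockStorageSketch {
        public static void main(String[] args) throws Exception {
            PigServer pigServer = new PigServer(ExecType.LOCAL);
            // Replaces per-test file creation/deletion on the cluster.
            Data data = resetData(pigServer);
            data.set("foo", tuple(1, 1, 3), tuple(1, 2, 3));
            pigServer.registerQuery(
                "test = load 'foo' using mock.Storage() as (col1:int, col2:int, col3:int);"
                + "test2 = group test by col1;");
            Iterator<Tuple> it = pigServer.openIterator("test2");
            while (it.hasNext()) {
                System.out.println(it.next());
            }
            pigServer.shutdown();
        }
    }
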
Modified: pig/trunk/test/org/apache/pig/test/TestSchemaUtil.java
URL: http://svn.apache.org/viewvc/pig/trunk/test/org/apache/pig/test/TestSchemaUtil.java?rev=1406469&r1=1406468&r2=1406469&view=diff
==============================================================================
--- pig/trunk/test/org/apache/pig/test/TestSchemaUtil.java (original)
+++ pig/trunk/test/org/apache/pig/test/TestSchemaUtil.java Wed Nov  7 05:58:04 2012
@@ -18,83 +18,77 @@
 
 package org.apache.pig.test;
 
-import java.util.Arrays;
+import static org.junit.Assert.assertEquals;
 
-import junit.framework.TestCase;
+import java.util.Arrays;
 
 import org.apache.pig.data.DataType;
-import org.apache.pig.impl.logicalLayer.FrontendException;
 import org.apache.pig.impl.logicalLayer.schema.Schema;
 import org.apache.pig.impl.logicalLayer.schema.SchemaUtil;
+import org.junit.Test;
 
-public class TestSchemaUtil extends TestCase {
+public class TestSchemaUtil {
 
-    public void testTupleSchema() {
-        try {
-            String tupleName = "mytuple";
-            String[] fieldNames = new String[] { "field_0", "field_1" };
-            Byte[] dataTypes = new Byte[] { DataType.LONG, DataType.CHARARRAY };
-
-            String expected = "{mytuple: (field_0: long,field_1: chararray)}";
-            Schema tupleSchema = SchemaUtil.newTupleSchema(tupleName,
-                    fieldNames, dataTypes);
-            assertEquals(expected, tupleSchema.toString());
-
-            tupleSchema = SchemaUtil.newTupleSchema(tupleName, Arrays
-                    .asList(fieldNames), Arrays.asList(dataTypes));
-            assertEquals(expected, tupleSchema.toString());
-
-            expected = "{t: (field_0: long,field_1: chararray)}";
-            tupleSchema = SchemaUtil.newTupleSchema(fieldNames, dataTypes);
-            assertEquals(expected, tupleSchema.toString());
-
-            tupleSchema = SchemaUtil.newTupleSchema(Arrays.asList(fieldNames),
-                    Arrays.asList(dataTypes));
-            assertEquals(expected, tupleSchema.toString());
-
-            expected = "{t: (f0: long,f1: chararray)}";
-            tupleSchema = SchemaUtil.newTupleSchema(dataTypes);
-            assertEquals(expected, tupleSchema.toString());
-
-            tupleSchema = SchemaUtil.newTupleSchema(Arrays.asList(dataTypes));
-            assertEquals(expected, tupleSchema.toString());
-        } catch (FrontendException e) {
-            fail();
-        }
+    @Test
+    public void testTupleSchema() throws Exception {
+        String tupleName = "mytuple";
+        String[] fieldNames = new String[] { "field_0", "field_1" };
+        Byte[] dataTypes = new Byte[] { DataType.LONG, DataType.CHARARRAY };
+
+        String expected = "{mytuple: (field_0: long,field_1: chararray)}";
+        Schema tupleSchema = SchemaUtil.newTupleSchema(tupleName,
+                fieldNames, dataTypes);
+        assertEquals(expected, tupleSchema.toString());
+
+        tupleSchema = SchemaUtil.newTupleSchema(tupleName, Arrays
+                .asList(fieldNames), Arrays.asList(dataTypes));
+        assertEquals(expected, tupleSchema.toString());
+
+        expected = "{t: (field_0: long,field_1: chararray)}";
+        tupleSchema = SchemaUtil.newTupleSchema(fieldNames, dataTypes);
+        assertEquals(expected, tupleSchema.toString());
+
+        tupleSchema = SchemaUtil.newTupleSchema(Arrays.asList(fieldNames),
+                Arrays.asList(dataTypes));
+        assertEquals(expected, tupleSchema.toString());
+
+        expected = "{t: (f0: long,f1: chararray)}";
+        tupleSchema = SchemaUtil.newTupleSchema(dataTypes);
+        assertEquals(expected, tupleSchema.toString());
+
+        tupleSchema = SchemaUtil.newTupleSchema(Arrays.asList(dataTypes));
+        assertEquals(expected, tupleSchema.toString());
     }
 
-    public void testBagSchema() {
-        try {
-            String bagName="mybag";
-            String tupleName = "mytuple";
-            String[] fieldNames = new String[] { "field_0", "field_1" };
-            Byte[] dataTypes = new Byte[] { DataType.LONG, DataType.CHARARRAY };
-
-            String expected = "{mybag: {mytuple: (field_0: long,field_1: chararray)}}";
-            Schema bagSchema = SchemaUtil.newBagSchema(bagName,tupleName,
-                    fieldNames, dataTypes);
-            assertEquals(expected, bagSchema.toString());
-
-            bagSchema = SchemaUtil.newBagSchema(bagName,tupleName, Arrays
-                    .asList(fieldNames), Arrays.asList(dataTypes));
-            assertEquals(expected, bagSchema.toString());
-
-            expected = "{b: {t: (field_0: long,field_1: chararray)}}";
-            bagSchema = SchemaUtil.newBagSchema(fieldNames, dataTypes);
-            assertEquals(expected, bagSchema.toString());
-
-            bagSchema = SchemaUtil.newBagSchema(Arrays.asList(fieldNames),
-                    Arrays.asList(dataTypes));
-            assertEquals(expected, bagSchema.toString());
-
-            expected = "{b: {t: (f0: long,f1: chararray)}}";
-            bagSchema = SchemaUtil.newBagSchema(dataTypes);
-            assertEquals(expected, bagSchema.toString());
-
-            bagSchema = SchemaUtil.newBagSchema(Arrays.asList(dataTypes));
-            assertEquals(expected, bagSchema.toString());
-        } catch (FrontendException e) {
-            fail();
-        }
+    @Test
+    public void testBagSchema() throws Exception {
+        String bagName="mybag";
+        String tupleName = "mytuple";
+        String[] fieldNames = new String[] { "field_0", "field_1" };
+        Byte[] dataTypes = new Byte[] { DataType.LONG, DataType.CHARARRAY };
+
+        String expected = "{mybag: {mytuple: (field_0: long,field_1: chararray)}}";
+        Schema bagSchema = SchemaUtil.newBagSchema(bagName,tupleName,
+                fieldNames, dataTypes);
+        assertEquals(expected, bagSchema.toString());
+
+        bagSchema = SchemaUtil.newBagSchema(bagName,tupleName, Arrays
+                .asList(fieldNames), Arrays.asList(dataTypes));
+        assertEquals(expected, bagSchema.toString());
+
+        expected = "{b: {t: (field_0: long,field_1: chararray)}}";
+        bagSchema = SchemaUtil.newBagSchema(fieldNames, dataTypes);
+        assertEquals(expected, bagSchema.toString());
+
+        bagSchema = SchemaUtil.newBagSchema(Arrays.asList(fieldNames),
+                Arrays.asList(dataTypes));
+        assertEquals(expected, bagSchema.toString());
+
+        expected = "{b: {t: (f0: long,f1: chararray)}}";
+        bagSchema = SchemaUtil.newBagSchema(dataTypes);
+        assertEquals(expected, bagSchema.toString());
+
+        bagSchema = SchemaUtil.newBagSchema(Arrays.asList(dataTypes));
+        assertEquals(expected, bagSchema.toString());
     }
 }

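TestSchemaUtil shows the cleanest part of the migration: a JUnit 3 test method that caught FrontendException only to call a bare fail() becomes an @Test method declaring throws Exception, so an unexpected exception fails the test with its own stack trace. A minimal sketch reusing one of the assertions from the diff (class name illustrative):

    import static org.junit.Assert.assertEquals;

    import org.apache.pig.data.DataType;
    import org.apache.pig.impl.logicalLayer.schema.Schema;
    import org.apache.pig.impl.logicalLayer.schema.SchemaUtil;
    import org.junit.Test;

    // Illustrative standalone class, not part of the commit.
    public class SchemaSketch {
        @Test
        public void testDefaultFieldNames() throws Exception {
            // A FrontendException now propagates and fails the test with
            // its own message, instead of the old bare fail().
            Byte[] dataTypes = new Byte[] { DataType.LONG, DataType.CHARARRAY };
            Schema s = SchemaUtil.newTupleSchema(dataTypes);
            assertEquals("{t: (f0: long,f1: chararray)}", s.toString());
        }
    }
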
Modified: pig/trunk/test/org/apache/pig/test/TestStore.java
URL: http://svn.apache.org/viewvc/pig/trunk/test/org/apache/pig/test/TestStore.java?rev=1406469&r1=1406468&r2=1406469&view=diff
==============================================================================
--- pig/trunk/test/org/apache/pig/test/TestStore.java (original)
+++ pig/trunk/test/org/apache/pig/test/TestStore.java Wed Nov  7 05:58:04 2012
@@ -17,6 +17,10 @@
  */
 package org.apache.pig.test;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
@@ -27,8 +31,6 @@ import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Random;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -37,10 +39,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.JobStatus.State;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.pig.EvalFunc;
 import org.apache.pig.ExecType;
 import org.apache.pig.PigException;
@@ -84,41 +86,36 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
 
-@RunWith(JUnit4.class)
-public class TestStore extends junit.framework.TestCase {
+public class TestStore {
     POStore st;
     DataBag inpDB;
     static MiniCluster cluster = MiniCluster.buildCluster();
     PigContext pc;
     POProject proj;
     PigServer pig;
-        
+
     String inputFileName;
     String outputFileName;
-    
+
     private static final String DUMMY_STORE_CLASS_NAME
     = "org.apache.pig.test.TestStore\\$DummyStore";
 
     private static final String FAIL_UDF_NAME
     = "org.apache.pig.test.TestStore\\$FailUDF";
-    private static final String MAP_MAX_ATTEMPTS = "mapred.map.max.attempts"; 
+    private static final String MAP_MAX_ATTEMPTS = "mapred.map.max.attempts";
     private static final String TESTDIR = "/tmp/" + TestStore.class.getSimpleName();
 
-    @Override
     @Before
     public void setUp() throws Exception {
         pig = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
         pc = pig.getPigContext();
         inputFileName = TESTDIR + "/TestStore-" + new Random().nextLong() + ".txt";
         outputFileName = TESTDIR + "/TestStore-output-" + new Random().nextLong() + ".txt";
-        
+
         DateTimeZone.setDefault(DateTimeZone.forOffsetMillis(DateTimeZone.UTC.getOffset(null)));
     }
 
-    @Override
     @After
     public void tearDown() throws Exception {
         Util.deleteDirectory(new File(TESTDIR));
@@ -140,7 +137,7 @@ public class TestStore extends junit.fra
     public static void oneTimeTearDown() throws Exception {
         cluster.shutDown();
     }
-    
+
     @Test
     public void testValidation() throws Exception{
         String outputFileName = "test-output.txt";
@@ -153,12 +150,12 @@ public class TestStore extends junit.fra
         } catch (PlanValidationException e){
                 // Since output file is not present, validation should pass
                 // and not throw this exception.
-                fail("Store validation test failed.");                
+                fail("Store validation test failed.");
         } finally {
             Util.deleteFile(pig.getPigContext(), outputFileName);
         }
     }
-    
+
     @Test
     public void testValidationFailure() throws Exception{
         String input[] = new String[] { "some data" };
@@ -173,7 +170,7 @@ public class TestStore extends junit.fra
             new InputOutputFileValidator(lp, pig.getPigContext()).validate();
         } catch (FrontendException pve){
             // Since output file is present, validation should fail
-            // and throw this exception 
+            // and throw this exception
             assertEquals(6000,pve.getErrorCode());
             assertEquals(PigException.REMOTE_ENVIRONMENT, pve.getErrorSource());
             assertTrue(pve.getCause() instanceof IOException);
@@ -183,12 +180,12 @@ public class TestStore extends junit.fra
             Util.deleteFile(pig.getPigContext(), outputFileName);
         }
     }
-    
+
     @Test
     public void testStore() throws Exception {
         inpDB = GenRandomData.genRandSmallTupDataBag(new Random(), 10, 100);
         storeAndCopyLocally(inpDB);
-        
+
         int size = 0;
         BufferedReader br = new BufferedReader(new FileReader(outputFileName));
         for(String line=br.readLine();line!=null;line=br.readLine()){
@@ -196,20 +193,20 @@ public class TestStore extends junit.fra
             Tuple t = new DefaultTuple();
             t.append(flds[0].compareTo("")!=0 ? flds[0] : null);
             t.append(flds[1].compareTo("")!=0 ? Integer.parseInt(flds[1]) : null);
-            
+
             System.err.println("Simple data: ");
             System.err.println(line);
             System.err.println("t: ");
             System.err.println(t);
-            assertEquals(true, TestHelper.bagContains(inpDB, t));
+            assertTrue(TestHelper.bagContains(inpDB, t));
             ++size;
         }
-        assertEquals(true, size==inpDB.size());
+        assertEquals(size, inpDB.size());
     }
 
     /**
      * @param inpD
-     * @throws IOException 
+     * @throws IOException
      */
     private void setUpInputFileOnCluster(DataBag inpD) throws IOException {
         String[] data = new String[(int) inpD.size()];
@@ -217,10 +214,10 @@ public class TestStore extends junit.fra
         for (Tuple tuple : inpD) {
             data[i] = toDelimitedString(tuple, "\t");
             i++;
-        } 
+        }
         Util.createInputFile(cluster, inputFileName, data);
     }
-    
+
     @SuppressWarnings("unchecked")
     private String toDelimitedString(Tuple t, String delim) throws ExecException {
         StringBuilder buf = new StringBuilder();
@@ -236,7 +233,7 @@ public class TestStore extends junit.fra
                     buf.append(field.toString());
                 }
             }
-            
+
             if (i != t.size() - 1)
                 buf.append(delim);
         }
@@ -253,10 +250,10 @@ public class TestStore extends junit.fra
         for(String line=br.readLine();line!=null;line=br.readLine()){
             String[] flds = line.split("\t",-1);
             Tuple t = new DefaultTuple();
-            
+
             ResourceFieldSchema bagfs = GenRandomData.getSmallTupDataBagFieldSchema();
             ResourceFieldSchema tuplefs = GenRandomData.getSmallTupleFieldSchema();
-            
+
             t.append(flds[0].compareTo("")!=0 ? ps.getLoadCaster().bytesToBag(flds[0].getBytes(), bagfs) : null);
             t.append(flds[1].compareTo("")!=0 ? new DataByteArray(flds[1].getBytes()) : null);
             t.append(flds[2].compareTo("")!=0 ? ps.getLoadCaster().bytesToCharArray(flds[2].getBytes()) : null);
@@ -281,31 +278,30 @@ public class TestStore extends junit.fra
         inpDB.add(inputTuple);
         storeAndCopyLocally(inpDB);
         PigStorage ps = new PigStorage("\t");
-        int size = 0;
         BufferedReader br = new BufferedReader(new FileReader(outputFileName));
         for(String line=br.readLine();line!=null;line=br.readLine()){
             System.err.println("Complex data: ");
             System.err.println(line);
             String[] flds = line.split("\t",-1);
             Tuple t = new DefaultTuple();
-            
+
             ResourceFieldSchema stringfs = new ResourceFieldSchema();
             stringfs.setType(DataType.CHARARRAY);
             ResourceFieldSchema intfs = new ResourceFieldSchema();
             intfs.setType(DataType.INTEGER);
-            
+
             ResourceSchema tupleSchema = new ResourceSchema();
             tupleSchema.setFields(new ResourceFieldSchema[]{stringfs, intfs});
             ResourceFieldSchema tuplefs = new ResourceFieldSchema();
             tuplefs.setSchema(tupleSchema);
             tuplefs.setType(DataType.TUPLE);
-            
+
             ResourceSchema bagSchema = new ResourceSchema();
             bagSchema.setFields(new ResourceFieldSchema[]{tuplefs});
             ResourceFieldSchema bagfs = new ResourceFieldSchema();
             bagfs.setSchema(bagSchema);
             bagfs.setType(DataType.BAG);
-            
+
             t.append(flds[0].compareTo("")!=0 ? ps.getLoadCaster().bytesToBag(flds[0].getBytes(), bagfs) : null);
             t.append(flds[1].compareTo("")!=0 ? new DataByteArray(flds[1].getBytes()) : null);
             t.append(flds[2].compareTo("")!=0 ? ps.getLoadCaster().bytesToCharArray(flds[2].getBytes()) : null);
@@ -319,7 +315,6 @@ public class TestStore extends junit.fra
             t.append(flds[10].compareTo("")!=0 ? ps.getLoadCaster().bytesToDateTime(flds[10].getBytes()) : null);
             t.append(flds[11].compareTo("")!=0 ? ps.getLoadCaster().bytesToCharArray(flds[10].getBytes()) : null);
             assertTrue(TestHelper.tupleEquals(inputTuple, t));
-            ++size;
         }
     }
     @Test
@@ -328,7 +323,7 @@ public class TestStore extends junit.fra
         String inputFileName = "testGetSchema-input.txt";
         String outputFileName = "testGetSchema-output.txt";
         try {
-            Util.createInputFile(pig.getPigContext(), 
+            Util.createInputFile(pig.getPigContext(),
                     inputFileName, input);
             String query = "a = load '" + inputFileName + "' as (c:chararray, " +
                     "i:int,d:double);store a into '" + outputFileName + "' using " +
@@ -336,12 +331,12 @@ public class TestStore extends junit.fra
             pig.setBatchOn();
             Util.registerMultiLineQuery(pig, query);
             pig.executeBatch();
-            ResourceSchema rs = new BinStorage().getSchema(outputFileName, 
+            ResourceSchema rs = new BinStorage().getSchema(outputFileName,
                     new Job(ConfigurationUtil.toConfiguration(pig.getPigContext().
                             getProperties())));
             Schema expectedSchema = Utils.getSchemaFromString(
                     "c:chararray,i:int,d:double");
-            Assert.assertTrue("Checking binstorage getSchema output", Schema.equals( 
+            assertTrue("Checking binstorage getSchema output", Schema.equals(
                     expectedSchema, Schema.getPigSchema(rs), true, true));
         } finally {
             Util.deleteFile(pig.getPigContext(), inputFileName);
@@ -391,100 +386,90 @@ public class TestStore extends junit.fra
         filesToVerify.put(DummyOutputCommitter.FILE_COMMITJOB_CALLED, Boolean.TRUE);
         filesToVerify.put(DummyOutputCommitter.FILE_ABORTJOB_CALLED, Boolean.FALSE);
         filesToVerify.put(DummyOutputCommitter.FILE_CLEANUPJOB_CALLED, Boolean.FALSE);
-        try {
-            ExecType[] modes = new ExecType[] { ExecType.MAPREDUCE, ExecType.LOCAL};
-            String[] inputData = new String[]{"hello\tworld", "bye\tworld"};
-            
-            String script = "a = load '"+ inputFileName + "' as (a0:chararray, a1:chararray);" +
-            		"store a into '" + outputFileName + "' using " + 
-            		DUMMY_STORE_CLASS_NAME + "();";
-            
-            for (ExecType execType : modes) {
-                FileLocalizer.setInitialized(false);
-                if(execType == ExecType.MAPREDUCE) {
-                    ps = new PigServer(ExecType.MAPREDUCE, 
-                            cluster.getProperties());
-                } else {
-                    Properties props = new Properties();                                          
-                    props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
-                    ps = new PigServer(ExecType.LOCAL, props);
-                    if (Util.isHadoop1_0()) {
-                        // MAPREDUCE-1447/3563 (LocalJobRunner does not call methods of mapreduce
-                        // OutputCommitter) is fixed only in 0.23.1
-                        filesToVerify.put(DummyOutputCommitter.FILE_SETUPJOB_CALLED, Boolean.FALSE);
-                        filesToVerify.put(DummyOutputCommitter.FILE_COMMITJOB_CALLED, Boolean.FALSE);
-                    }
-                }
-                ps.setBatchOn();
-                Util.deleteFile(ps.getPigContext(), TESTDIR);
-                Util.createInputFile(ps.getPigContext(), 
-                        inputFileName, inputData);
-                Util.registerMultiLineQuery(ps, script);
-                ps.executeBatch();
-                for (Entry<String, Boolean> entry : filesToVerify.entrySet()) {
-                    String condition = entry.getValue() ? "" : "not";
-                    assertEquals("Checking if file " + entry.getKey() +
-                            " does " + condition + " exists in " + execType +
-                            " mode", (boolean) entry.getValue(),
-                            Util.exists(ps.getPigContext(), entry.getKey()));
+        ExecType[] modes = new ExecType[] { ExecType.MAPREDUCE, ExecType.LOCAL};
+        String[] inputData = new String[]{"hello\tworld", "bye\tworld"};
+
+        String script = "a = load '"+ inputFileName + "' as (a0:chararray, a1:chararray);" +
+                "store a into '" + outputFileName + "' using " +
+                DUMMY_STORE_CLASS_NAME + "();";
+
+        for (ExecType execType : modes) {
+            FileLocalizer.setInitialized(false);
+            if(execType == ExecType.MAPREDUCE) {
+                ps = new PigServer(ExecType.MAPREDUCE,
+                        cluster.getProperties());
+            } else {
+                Properties props = new Properties();
+                props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
+                ps = new PigServer(ExecType.LOCAL, props);
+                if (Util.isHadoop1_0()) {
+                    // MAPREDUCE-1447/3563 (LocalJobRunner does not call methods of mapreduce
+                    // OutputCommitter) is fixed only in 0.23.1
+                    filesToVerify.put(DummyOutputCommitter.FILE_SETUPJOB_CALLED, Boolean.FALSE);
+                    filesToVerify.put(DummyOutputCommitter.FILE_COMMITJOB_CALLED, Boolean.FALSE);
                 }
             }
-        } catch (Exception e) {
-            e.printStackTrace();
-            Assert.fail("Exception encountered - hence failing:" + e);
+            ps.setBatchOn();
+            Util.deleteFile(ps.getPigContext(), TESTDIR);
+            Util.createInputFile(ps.getPigContext(),
+                    inputFileName, inputData);
+            Util.registerMultiLineQuery(ps, script);
+            ps.executeBatch();
+            for (Entry<String, Boolean> entry : filesToVerify.entrySet()) {
+                String condition = entry.getValue() ? "" : "not";
+                assertEquals("Checking if file " + entry.getKey() +
+                        " does " + condition + " exists in " + execType +
+                        " mode", (boolean) entry.getValue(),
+                        Util.exists(ps.getPigContext(), entry.getKey()));
+            }
         }
     }
-    
+
     @Test
     public void testCleanupOnFailure() throws Exception {
         PigServer ps = null;
         String cleanupSuccessFile = outputFileName + "_cleanupOnFailure_succeeded";
         String cleanupFailFile = outputFileName + "_cleanupOnFailure_failed";
-        try {
-            ExecType[] modes = new ExecType[] { ExecType.LOCAL, ExecType.MAPREDUCE};
-            String[] inputData = new String[]{"hello\tworld", "bye\tworld"};
-            
-            String script = "a = load '"+ inputFileName + "';" +
-                    "store a into '" + outputFileName + "' using " + 
-                    DUMMY_STORE_CLASS_NAME + "('true');";
-            
-            for (ExecType execType : modes) {
-                if(execType == ExecType.MAPREDUCE) {
-                    ps = new PigServer(ExecType.MAPREDUCE, 
-                            cluster.getProperties());
-                } else {
-                    Properties props = new Properties();                                          
-                    props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
-                    ps = new PigServer(ExecType.LOCAL, props);
-                }
-                Util.deleteFile(ps.getPigContext(), TESTDIR);
-                ps.setBatchOn();
-                Util.createInputFile(ps.getPigContext(), 
-                        inputFileName, inputData);
-                Util.registerMultiLineQuery(ps, script);
-                ps.executeBatch();
-                assertEquals(
-                        "Checking if file indicating that cleanupOnFailure failed " +
-                        " does not exists in " + execType + " mode", false, 
-                        Util.exists(ps.getPigContext(), cleanupFailFile));
-                assertEquals(
-                        "Checking if file indicating that cleanupOnFailure was " +
-                        "successfully called exists in " + execType + " mode", true, 
-                        Util.exists(ps.getPigContext(), cleanupSuccessFile));
-            }
-        } catch (Exception e) {
-            e.printStackTrace();
-            Assert.fail("Exception encountered - hence failing:" + e);
+        ExecType[] modes = new ExecType[] { ExecType.LOCAL, ExecType.MAPREDUCE};
+        String[] inputData = new String[]{"hello\tworld", "bye\tworld"};
+
+        String script = "a = load '"+ inputFileName + "';" +
+                "store a into '" + outputFileName + "' using " +
+                DUMMY_STORE_CLASS_NAME + "('true');";
+
+        for (ExecType execType : modes) {
+            if(execType == ExecType.MAPREDUCE) {
+                ps = new PigServer(ExecType.MAPREDUCE,
+                        cluster.getProperties());
+            } else {
+                Properties props = new Properties();
+                props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
+                ps = new PigServer(ExecType.LOCAL, props);
+            }
+            Util.deleteFile(ps.getPigContext(), TESTDIR);
+            ps.setBatchOn();
+            Util.createInputFile(ps.getPigContext(),
+                    inputFileName, inputData);
+            Util.registerMultiLineQuery(ps, script);
+            ps.executeBatch();
+            assertEquals(
+                    "Checking if file indicating that cleanupOnFailure failed " +
+                    " does not exists in " + execType + " mode", false,
+                    Util.exists(ps.getPigContext(), cleanupFailFile));
+            assertEquals(
+                    "Checking if file indicating that cleanupOnFailure was " +
+                    "successfully called exists in " + execType + " mode", true,
+                    Util.exists(ps.getPigContext(), cleanupSuccessFile));
         }
     }
-    
-    
+
+
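For orientation, the 'true' argument passed to the dummy store below is what makes the job fail: it flips failInPutNext so that putNext throws, which in turn drives Pig's cleanupOnFailure path. A sketch of that hook, assuming it sits in a PigStorage subclass like the DummyStore defined later in this file:

    // Sketch: a PigStorage subclass whose putNext can be told to fail,
    // so the cleanupOnFailure path gets exercised. Illustrative only.
    @Override
    public void putNext(Tuple t) throws IOException {
        if (failInPutNext) {
            throw new IOException("simulated store failure");
        }
        super.putNext(t);
    }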
     @Test
     public void testCleanupOnFailureMultiStore() throws Exception {
         PigServer ps = null;
         String outputFileName1 = TESTDIR + "/TestStore-output-" + new Random().nextLong() + ".txt";
         String outputFileName2 = TESTDIR + "/TestStore-output-" + new Random().nextLong() + ".txt";
-        
+
         Map<String, Boolean> filesToVerify = new HashMap<String, Boolean>();
         filesToVerify.put(outputFileName1 + "_cleanupOnFailure_succeeded1", Boolean.TRUE);
         filesToVerify.put(outputFileName2 + "_cleanupOnFailure_succeeded2", Boolean.TRUE);
@@ -504,58 +489,53 @@ public class TestStore extends junit.fra
         filesToVerify.put(DummyOutputCommitter.FILE_ABORTJOB_CALLED + "2", Boolean.TRUE);
         filesToVerify.put(DummyOutputCommitter.FILE_CLEANUPJOB_CALLED + "1", Boolean.FALSE);
         filesToVerify.put(DummyOutputCommitter.FILE_CLEANUPJOB_CALLED + "2", Boolean.FALSE);
-        
-        try {
-            ExecType[] modes = new ExecType[] { ExecType.MAPREDUCE, ExecType.LOCAL};
-            String[] inputData = new String[]{"hello\tworld", "bye\tworld"};
-            
-            // though the second store should
-            // not cause a failure, the first one does and the result should be
-            // that both stores are considered to have failed
-            String script = "a = load '"+ inputFileName + "';" +
-                    "store a into '" + outputFileName1 + "' using " + 
-                    DUMMY_STORE_CLASS_NAME + "('true', '1');" +
-                    "store a into '" + outputFileName2 + "' using " + 
-                    DUMMY_STORE_CLASS_NAME + "('false', '2');"; 
-            
-            for (ExecType execType : modes) {
-                FileLocalizer.setInitialized(false);
-                if(execType == ExecType.MAPREDUCE) {
-                    ps = new PigServer(ExecType.MAPREDUCE, 
-                            cluster.getProperties());
-                } else {
-                    Properties props = new Properties();                                          
-                    props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
-                    ps = new PigServer(ExecType.LOCAL, props);
-                    // LocalJobRunner does not call abortTask
-                    filesToVerify.put(DummyOutputCommitter.FILE_ABORTTASK_CALLED + "1", Boolean.FALSE);
-                    filesToVerify.put(DummyOutputCommitter.FILE_ABORTTASK_CALLED + "2", Boolean.FALSE);
-                    if (Util.isHadoop1_0()) {
-                        // MAPREDUCE-1447/3563 (LocalJobRunner does not call methods of mapreduce
-                        // OutputCommitter) is fixed only in 0.23.1
-                        filesToVerify.put(DummyOutputCommitter.FILE_SETUPJOB_CALLED + "1", Boolean.FALSE);
-                        filesToVerify.put(DummyOutputCommitter.FILE_SETUPJOB_CALLED + "2", Boolean.FALSE);
-                        filesToVerify.put(DummyOutputCommitter.FILE_ABORTJOB_CALLED + "1", Boolean.FALSE);
-                        filesToVerify.put(DummyOutputCommitter.FILE_ABORTJOB_CALLED + "2", Boolean.FALSE);
-                    }
-                }
-                Util.deleteFile(ps.getPigContext(), TESTDIR);
-                ps.setBatchOn();
-                Util.createInputFile(ps.getPigContext(), 
-                        inputFileName, inputData);
-                Util.registerMultiLineQuery(ps, script);
-                ps.executeBatch();
-                for (Entry<String, Boolean> entry : filesToVerify.entrySet()) {
-                    String condition = entry.getValue() ? "" : "not";
-                    assertEquals("Checking if file " + entry.getKey() +
-                            " does " + condition + " exists in " + execType +
-                            " mode", (boolean) entry.getValue(),
-                            Util.exists(ps.getPigContext(), entry.getKey()));
+
+        ExecType[] modes = new ExecType[] { ExecType.MAPREDUCE, ExecType.LOCAL};
+        String[] inputData = new String[]{"hello\tworld", "bye\tworld"};
+
+        // though the second store does not cause a failure, the first one
+        // does, and as a result both stores should be considered to have
+        // failed
+        String script = "a = load '"+ inputFileName + "';" +
+                "store a into '" + outputFileName1 + "' using " +
+                DUMMY_STORE_CLASS_NAME + "('true', '1');" +
+                "store a into '" + outputFileName2 + "' using " +
+                DUMMY_STORE_CLASS_NAME + "('false', '2');";
+
+        for (ExecType execType : modes) {
+            FileLocalizer.setInitialized(false);
+            if(execType == ExecType.MAPREDUCE) {
+                ps = new PigServer(ExecType.MAPREDUCE,
+                        cluster.getProperties());
+            } else {
+                Properties props = new Properties();
+                props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
+                ps = new PigServer(ExecType.LOCAL, props);
+                // LocalJobRunner does not call abortTask
+                filesToVerify.put(DummyOutputCommitter.FILE_ABORTTASK_CALLED + "1", Boolean.FALSE);
+                filesToVerify.put(DummyOutputCommitter.FILE_ABORTTASK_CALLED + "2", Boolean.FALSE);
+                if (Util.isHadoop1_0()) {
+                    // MAPREDUCE-1447/3563 (LocalJobRunner does not call methods of mapreduce
+                    // OutputCommitter) is fixed only in 0.23.1
+                    filesToVerify.put(DummyOutputCommitter.FILE_SETUPJOB_CALLED + "1", Boolean.FALSE);
+                    filesToVerify.put(DummyOutputCommitter.FILE_SETUPJOB_CALLED + "2", Boolean.FALSE);
+                    filesToVerify.put(DummyOutputCommitter.FILE_ABORTJOB_CALLED + "1", Boolean.FALSE);
+                    filesToVerify.put(DummyOutputCommitter.FILE_ABORTJOB_CALLED + "2", Boolean.FALSE);
                 }
             }
-        } catch (Exception e) {
-            e.printStackTrace();
-            Assert.fail("Exception encountered - hence failing:" + e);
+            Util.deleteFile(ps.getPigContext(), TESTDIR);
+            ps.setBatchOn();
+            Util.createInputFile(ps.getPigContext(),
+                    inputFileName, inputData);
+            Util.registerMultiLineQuery(ps, script);
+            ps.executeBatch();
+            for (Entry<String, Boolean> entry : filesToVerify.entrySet()) {
+                String condition = entry.getValue() ? "does" : "does not";
+                assertEquals("Checking if file " + entry.getKey() +
+                        " " + condition + " exist in " + execType +
+                        " mode", (boolean) entry.getValue(),
+                        Util.exists(ps.getPigContext(), entry.getKey()));
+            }
         }
     }
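Although only the first store throws, Pig treats the whole batch as failed. A hedged way to observe this from the driver side, assuming ExecJob status reporting:

    import java.util.List;
    import org.apache.pig.backend.executionengine.ExecJob;

    // Sketch: after executeBatch(), every store in the failed batch is
    // expected to report FAILED, not just the one that threw.
    List<ExecJob> jobs = ps.executeBatch();
    for (ExecJob job : jobs) {
        System.err.println(job.getStatus()); // expected: FAILED for both stores
    }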
 
@@ -567,53 +547,53 @@ public class TestStore extends junit.fra
     @Test
     public void testSuccessFileCreation1() throws Exception {
         PigServer ps = null;
-        
+
         try {
             ExecType[] modes = new ExecType[] { ExecType.LOCAL, ExecType.MAPREDUCE};
             String[] inputData = new String[]{"hello\tworld", "hi\tworld", "bye\tworld"};
-            
+
             String multiStoreScript = "a = load '"+ inputFileName + "';" +
                     "b = filter a by $0 == 'hello';" +
                     "c = filter a by $0 == 'hi';" +
                     "d = filter a by $0 == 'bye';" +
                     "store b into '" + outputFileName + "_1';" +
-                    "store c into '" + outputFileName + "_2';" + 
+                    "store c into '" + outputFileName + "_2';" +
                     "store d into '" + outputFileName + "_3';";
-            
+
             String singleStoreScript =  "a = load '"+ inputFileName + "';" +
                 "store a into '" + outputFileName + "_1';" ;
-            
+
             for (ExecType execType : modes) {
                 for(boolean isPropertySet: new boolean[] { true, false}) {
                     for(boolean isMultiStore: new boolean[] { true, false}) {
-                        String script = (isMultiStore ? multiStoreScript : 
+                        String script = (isMultiStore ? multiStoreScript :
                             singleStoreScript);
                         // since we will be switching between map red and local modes
                         // we will need to make sure filelocalizer is reset before each
                         // run.
                         FileLocalizer.setInitialized(false);
                         if(execType == ExecType.MAPREDUCE) {
-                            ps = new PigServer(ExecType.MAPREDUCE, 
+                            ps = new PigServer(ExecType.MAPREDUCE,
                                     cluster.getProperties());
                         } else {
-                            Properties props = new Properties();                                          
+                            Properties props = new Properties();
                             props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
                             ps = new PigServer(ExecType.LOCAL, props);
                         }
                         ps.getPigContext().getProperties().setProperty(
-                                MapReduceLauncher.SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, 
+                                MapReduceLauncher.SUCCESSFUL_JOB_OUTPUT_DIR_MARKER,
                                 Boolean.toString(isPropertySet));
                         Util.deleteFile(ps.getPigContext(), TESTDIR);
                         ps.setBatchOn();
-                        Util.createInputFile(ps.getPigContext(), 
+                        Util.createInputFile(ps.getPigContext(),
                                 inputFileName, inputData);
                         Util.registerMultiLineQuery(ps, script);
                         ps.executeBatch();
                         for(int i = 1; i <= (isMultiStore ? 3 : 1); i++) {
-                            String sucFile = outputFileName + "_" + i + "/" + 
+                            String sucFile = outputFileName + "_" + i + "/" +
                                                MapReduceLauncher.SUCCEEDED_FILE_NAME;
-                            assertEquals("Checking if _SUCCESS file exists in " + 
-                                    execType + " mode", isPropertySet, 
+                            assertEquals("Checking if _SUCCESS file exists in " +
+                                    execType + " mode", isPropertySet,
                                     Util.exists(ps.getPigContext(), sucFile));
                         }
                     }
@@ -624,7 +604,7 @@ public class TestStore extends junit.fra
         }
     }
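For reference, the marker switch exercised above maps to the Hadoop property "mapreduce.fileoutputcommitter.marksuccessfuljobs" (named in the comment below). A minimal sketch of enabling it for a standalone run; passing the property at construction time is an assumption, the test above sets it on the PigContext after construction instead:

    import java.util.Properties;
    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;
    import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher;

    // Sketch: force creation of the _SUCCESS marker for a local run.
    Properties props = new Properties();
    props.setProperty(MapReduceLauncher.SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, "true");
    PigServer pig = new PigServer(ExecType.LOCAL, props);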
 
-    // Test _SUCCESS file is NOT created when job fails and when 
+    // Test _SUCCESS file is NOT created when job fails and when
     // "mapreduce.fileoutputcommitter.marksuccessfuljobs" property is set to true
     // The test covers the multi-store and single-store cases in local and mapreduce modes
     // The test also checks that "_SUCCESS" file is NOT created when the property
@@ -638,46 +618,46 @@ public class TestStore extends junit.fra
             System.err.println("XXX: " + TestStore.FailUDF.class.getName());
             String multiStoreScript = "a = load '"+ inputFileName + "';" +
                     "b = filter a by $0 == 'hello';" +
-                    "b = foreach b generate " + FAIL_UDF_NAME + "($0);" + 
+                    "b = foreach b generate " + FAIL_UDF_NAME + "($0);" +
                     "c = filter a by $0 == 'hi';" +
                     "d = filter a by $0 == 'bye';" +
                     "store b into '" + outputFileName + "_1';" +
-                    "store c into '" + outputFileName + "_2';" + 
+                    "store c into '" + outputFileName + "_2';" +
                     "store d into '" + outputFileName + "_3';";
-            
+
             String singleStoreScript =  "a = load '"+ inputFileName + "';" +
-                "b = foreach a generate " + FAIL_UDF_NAME + "($0);" + 
+                "b = foreach a generate " + FAIL_UDF_NAME + "($0);" +
                 "store b into '" + outputFileName + "_1';" ;
-            
+
             for (ExecType execType : modes) {
                 for(boolean isPropertySet: new boolean[] { true, false}) {
                     for(boolean isMultiStore: new boolean[] { true, false}) {
-                        String script = (isMultiStore ? multiStoreScript : 
+                        String script = (isMultiStore ? multiStoreScript :
                             singleStoreScript);
                         // since we will be switching between map red and local modes
                         // we will need to make sure filelocalizer is reset before each
                         // run.
                         FileLocalizer.setInitialized(false);
                         if(execType == ExecType.MAPREDUCE) {
-                            // since the job is guaranteed to fail, let's set 
+                            // since the job is guaranteed to fail, let's set
                             // number of retries to 1.
                             Properties props = cluster.getProperties();
                             props.setProperty(MAP_MAX_ATTEMPTS, "1");
                             ps = new PigServer(ExecType.MAPREDUCE, props);
                         } else {
-                            Properties props = new Properties();                                          
+                            Properties props = new Properties();
                             props.setProperty(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
-                            // since the job is guaranteed to fail, let's set 
+                            // since the job is guaranteed to fail, let's set
                             // number of retries to 1.
                             props.setProperty(MAP_MAX_ATTEMPTS, "1");
                             ps = new PigServer(ExecType.LOCAL, props);
                         }
                         ps.getPigContext().getProperties().setProperty(
-                                MapReduceLauncher.SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, 
+                                MapReduceLauncher.SUCCESSFUL_JOB_OUTPUT_DIR_MARKER,
                                 Boolean.toString(isPropertySet));
                         Util.deleteFile(ps.getPigContext(), TESTDIR);
                         ps.setBatchOn();
-                        Util.createInputFile(ps.getPigContext(), 
+                        Util.createInputFile(ps.getPigContext(),
                                 inputFileName, inputData);
                         Util.registerMultiLineQuery(ps, script);
                         try {
@@ -689,10 +669,10 @@ public class TestStore extends junit.fra
                             }
                         }
                         for(int i = 1; i <= (isMultiStore ? 3 : 1); i++) {
-                            String sucFile = outputFileName + "_" + i + "/" + 
+                            String sucFile = outputFileName + "_" + i + "/" +
                                                MapReduceLauncher.SUCCEEDED_FILE_NAME;
-                            assertEquals("Checking if _SUCCESS file exists in " + 
-                                    execType + " mode", false, 
+                            assertEquals("Checking if _SUCCESS file exists in " +
+                                    execType + " mode", false,
                                     Util.exists(ps.getPigContext(), sucFile));
                         }
                     }
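Capping retries above keeps the deliberately failing job fast. MAP_MAX_ATTEMPTS is a constant defined earlier in this file; a sketch with the property name spelled out (the key "mapred.map.max.attempts" is an assumption here, not taken from this diff):

    import java.util.Properties;
    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;

    // Sketch: a single map attempt, so the failing UDF does not trigger
    // retries. The property key behind MAP_MAX_ATTEMPTS is assumed.
    Properties props = new Properties();
    props.setProperty("mapred.map.max.attempts", "1");
    PigServer ps = new PigServer(ExecType.LOCAL, props);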
@@ -710,24 +690,24 @@ public class TestStore extends junit.fra
         public String exec(Tuple input) throws IOException {
             throw new IOException("FailUDFException");
         }
-        
+
     }
 
     public static class DummyStore extends PigStorage implements StoreMetadata{
 
         private boolean failInPutNext = false;
-        
+
         private String outputFileSuffix= "";
 
         public DummyStore(String failInPutNextStr) {
             failInPutNext = Boolean.parseBoolean(failInPutNextStr);
         }
-        
+
         public DummyStore(String failInPutNextStr, String outputFileSuffix) {
             failInPutNext = Boolean.parseBoolean(failInPutNextStr);
             this.outputFileSuffix = outputFileSuffix;
         }
-        
+
         public DummyStore() {
         }
 
@@ -753,14 +733,14 @@ public class TestStore extends junit.fra
             // verify that output is available prior to storeSchema call
             Path resultPath = new Path(location, "part-m-00000");
             if (!fs.exists(resultPath)) {
-	            FileStatus[] listing = fs.listStatus(new Path(location));
-	            for (FileStatus fstat : listing) {
-	            	System.err.println(fstat.getPath());
-	            }
-	            // not creating the marker file below fails the test
-	            throw new IOException("" + resultPath + " not available in storeSchema");
+                FileStatus[] listing = fs.listStatus(new Path(location));
+                for (FileStatus fstat : listing) {
+                    System.err.println(fstat.getPath());
+                }
+                // throwing here means the marker file below never gets created, failing the test
+                throw new IOException("" + resultPath + " not available in storeSchema");
             }
-            
+
             // create a file to test that this method got called - if it gets called
             // multiple times, the create will throw an Exception
             fs.create(
@@ -772,29 +752,29 @@ public class TestStore extends junit.fra
         public void cleanupOnFailure(String location, Job job)
                 throws IOException {
             super.cleanupOnFailure(location, job);
-            
+
             // check that the output file location is not present
             Configuration conf = job.getConfiguration();
             FileSystem fs = FileSystem.get(conf);
             if(fs.exists(new Path(location))) {
                 // create a file to indicate that the cleanup did not happen
-                fs.create(new Path(location + "_cleanupOnFailure_failed" + 
+                fs.create(new Path(location + "_cleanupOnFailure_failed" +
                         outputFileSuffix), false);
             }
             // create a file to test that this method got called successfully
-            // if it gets called multiple times, the create will throw an Exception 
+            // if it gets called multiple times, the create will throw an Exception
             fs.create(
-                    new Path(location + "_cleanupOnFailure_succeeded" + 
+                    new Path(location + "_cleanupOnFailure_succeeded" +
                             outputFileSuffix), false);
         }
 
         @Override
         public void storeStatistics(ResourceStatistics stats, String location,
                 Job job) throws IOException {
-            
+
         }
     }
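DummyStore's StoreMetadata hooks run at commit time; storeSchema above only checks that output is already present and records the call, but a real implementation would persist the schema. A sketch of that variant (the ".pig_schema" file name and plain-text serialization are assumptions, not taken from this test):

    // Sketch: persist the schema next to the output at commit time.
    @Override
    public void storeSchema(ResourceSchema schema, String location, Job job)
            throws IOException {
        FileSystem fs = FileSystem.get(job.getConfiguration());
        FSDataOutputStream out = fs.create(new Path(location, ".pig_schema"), false);
        out.writeBytes(schema.toString()); // human-readable form; real stores use JSON
        out.close();
    }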
-    
+
     private void checkStorePath(String orig, String expected) throws Exception {
         checkStorePath(orig, expected, false);
     }
@@ -805,25 +785,25 @@ public class TestStore extends junit.fra
         DataStorage dfs = pc.getDfs();
         dfs.setActiveContainer(dfs.asContainer("/tmp"));
         Map<String, String> fileNameMap = new HashMap<String, String>();
-        
+
         QueryParserDriver builder = new QueryParserDriver(pc, "Test-Store", fileNameMap);
-        
+
         String query = "a = load 'foo';" + "store a into '"+orig+"';";
         LogicalPlan lp = builder.parse(query);
 
-        Assert.assertTrue(lp.size()>1);
+        assertTrue(lp.size()>1);
         Operator op = lp.getSinks().get(0);
-        
-        Assert.assertTrue(op instanceof LOStore);
+
+        assertTrue(op instanceof LOStore);
         LOStore store = (LOStore)op;
 
         String p = store.getFileSpec().getFileName();
         p = p.replaceAll("hdfs://[\\-\\w:\\.]*/","/");
-        
+
         if (isTmp) {
-            Assert.assertTrue(p.matches("/tmp.*"));
+            assertTrue(p.matches("/tmp.*"));
         } else {
-            Assert.assertEquals(expected, p);
+            assertEquals(expected, p);
         }
     }
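checkStorePath resolves the store location against the active container (/tmp here) before comparing. Hypothetical invocations illustrating the assertion branches (arguments are illustrative, not call sites from this file):

    // Hypothetical usage of the helper above:
    checkStorePath("/tmp/foo", "/tmp/foo");  // absolute path compared verbatim
    checkStorePath("foo", "/tmp/foo");       // relative path resolved under /tmp
    checkStorePath("bar", null, true);       // only asserted to land under /tmp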
 
@@ -933,6 +913,5 @@ public class TestStore extends junit.fra
             FSDataOutputStream out = fs.create(new Path(fileName), true);
             out.close();
         }
-
     }
 }

Modified: pig/trunk/test/org/apache/pig/test/TestStoreOld.java
URL: http://svn.apache.org/viewvc/pig/trunk/test/org/apache/pig/test/TestStoreOld.java?rev=1406469&r1=1406468&r2=1406469&view=diff
==============================================================================
--- pig/trunk/test/org/apache/pig/test/TestStoreOld.java (original)
+++ pig/trunk/test/org/apache/pig/test/TestStoreOld.java Wed Nov  7 05:58:04 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.pig.test;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.File;
 import java.io.PrintWriter;
 import java.util.Iterator;
@@ -29,35 +31,30 @@ import org.apache.pig.impl.io.FileLocali
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-import junit.framework.TestCase;
 
-@RunWith(JUnit4.class)
-public class TestStoreOld extends TestCase {
+public class TestStoreOld {
 
     static MiniCluster cluster = MiniCluster.buildCluster();
     private int LOOP_COUNT = 1024;
-    
+
     String fileName;
     String tmpFile1, tmpFile2;
     PigServer pig;
-    
+
     @AfterClass
     public static void oneTimeTearDown() throws Exception {
         cluster.shutDown();
     }
-    
+
     @Test
     public void testSingleStore() throws Exception{
         pig.registerQuery("A = load " + fileName + ";");
-        
+
         pig.store("A", tmpFile1);
-        
+
         pig.registerQuery("B = load " + tmpFile1 + ";");
         Iterator<Tuple> iter  = pig.openIterator("B");
-        
+
         int i =0;
         while (iter.hasNext()){
             Tuple t = iter.next();
@@ -66,32 +63,31 @@ public class TestStoreOld extends TestCa
             i++;
         }
     }
-    
+
     @Test
     public void testMultipleStore() throws Exception{
         pig.registerQuery("A = load " + fileName + ";");
-        
+
         pig.store("A", tmpFile1);
-        
+
         pig.registerQuery("B = foreach (group A by $0) generate $0, SUM($1.$0);");
         pig.store("B", tmpFile2);
         pig.registerQuery("C = load " + tmpFile2 + ";");
         Iterator<Tuple> iter  = pig.openIterator("C");
-        
+
         int i =0;
         while (iter.hasNext()){
             Tuple t = iter.next();
             i++;
-            
         }
-        
+
         assertEquals(LOOP_COUNT, i);
-        
+
     }
-    
+
     @Test
     public void testStoreWithMultipleMRJobs() throws Exception{
-        pig.registerQuery("A = load " + fileName + ";");        
+        pig.registerQuery("A = load " + fileName + ";");
         pig.registerQuery("B = foreach (group A by $0) generate $0, SUM($1.$0);");
         pig.registerQuery("C = foreach (group B by $0) generate $0, SUM($1.$0);");
         pig.registerQuery("D = foreach (group C by $0) generate $0, SUM($1.$0);");
@@ -99,22 +95,19 @@ public class TestStoreOld extends TestCa
         pig.store("D", tmpFile2);
         pig.registerQuery("E = load " + tmpFile2 + ";");
         Iterator<Tuple> iter  = pig.openIterator("E");
-        
+
         int i =0;
         while (iter.hasNext()){
             Tuple t = iter.next();
             i++;
         }
-        
+
         assertEquals(LOOP_COUNT, i);
-        
+
     }
 
-    
-    @Override
     @Before
     public void setUp() throws Exception {
-        super.setUp();
         File f = File.createTempFile("tmp", "");
         PrintWriter pw = new PrintWriter(f);
         for (int i=0;i<LOOP_COUNT; i++){
@@ -127,5 +120,5 @@ public class TestStoreOld extends TestCa
         tmpFile2 = "'" + FileLocalizer.getTemporaryPath(pig.getPigContext()).toString() + "'";
         f.delete();
     }
-    
+
 }
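The TestStoreOld changes above follow the same JUnit 3 to JUnit 4 conversion applied across this commit. The pattern, in sketch form with an illustrative class:

    import static org.junit.Assert.assertEquals;

    import org.junit.Before;
    import org.junit.Test;

    // Sketch of the conversion pattern: drop "extends TestCase" and
    // @RunWith(JUnit4.class), annotate lifecycle methods, and statically
    // import the Assert methods. Class name and fields are illustrative.
    public class ExampleTest {          // was: extends junit.framework.TestCase

        private int value;

        @Before                         // was: protected void setUp() + super.setUp()
        public void setUp() throws Exception {
            value = 1;
        }

        @Test                           // was: discovered by the testXxx naming convention
        public void testValue() {
            assertEquals(1, value);
        }
    }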