Posted to commits@pig.apache.org by ya...@apache.org on 2010/03/22 08:54:53 UTC

svn commit: r925988 [7/8] - in /hadoop/pig/trunk/contrib/zebra: ./ src/test/org/apache/hadoop/zebra/ src/test/org/apache/hadoop/zebra/mapred/ src/test/org/apache/hadoop/zebra/mapreduce/ src/test/org/apache/hadoop/zebra/pig/

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoin2.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoin2.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoin2.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoin2.java Mon Mar 22 07:54:51 2010
@@ -18,44 +18,35 @@
 
 package org.apache.hadoop.zebra.pig;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
 import junit.framework.Assert;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
 import org.apache.hadoop.zebra.io.TableInserter;
 import org.apache.hadoop.zebra.io.TableScanner;
-import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
-import org.apache.hadoop.zebra.pig.TableStorer;
-import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.parser.ParseException;
+import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-public class TestMergeJoin2 {
-  protected static ExecType execType = ExecType.MAPREDUCE;
-  private static MiniCluster cluster;
-  protected static PigServer pigServer;
-  private static Configuration conf;
-  private static FileSystem fs;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class TestMergeJoin2 extends BaseTestCase
+{
   final static int numsBatch = 4;
   final static int numsInserters = 1;
-  static Path pathWorking;
+
   static Path pathTable1;
   static Path pathTable2;
   final static String STR_SCHEMA1 = "a:int,b:float,c:long,d:double,e:string,f:bytes,r1:record(f1:string, f2:string),m1:map(string)";
@@ -65,31 +56,18 @@ public class TestMergeJoin2 {
   final static String STR_STORAGE2 = "[a];[b]; [c]; [e]; [f]; [r1.f1]; [m1#{a}]";
   static int t1 =0;
 
+
   @BeforeClass
   public static void setUp() throws Exception {
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    if (execType == ExecType.MAPREDUCE) {
-      cluster = MiniCluster.buildCluster();
-      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-    } else {
-      pigServer = new PigServer(ExecType.LOCAL);
-    }
-
-    fs = cluster.getFileSystem();
-
-    conf = new Configuration();
-    pathWorking = fs.getWorkingDirectory();
-    pathTable1 = new Path(pathWorking, "table1");
-    pathTable2 = new Path(pathWorking, "table2");
-    System.out.println("pathTable1 =" + pathTable1);
+    init();
+    pathTable1 = getTableFullPath("TestMergeJoin1") ;
+    pathTable2 = getTableFullPath("TestMergeJoin2") ;
+    removeDir(pathTable1);
+    removeDir(pathTable2);
     createFirstTable();
     createSecondTable();
   }
+  
   public static void createFirstTable() throws IOException, ParseException {
     BasicTable.Writer writer = new BasicTable.Writer(pathTable1, STR_SCHEMA1,
         STR_STORAGE1, conf);
@@ -438,6 +416,7 @@ public class TestMergeJoin2 {
     this.t1++;
 
     String table1path = this.pathTable1.toString() + Integer.toString(this.t1);
+    removeDir(new Path(table1path));
     pigServer.store("sort1", table1path, TableStorer.class.getCanonicalName()
         + "('[a, b, c]; [d, e, f, r1, m1]')");
 
@@ -466,6 +445,7 @@ public class TestMergeJoin2 {
      */
     this.t1++;
     String table2path = this.pathTable2.toString() + Integer.toString(this.t1);
+    removeDir(new Path(table2path));
     pigServer.store("sort2", table2path, TableStorer.class.getCanonicalName()
         + "('[a, b, c]; [d,e,f,r1,m1]')");
 

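Note: the tests in this part of the diff all switch from per-class MiniCluster/PigServer setup to helpers inherited from org.apache.hadoop.zebra.BaseTestCase, which is introduced elsewhere in r925988 and is not shown in this part of the diff. A rough sketch of the surface these tests rely on -- init(), getTableFullPath(), removeDir(), plus the inherited conf, fs and pigServer fields -- is given below. It is an assumption reconstructed from the deleted setup code, covers only the mini-cluster path (not the real-cluster handling the old code also had), and is not the committed implementation.

    // Hypothetical sketch of the base class surface used above; not the version committed in r925988.
    package org.apache.hadoop.zebra;

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;
    import org.apache.pig.test.MiniCluster;

    public class BaseTestCase {
      protected static Configuration conf;
      protected static FileSystem fs;
      protected static PigServer pigServer;
      private static MiniCluster cluster;

      // Builds the shared mini cluster and PigServer once per test class.
      protected static void init() throws Exception {
        if (System.getProperty("hadoop.log.dir") == null) {
          System.setProperty("hadoop.log.dir", "./logs");
        }
        cluster = MiniCluster.buildCluster();
        pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
        conf = new Configuration();
        fs = cluster.getFileSystem();
      }

      // Resolves a test-specific table name to a path under the working directory.
      protected static Path getTableFullPath(String name) throws IOException {
        return new Path(fs.getWorkingDirectory() + "/" + name);
      }

      // Deletes a table directory through the FileSystem API rather than shelling out
      // to "rm -rf" or "hadoop fs -rmr" as the removed per-test helpers did.
      protected static void removeDir(Path outPath) throws IOException {
        fs.delete(outPath, true);
      }
    }
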
Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinEmpty.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinEmpty.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinEmpty.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinEmpty.java Mon Mar 22 07:54:51 2010
@@ -18,44 +18,36 @@
 
 package org.apache.hadoop.zebra.pig;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
 import junit.framework.Assert;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
 import org.apache.hadoop.zebra.io.TableInserter;
 import org.apache.hadoop.zebra.io.TableScanner;
-import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
-import org.apache.hadoop.zebra.pig.TableStorer;
-import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.parser.ParseException;
+import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-public class TestMergeJoinEmpty {
-  protected static ExecType execType = ExecType.MAPREDUCE;
-  private static MiniCluster cluster;
-  protected static PigServer pigServer;
-  private static Configuration conf;
-  private static FileSystem fs;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class TestMergeJoinEmpty extends BaseTestCase
+{
+
   final static int numsBatch = 4;
   final static int numsInserters = 1;
-  static Path pathWorking;
+
   static Path pathTable1;
   static Path pathTable2;
   final static String STR_SCHEMA1 = "a:int,b:float,c:long,d:double,e:string,f:bytes,r1:record(f1:string, f2:string),m1:map(string)";
@@ -64,36 +56,20 @@ public class TestMergeJoinEmpty {
   final static String STR_STORAGE1 = "[a, b, c]; [e, f]; [r1.f1]; [m1#{a}]";
   final static String STR_STORAGE2 = "[a];[b]; [c]; [e]; [f]; [r1.f1]; [m1#{a}]";
   static int t1 =0;
- 
+  
+
   @BeforeClass
   public static void setUp() throws Exception {
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    if (execType == ExecType.MAPREDUCE) {
-      cluster = MiniCluster.buildCluster();
-      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-    } else {
-      pigServer = new PigServer(ExecType.LOCAL);
-    }
-   
-   
-    fs = cluster.getFileSystem();
-   
-
- conf = new Configuration();
-  
-    
-    pathWorking = fs.getWorkingDirectory();
-    pathTable1 = new Path(pathWorking, "table1");
-    pathTable2 = new Path(pathWorking, "table2");
-    System.out.println("pathTable1 =" + pathTable1);
+    init();
+    pathTable1 = getTableFullPath("TestMergeJoinEmpty1") ;
+    pathTable2 = getTableFullPath("TestMergeJoinEmpty2") ;
+    removeDir(pathTable1);
+    removeDir(pathTable2);
     createFirstTable();
-    createSecondTable();
+    createSecondTable();    
+    
   }
+  
   public static void createFirstTable() throws IOException, ParseException {
     BasicTable.Writer writer = new BasicTable.Writer(pathTable1, STR_SCHEMA1,
         STR_STORAGE1, conf);
@@ -177,7 +153,6 @@ public class TestMergeJoinEmpty {
       inserters[i].close();
     }
     writer.close();
-    
   
     //check table is setup correctly
     String projection = new String("a,b,c,d,e,f,r1,m1");
@@ -202,6 +177,7 @@ public class TestMergeJoinEmpty {
     reader.close();
     
   }
+  
   public static void createSecondTable() throws IOException, ParseException {
     BasicTable.Writer writer = new BasicTable.Writer(pathTable2, STR_SCHEMA2,
         STR_STORAGE2, conf);
@@ -285,10 +261,7 @@ public class TestMergeJoinEmpty {
       inserters[i].close();
     }
     writer.close();
-    
-    
-    
-    
+ 
     //check table is setup correctly
     String projection = new String("a,b,c,d,e,f,r1,m1");
     
@@ -311,7 +284,6 @@ public class TestMergeJoinEmpty {
     System.out.println("done insert table");
     */
 
-
     reader.close();
     
   }
@@ -319,6 +291,7 @@ public class TestMergeJoinEmpty {
   public static void sortTable(Path tablePath, String sortkey){
     
   }
+  
   @AfterClass
   public static void tearDown() throws Exception {
     pigServer.shutdown();

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinNegative.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinNegative.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinNegative.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinNegative.java Mon Mar 22 07:54:51 2010
@@ -18,14 +18,10 @@
 
 package org.apache.hadoop.zebra.pig;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -33,18 +29,16 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 
-public class TestMergeJoinNegative {
+public class TestMergeJoinNegative extends BaseTestCase {
 	
 	final static String STR_SCHEMA1 = "a:int,b:float,c:long,d:double,e:string,f:bytes,m1:map(string)";
 	final static String STR_STORAGE1 = "[a, b, c]; [e, f]; [m1#{a}]";
@@ -55,37 +49,24 @@ public class TestMergeJoinNegative {
 	
 	static int fileId = 0;
 	
-	protected static ExecType execType = ExecType.MAPREDUCE;
-	private static MiniCluster cluster;
-	protected static PigServer pigServer;
 	private static Path pathTable1;
 	private static Path pathTable2;
 	private static Path pathTable3;
 	private static Path pathTable4;
-	private static Configuration conf;
 	
 	@BeforeClass
 	public static void setUp() throws Exception {
-		if (System.getProperty("hadoop.log.dir") == null) {
-			String base = new File(".").getPath(); // getAbsolutePath();
-			System.setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-		}
-
-		if (execType == ExecType.MAPREDUCE) {
-			cluster = MiniCluster.buildCluster();
-			pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-		} else {
-			pigServer = new PigServer(ExecType.LOCAL);
-		}
-		
-		conf = new Configuration();
-		FileSystem fs = cluster.getFileSystem();
-		Path pathWorking = fs.getWorkingDirectory();
-		pathTable1 = new Path(pathWorking, "table1");
-		pathTable2 = new Path(pathWorking, "table2");
-		pathTable3 = new Path(pathWorking, "table3");
-		pathTable4 = new Path(pathWorking, "table4");
-		
+    init();
+    
+    pathTable1 = getTableFullPath("TestMergeJoinNegative1");
+    pathTable2 = getTableFullPath("TestMergeJoinNegative2"); 
+    pathTable3 = getTableFullPath("TestMergeJoinNegative3");
+    pathTable4 = getTableFullPath("TestMergeJoinNegative4");
+    removeDir(pathTable1);
+    removeDir(pathTable2);
+    removeDir(pathTable3);
+    removeDir(pathTable4);
+    
 		// Create table1 data
 		Map<String, String> m1 = new HashMap<String, String>();
 		m1.put("a","m1-a");
@@ -219,7 +200,7 @@ public class TestMergeJoinNegative {
 			" USING \"merge\";";    // n2 is wrong data type
 		pigServer.registerQuery(join);
 		
-		Iterator<Tuple> it = pigServer.openIterator("joinRecords");  // get iterator to trigger error
+		pigServer.openIterator("joinRecords");  // get iterator to trigger error
 	}
 	
 	@Test(expected = IOException.class)
@@ -263,7 +244,7 @@ public class TestMergeJoinNegative {
 			" USING \"merge\";";
 		pigServer.registerQuery(join);
 		
-		Iterator<Tuple> it = pigServer.openIterator("joinRecords");  // get iterator to trigger error
+		pigServer.openIterator("joinRecords");  // get iterator to trigger error
 	}
 	
 	@Test(expected = IOException.class)
@@ -303,7 +284,7 @@ public class TestMergeJoinNegative {
 			" USING \"merge\";";
 		pigServer.registerQuery(join);
 		
-		Iterator<Tuple> it = pigServer.openIterator("joinRecords");  // get iterator to trigger error
+		pigServer.openIterator("joinRecords");  // get iterator to trigger error
 	}
 	
 	@Test(expected = IOException.class)
@@ -343,7 +324,7 @@ public class TestMergeJoinNegative {
 			" USING \"merge\";";
 		pigServer.registerQuery(join);
 		
-		Iterator<Tuple> it = pigServer.openIterator("joinRecords");  // get iterator to trigger error
+		pigServer.openIterator("joinRecords");  // get iterator to trigger error
 	}
 	
 	@Test(expected = IOException.class)
@@ -546,7 +527,7 @@ public class TestMergeJoinNegative {
 			" USING \"merge\";";					// sort key a is different data type for records1 and records4
 		pigServer.registerQuery(join);
 		
-		Iterator<Tuple> it = pigServer.openIterator("joinRecords");  // get iterator to trigger error
+	  pigServer.openIterator("joinRecords");  // get iterator to trigger error
 	}
 	
 }

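Note: aside from the BaseTestCase migration, the only change to TestMergeJoinNegative is dropping the unused Iterator<Tuple> assignments: openIterator() is called purely to force the merge-join plan to execute so the expected IOException surfaces. Trimmed to its skeleton, each negative test looks roughly like the sketch below; the full tests first sort and re-store both inputs, the aliases and keys (records1/records2, a vs. n2) are taken from the mismatched-type case above, and the method assumes the surrounding test class's imports and inherited fields.

    // Skeleton of the negative-test pattern; in the real tests both inputs are sorted
    // and re-stored via TableStorer before the join is registered.
    @Test(expected = IOException.class)
    public void testMergeJoinOnIncompatibleKeys() throws IOException {
      pigServer.registerQuery("records1 = LOAD '" + pathTable1.toString()
          + "' USING org.apache.hadoop.zebra.pig.TableLoader();");
      pigServer.registerQuery("records2 = LOAD '" + pathTable2.toString()
          + "' USING org.apache.hadoop.zebra.pig.TableLoader();");
      String join = "joinRecords = JOIN records1 BY a, records2 BY n2 USING \"merge\";";
      pigServer.registerQuery(join);
      // Nothing runs until results are pulled, so open an iterator to trigger the error.
      pigServer.openIterator("joinRecords");
    }
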
Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinPartial.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinPartial.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinPartial.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinPartial.java Mon Mar 22 07:54:51 2010
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.zebra.pig;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -27,9 +26,6 @@ import java.util.ArrayList;
 
 import junit.framework.Assert;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -37,56 +33,36 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 
-public class TestMergeJoinPartial {
+public class TestMergeJoinPartial extends BaseTestCase {
 	
 	final static String STR_SCHEMA1 = "a:int,b:float,c:long,d:double,e:string,f:bytes,m1:map(string)";
 	final static String STR_STORAGE1 = "[a, b, c]; [e, f]; [m1#{a}]";
 	
 	static int fileId = 0;
 	
-	protected static ExecType execType = ExecType.MAPREDUCE;
-	private static MiniCluster cluster;
-	protected static PigServer pigServer;
 	private static Path pathTable1;
 	private static Path pathTable2;
-	private static Configuration conf;
 	
 	private static Object[][] table1;
 	private static Object[][] table2;
 	
 	@BeforeClass
 	public static void setUp() throws Exception {
-		if (System.getProperty("hadoop.log.dir") == null) {
-			String base = new File(".").getPath(); // getAbsolutePath();
-			System.setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-		}
-
-		if (execType == ExecType.MAPREDUCE) {
-			cluster = MiniCluster.buildCluster();
-			pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-		} else {
-			pigServer = new PigServer(ExecType.LOCAL);
-		}
-		FileSystem fs = cluster.getFileSystem();
-		
-		conf = new Configuration();
-//		pigServer = new PigServer(ExecType.LOCAL);
-//		FileSystem fs = LocalFileSystem.get(conf);
-		
-		Path pathWorking = fs.getWorkingDirectory();
-		pathTable1 = new Path(pathWorking, "table1");
-		pathTable2 = new Path(pathWorking, "table2");
+    init();
+    
+    pathTable1 = getTableFullPath("TestMergeJoinPartial1");
+    pathTable2 = getTableFullPath("TestMergeJoinPartial2");    
+    removeDir(pathTable1);
+    removeDir(pathTable2);
 		
 		// Create table1 data
 		Map<String, String> m1 = new HashMap<String, String>();
@@ -329,7 +305,7 @@ public class TestMergeJoinPartial {
 			" USING \"merge\";";
 		pigServer.registerQuery(join);
 		
-		Iterator<Tuple> it = pigServer.openIterator("joinRecords");  // get iterator to trigger error
+		pigServer.openIterator("joinRecords");  // get iterator to trigger error
 	}
 	
 	public void printTable(String tablename) throws IOException {

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinPrune.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinPrune.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinPrune.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMergeJoinPrune.java Mon Mar 22 07:54:51 2010
@@ -18,15 +18,12 @@
 
 package org.apache.hadoop.zebra.pig;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import junit.framework.Assert;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -37,27 +34,18 @@ import org.apache.hadoop.zebra.pig.Table
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.parser.ParseException;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.backend.executionengine.ExecJob;
-import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-public class TestMergeJoinPrune {
-  protected static ExecType execType = ExecType.MAPREDUCE;
-  private static MiniCluster cluster;
-  protected static PigServer pigServer;
-  private static Configuration conf;
-  private static FileSystem fs;
+public class TestMergeJoinPrune extends BaseTestCase {
   final static int numsBatch = 1;
   final static int numsInserters = 1;
-  static Path pathWorking;
   static Path pathTable1;
   static Path pathTable2;
   final static String STR_SCHEMA1 = "a:int,b:float,c:long,d:double,e:string,f:bytes,r1:record(f1:string, f2:string),m1:map(string)";
@@ -66,75 +54,16 @@ public class TestMergeJoinPrune {
   final static String STR_STORAGE1 = "[a, b, c]; [e, f]; [r1.f1]; [m1#{a}]";
   final static String STR_STORAGE2 = "[a];[b]; [c]; [e]; [f]; [r1.f1]; [m1#{a}]";
  
-  private static String zebraJar;
-  private static String whichCluster;
-  
   private static int t1;
 
   @BeforeClass
   public static void setUp() throws Exception {
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    // if whichCluster is not defined, or defined something other than
-    // "realCluster" or "miniCluster", set it to "miniCluster"
-    if (System.getProperty("whichCluster") == null
-        || ((!System.getProperty("whichCluster")
-            .equalsIgnoreCase("realCluster")) && (!System.getProperty(
-            "whichCluster").equalsIgnoreCase("miniCluster")))) {
-      System.setProperty("whichCluster", "miniCluster");
-      whichCluster = System.getProperty("whichCluster");
-    } else {
-      whichCluster = System.getProperty("whichCluster");
-    }
-
-    System.out.println("cluster: " + whichCluster);
-    if (whichCluster.equalsIgnoreCase("realCluster")
-        && System.getenv("HADOOP_HOME") == null) {
-      System.out.println("Please set HADOOP_HOME");
-      System.exit(0);
-    }
-
-    conf = new Configuration();
-
-    if (whichCluster.equalsIgnoreCase("realCluster")
-        && System.getenv("USER") == null) {
-      System.out.println("Please set USER");
-      System.exit(0);
-    }
-    zebraJar = System.getenv("HADOOP_HOME") + "/../jars/zebra.jar";
-    File file = new File(zebraJar);
-    if (!file.exists() && whichCluster.equalsIgnoreCase("realCulster")) {
-      System.out.println("Please put zebra.jar at hadoop_home/../jars");
-      System.exit(0);
-    }
-
-    if (whichCluster.equalsIgnoreCase("realCluster")) {
-      pigServer = new PigServer(ExecType.MAPREDUCE, ConfigurationUtil
-          .toProperties(conf));
-      pigServer.registerJar(zebraJar);
-      pathTable1 = new Path("/user/" + System.getenv("USER")
-          + "/TestMergeJoin1");
-      pathTable2 = new Path("/user/" + System.getenv("USER")
-          + "/TestMergeJoin2");
-      fs = pathTable1.getFileSystem(conf);
-    }
-
-    if (whichCluster.equalsIgnoreCase("miniCluster")) {
-      if (execType == ExecType.MAPREDUCE) {
-        cluster = MiniCluster.buildCluster();
-        pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-        fs = cluster.getFileSystem();
-        pathTable1 = new Path(fs.getWorkingDirectory() + "/TestMergeJoin1");
-        pathTable2 = new Path(fs.getWorkingDirectory() + "/TestMergeJoin2");
-        System.out.println("path1 =" + pathTable1);
-      } else {
-        pigServer = new PigServer(ExecType.LOCAL);
-      }
-    }
+    init();
+    pathTable1 = getTableFullPath("TestMergeJoinPrune1");
+    pathTable2 = getTableFullPath("TestMergeJoinPrune2");    
+    removeDir(pathTable1);
+    removeDir(pathTable2);
+    
     createFirstTable();
     createSecondTable();
   }
@@ -230,7 +159,6 @@ public class TestMergeJoinPrune {
     reader.setProjection(projection);
     List<RangeSplit> splits = reader.rangeSplit(1);
     TableScanner scanner = reader.getScanner(splits.get(0), true);
-    BytesWritable key = new BytesWritable();
     Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
   
     scanner.getValue(RowValue);
@@ -339,7 +267,6 @@ public class TestMergeJoinPrune {
     reader.setProjection(projection);
     List<RangeSplit> splits = reader.rangeSplit(1);
     TableScanner scanner = reader.getScanner(splits.get(0), true);
-    BytesWritable key = new BytesWritable();
     Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
 
     scanner.getValue(RowValue);
@@ -367,25 +294,6 @@ public class TestMergeJoinPrune {
     
   }
 
-  public void removeDir(Path outPath) throws IOException {
-    String command = null;
-    if (whichCluster.equalsIgnoreCase("realCluster")) {
-    command = System.getenv("HADOOP_HOME") +"/bin/hadoop fs -rmr " + outPath.toString();
-    }
-    else{
-    command = "rm -rf " + outPath.toString();
-    }
-    Runtime runtime = Runtime.getRuntime();
-    Process proc = runtime.exec(command);
-    int exitVal = -1;
-    try {
-      exitVal = proc.waitFor();
-    } catch (InterruptedException e) {
-      System.err.println(e);
-    }
-    
-  }
-
   public void verify(Iterator<Tuple> it3) throws ExecException {
     int row = 0;
     Tuple RowValue3 = null;
@@ -465,12 +373,12 @@ public class TestMergeJoinPrune {
   }
 
   public Iterator<Tuple> joinTable(String table1, String table2, String sortkey1, String sortkey2) throws IOException {
-    String query1 = "records1 = LOAD '" + this.pathTable1.toString()
+    String query1 = "records1 = LOAD '" + pathTable1.toString()
         + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
     System.out.println("query1:" + query1);
     pigServer.registerQuery(query1);
 
-    String query2 = "records2 = LOAD '" + this.pathTable2.toString()
+    String query2 = "records2 = LOAD '" + pathTable2.toString()
         + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
     System.out.println("query2:" + query2);
     pigServer.registerQuery(query2);
@@ -481,9 +389,9 @@ public class TestMergeJoinPrune {
     pigServer.registerQuery(orderby2);
 
    
-    this.t1++;
+    t1++;
     
-    String table1path = this.pathTable1.toString() + Integer.toString(this.t1);
+    String table1path = pathTable1.toString() + Integer.toString(t1);
      
 
 ExecJob pigJob =pigServer.store("sort1", table1path, TableStorer.class.getCanonicalName()

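Note: TestMergeJoinPrune drops its private shell-based removeDir() (the Runtime.exec of "rm -rf" / "hadoop fs -rmr" deleted above) in favour of the inherited helper. Combined with the removeDir-before-store calls added to TestMergeJoin2 at the top of this diff, the sorted-store step these tests repeat could be condensed into a hypothetical helper like the one below; the storage spec string is the one used in the diff, while the method itself is illustrative and assumes the surrounding test class's fields and imports.

    // Illustrative condensation of the sorted-store step; not a method in the committed tests.
    private static String storeSorted(String sortedAlias, Path basePath, int runId)
        throws IOException {
      String outPath = basePath.toString() + Integer.toString(runId);
      removeDir(new Path(outPath));   // clear any output left by an earlier run
      pigServer.store(sortedAlias, outPath, TableStorer.class.getCanonicalName()
          + "('[a, b, c]; [d, e, f, r1, m1]')");
      return outPath;
    }
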
Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMixedType1.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMixedType1.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMixedType1.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestMixedType1.java Mon Mar 22 07:54:51 2010
@@ -17,38 +17,24 @@ package org.apache.hadoop.zebra.pig;
  * the License.
  */
 
-import java.io.File;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
-import java.util.Random;
 
 import junit.framework.Assert;
-import junit.framework.TestCase;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
 import org.apache.hadoop.zebra.io.TableInserter;
-import org.apache.hadoop.zebra.io.TableScanner;
-import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
 import org.apache.hadoop.zebra.parser.ParseException;
-import org.apache.hadoop.zebra.types.Projection;
 import org.apache.hadoop.zebra.schema.Schema;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
-import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.data.DataBag;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -58,40 +44,20 @@ import org.junit.Test;
  * Test projections on complicated column types.
  * 
  */
-public class TestMixedType1 {
+public class TestMixedType1 extends BaseTestCase {
   final static String STR_SCHEMA = "s1:bool, s2:int, s3:long, s4:float, s5:string, s6:bytes, r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4)), m1:map(string),m2:map(map(int)), c:collection(record(f13:double, f14:float, f15:bytes))";
   final static String STR_STORAGE = "[s1, s2]; [m1#{a}]; [r1.f1]; [s3, s4, r2.r3.f3]; [s5, s6, m2#{x|y}]; [r1.f2, m1#{b}]; [r2.r3.f4, m2#{z}]";
-  private static Configuration conf;
-  private static FileSystem fs;
 
-  protected static ExecType execType = ExecType.MAPREDUCE;
-  private static MiniCluster cluster;
-  protected static PigServer pigServer;
   private static Path path;
 
   @BeforeClass
-  public static void setUpOnce() throws IOException {
+  public static void setUpOnce() throws IOException, Exception {
     System.out.println("ONCE SETUP !! ---------");
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    if (execType == ExecType.MAPREDUCE) {
-      cluster = MiniCluster.buildCluster();
-      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-    } else {
-      pigServer = new PigServer(ExecType.LOCAL);
-    }
-
-    conf = new Configuration();
-    FileSystem fs = cluster.getFileSystem();
-    Path pathWorking = fs.getWorkingDirectory();
-    // path = new Path(pathWorking, this.getClass().getSimpleName());
-    path = fs.getWorkingDirectory();
-    System.out.println("path =" + path);
-
+    init();
+    
+    path = getTableFullPath("");  
+    removeDir(path);
+    
     BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
         STR_STORAGE, conf);
     writer.finish();

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveMultiTable.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveMultiTable.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveMultiTable.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveMultiTable.java Mon Mar 22 07:54:51 2010
@@ -19,15 +19,12 @@
 package org.apache.hadoop.zebra.pig;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.Iterator;
 import java.util.ArrayList;
 import java.util.StringTokenizer;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -35,14 +32,11 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.backend.executionengine.ExecJob;
-import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 
 import junit.framework.Assert;
 import org.junit.AfterClass;
@@ -50,7 +44,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 
-public class TestOrderPreserveMultiTable {
+public class TestOrderPreserveMultiTable extends BaseTestCase {
 	
 	final static int NUMB_TABLE = 10;  		// number of tables for stress test
 	final static int NUMB_TABLE_ROWS = 5;	// number of rows for each table
@@ -61,91 +55,20 @@ public class TestOrderPreserveMultiTable
 	static int fileId = 0;
 	static int sortId = 0;
 	
-	protected static ExecType execType = ExecType.MAPREDUCE;
-	private static MiniCluster cluster;
-	protected static PigServer pigServer;
 	protected static ExecJob pigJob;
 	
 	private static ArrayList<Path> pathTables;
 	private static int totalTableRows =0;
 	
-	private static Configuration conf;
-	private static FileSystem fs;
-	
-	private static String zebraJar;
-	private static String whichCluster;
-	
 	@BeforeClass
 	public static void setUp() throws Exception {
-		if (System.getProperty("hadoop.log.dir") == null) {
-			String base = new File(".").getPath(); // getAbsolutePath();
-			System.setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-		}
-		
-		// if whichCluster is not defined, or defined something other than
-		// "realCluster" or "miniCluster", set it to "realCluster"
-		if (System.getProperty("whichCluster") == null
-				|| ((!System.getProperty("whichCluster")
-						.equalsIgnoreCase("realCluster")) && (!System.getProperty(
-						"whichCluster").equalsIgnoreCase("miniCluster")))) {
-			System.setProperty("whichCluster", "miniCluster");
-			whichCluster = System.getProperty("whichCluster");
-		} else {
-			whichCluster = System.getProperty("whichCluster");
-		}
-		
-		System.out.println("cluster: " + whichCluster);
-		if (whichCluster.equalsIgnoreCase("realCluster")
-				&& System.getenv("HADOOP_HOME") == null) {
-			System.out.println("Please set HADOOP_HOME");
-			System.exit(0);
-		}
-		
-		conf = new Configuration();
-		
-		if (whichCluster.equalsIgnoreCase("realCluster")
-				&& System.getenv("USER") == null) {
-			System.out.println("Please set USER");
-			System.exit(0);
-		}
-		zebraJar = System.getenv("HADOOP_HOME") + "/../jars/zebra.jar";
-		File file = new File(zebraJar);
-		if (!file.exists() && whichCluster.equalsIgnoreCase("realCulster")) {
-			System.out.println("Please put zebra.jar at hadoop_home/../jars");
-			System.exit(0);
-		}
-		
-		if (whichCluster.equalsIgnoreCase("realCluster")) {
-			pigServer = new PigServer(ExecType.MAPREDUCE, ConfigurationUtil
-					.toProperties(conf));
-			pigServer.registerJar(zebraJar);
+    init();
 			
-			pathTables = new ArrayList<Path>();
-			for (int i=0; i<NUMB_TABLE; ++i) {
-				Path pathTable = new Path("/user/" + System.getenv("USER")
-						+ "/TestOderPerserveMultiTable" + i);
-				pathTables.add(pathTable);
-				removeDir(pathTable);
-			}
-			fs = pathTables.get(0).getFileSystem(conf);
-		}
-		
-		if (whichCluster.equalsIgnoreCase("miniCluster")) {
-			if (execType == ExecType.MAPREDUCE) {
-				cluster = MiniCluster.buildCluster();
-				pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-				fs = cluster.getFileSystem();
-				
-				pathTables = new ArrayList<Path>();
-				for (int i=0; i<NUMB_TABLE; ++i) {
-					Path pathTable = new Path(fs.getWorkingDirectory()
-							+ "/TestOderPerserveMultiTable" + i);
-					pathTables.add(pathTable);
-					removeDir(pathTable);
-				}
-			} else {
-				pigServer = new PigServer(ExecType.LOCAL);
-			}
+		pathTables = new ArrayList<Path>();
+		for (int i=0; i<NUMB_TABLE; ++i) {
+		  Path pathTable = getTableFullPath("TestOderPerserveMultiTable" + i);
+			pathTables.add(pathTable);
+			removeDir(pathTable);
 		}
 		
 		// Create tables
@@ -197,24 +120,6 @@ public class TestOrderPreserveMultiTable
 		pigServer.shutdown();
 	}
 	
-	public static void removeDir(Path outPath) throws IOException {
-		String command = null;
-		if (whichCluster.equalsIgnoreCase("realCluster")) {
-			command = System.getenv("HADOOP_HOME") +"/bin/hadoop fs -rmr " + outPath.toString();
-		}
-		else{
-			command = "rm -rf " + outPath.toString();
-		}
-		Runtime runtime = Runtime.getRuntime();
-		Process proc = runtime.exec(command);
-		int exitVal = -1;
-		try {
-			exitVal = proc.waitFor();
-		} catch (InterruptedException e) {
-			System.err.println(e);
-		}
-	}
-	
 	private Iterator<Tuple> testOrderPreserveUnion(ArrayList<String> inputTables, String sortkey, String columns)
 				throws IOException {
 		//

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveMultiTableGlob.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveMultiTableGlob.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveMultiTableGlob.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveMultiTableGlob.java Mon Mar 22 07:54:51 2010
@@ -19,15 +19,12 @@
 package org.apache.hadoop.zebra.pig;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.Iterator;
 import java.util.ArrayList;
 import java.util.StringTokenizer;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -35,14 +32,11 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.backend.executionengine.ExecJob;
-import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 
 import junit.framework.Assert;
 import org.junit.AfterClass;
@@ -50,7 +44,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 
-public class TestOrderPreserveMultiTableGlob {
+public class TestOrderPreserveMultiTableGlob extends BaseTestCase {
 	
 	final static int NUMB_TABLE = 10;  		// number of tables for stress test
 	final static int NUMB_TABLE_ROWS = 5;	// number of rows for each table
@@ -61,92 +55,23 @@ public class TestOrderPreserveMultiTable
 	static int fileId = 0;
 	static int sortId = 0;
 	
-	protected static ExecType execType = ExecType.MAPREDUCE;
-	private static MiniCluster cluster;
-	protected static PigServer pigServer;
 	protected static ExecJob pigJob;
 	
 	private static ArrayList<Path> pathTables;
 	private static int totalTableRows =0;
 	
-	private static Configuration conf;
-	private static FileSystem fs;
-	
-	private static String zebraJar;
-	private static String whichCluster;
-	
 	@BeforeClass
 	public static void setUp() throws Exception {
-		if (System.getProperty("hadoop.log.dir") == null) {
-			String base = new File(".").getPath(); // getAbsolutePath();
-			System.setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-		}
-		
-		// if whichCluster is not defined, or defined something other than
-		// "realCluster" or "miniCluster", set it to "realCluster"
-		if (System.getProperty("whichCluster") == null
-				|| ((!System.getProperty("whichCluster")
-						.equalsIgnoreCase("realCluster")) && (!System.getProperty(
-						"whichCluster").equalsIgnoreCase("miniCluster")))) {
-			System.setProperty("whichCluster", "miniCluster");
-			whichCluster = System.getProperty("whichCluster");
-		} else {
-			whichCluster = System.getProperty("whichCluster");
-		}
-		
-		System.out.println("cluster: " + whichCluster);
-		if (whichCluster.equalsIgnoreCase("realCluster")
-				&& System.getenv("HADOOP_HOME") == null) {
-			System.out.println("Please set HADOOP_HOME");
-			System.exit(0);
-		}
-		
-		conf = new Configuration();
+		init();
 		
-		if (whichCluster.equalsIgnoreCase("realCluster")
-				&& System.getenv("USER") == null) {
-			System.out.println("Please set USER");
-			System.exit(0);
-		}
-		zebraJar = System.getenv("HADOOP_HOME") + "/../jars/zebra.jar";
-		File file = new File(zebraJar);
-		if (!file.exists() && whichCluster.equalsIgnoreCase("realCulster")) {
-			System.out.println("Please put zebra.jar at hadoop_home/../jars");
-			System.exit(0);
+
+		pathTables = new ArrayList<Path>();
+		for (int i=0; i<NUMB_TABLE; ++i) {
+			Path pathTable = getTableFullPath("TestOderPerserveMultiTable" + i);
+			pathTables.add(pathTable);
+			removeDir(pathTable);
 		}
-		
-		if (whichCluster.equalsIgnoreCase("realCluster")) {
-			pigServer = new PigServer(ExecType.MAPREDUCE, ConfigurationUtil
-					.toProperties(conf));
-			pigServer.registerJar(zebraJar);
 			
-			pathTables = new ArrayList<Path>();
-			for (int i=0; i<NUMB_TABLE; ++i) {
-				Path pathTable = new Path("/user/" + System.getenv("USER")
-						+ "/TestOderPerserveMultiTable" + i);
-				pathTables.add(pathTable);
-				removeDir(pathTable);
-			}
-			fs = pathTables.get(0).getFileSystem(conf);
-		}
-		
-		if (whichCluster.equalsIgnoreCase("miniCluster")) {
-			if (execType == ExecType.MAPREDUCE) {
-				cluster = MiniCluster.buildCluster();
-				pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-				fs = cluster.getFileSystem();
-				
-				pathTables = new ArrayList<Path>();
-				for (int i=0; i<NUMB_TABLE; ++i) {
-					Path pathTable = new Path(fs.getWorkingDirectory()
-							+ "/TestOderPerserveMultiTable" + i);
-					pathTables.add(pathTable);
-					removeDir(pathTable);
-				}
-			} else {
-				pigServer = new PigServer(ExecType.LOCAL);
-			}
-		}
 		
 		// Create tables
 		for (int i=0; i<NUMB_TABLE; ++i) {
@@ -197,24 +122,6 @@ public class TestOrderPreserveMultiTable
 		pigServer.shutdown();
 	}
 	
-	public static void removeDir(Path outPath) throws IOException {
-		String command = null;
-		if (whichCluster.equalsIgnoreCase("realCluster")) {
-			command = System.getenv("HADOOP_HOME") +"/bin/hadoop fs -rmr " + outPath.toString();
-		}
-		else{
-			command = "rm -rf " + outPath.toString();
-		}
-		Runtime runtime = Runtime.getRuntime();
-		Process proc = runtime.exec(command);
-		int exitVal = -1;
-		try {
-			exitVal = proc.waitFor();
-		} catch (InterruptedException e) {
-			System.err.println(e);
-		}
-	}
-	
 	private Iterator<Tuple> testOrderPreserveUnion(ArrayList<String> inputTables, String sortkey, String columns)
 				throws IOException {
 		//

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveProjection.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveProjection.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveProjection.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveProjection.java Mon Mar 22 07:54:51 2010
@@ -19,7 +19,6 @@
 package org.apache.hadoop.zebra.pig;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
@@ -33,7 +32,6 @@ import java.util.StringTokenizer;
 import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -41,19 +39,17 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.backend.executionengine.ExecJob;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 
-public class TestOrderPreserveProjection {
+public class TestOrderPreserveProjection extends BaseTestCase {
 	
 	final static String TABLE1_SCHEMA = "a:int,b:float,c:long,d:double,e:string,f:bytes,m1:map(string)";
 	final static String TABLE1_STORAGE = "[a, b, c]; [d, e, f]; [m1#{a}]";
@@ -79,9 +75,6 @@ public class TestOrderPreserveProjection
 	static int fileId = 0;
 	static int sortId = 0;
 	
-	protected static ExecType execType = ExecType.MAPREDUCE;
-	private static MiniCluster cluster;
-	protected static PigServer pigServer;
 	protected static ExecJob pigJob;
 	
 	private static Path pathTable1;
@@ -95,8 +88,6 @@ public class TestOrderPreserveProjection
 	
 	private static ArrayList<String> tableList;
 	
-	private static Configuration conf;
-	
 	private static Object[][] table1;
 	private static Object[][] table2;
 	private static Object[][] table3;
@@ -111,29 +102,14 @@ public class TestOrderPreserveProjection
 	
 	@BeforeClass
 	public static void setUp() throws Exception {
-		if (System.getProperty("hadoop.log.dir") == null) {
-			String base = new File(".").getPath(); // getAbsolutePath();
-			System.setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-		}
-
-		if (execType == ExecType.MAPREDUCE) {
-			cluster = MiniCluster.buildCluster();
-			pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-		} else {
-			pigServer = new PigServer(ExecType.LOCAL);
-		}
-		
-		conf = new Configuration();
-		FileSystem fs = cluster.getFileSystem();
-		Path pathWorking = fs.getWorkingDirectory();
-		
-		pathTable1 = new Path(pathWorking, "table1");
-		pathTable2 = new Path(pathWorking, "table2");
-		pathTable3 = new Path(pathWorking, "table3");
-		pathTable4 = new Path(pathWorking, "table4");
-		pathTable5 = new Path(pathWorking, "table5");
-		pathTable6 = new Path(pathWorking, "table6");
-		pathTable7 = new Path(pathWorking, "table7");
+		init();
+		pathTable1 = getTableFullPath("table1");
+		pathTable2 = getTableFullPath("table2");
+		pathTable3 = getTableFullPath("table3");
+		pathTable4 = getTableFullPath("table4");
+		pathTable5 = getTableFullPath("table5");
+		pathTable6 = getTableFullPath("table6");
+		pathTable7 = getTableFullPath("table7");
 		
 		// Create table1 data
 		m1 = new HashMap<String, String>();

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveProjectionNegative.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveProjectionNegative.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveProjectionNegative.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveProjectionNegative.java Mon Mar 22 07:54:51 2010
@@ -19,7 +19,6 @@
 package org.apache.hadoop.zebra.pig;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
@@ -32,8 +31,6 @@ import java.util.StringTokenizer;
 
 import junit.framework.Assert;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -41,19 +38,17 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.backend.executionengine.ExecJob;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 
-public class TestOrderPreserveProjectionNegative {
+public class TestOrderPreserveProjectionNegative extends BaseTestCase {
 	
 	final static String TABLE1_SCHEMA = "a:int,b:float,c:long,d:double,e:string,f:bytes,m1:map(string)";
 	final static String TABLE1_STORAGE = "[a, b, c]; [d, e, f]; [m1#{a}]";
@@ -76,14 +71,9 @@ public class TestOrderPreserveProjection
 	final static String TABLE7_SCHEMA = "int2:int";
 	final static String TABLE7_STORAGE = "[int2]";
 	
-	static Path pathWorking = null;
-	
 	static int fileId = 0;
 	static int sortId = 0;
 	
-	protected static ExecType execType = ExecType.MAPREDUCE;
-	private static MiniCluster cluster;
-	protected static PigServer pigServer;
 	protected static ExecJob pigJob;
 	
 	private static Path pathTable1;
@@ -95,8 +85,6 @@ public class TestOrderPreserveProjection
 	private static Path pathTable7;
 	private static HashMap<String, String> tableStorage;
 	
-	private static Configuration conf;
-	
 	private static Object[][] table1;
 	private static Object[][] table2;
 	private static Object[][] table3;
@@ -104,33 +92,26 @@ public class TestOrderPreserveProjection
 	private static Object[][] table5;
 	private static Object[][] table6;
 	private static Object[][] table7;
-	
-	@BeforeClass
-	public static void setUp() throws Exception {
-		if (System.getProperty("hadoop.log.dir") == null) {
-			String base = new File(".").getPath(); // getAbsolutePath();
-			System.setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-		}
 
-		if (execType == ExecType.MAPREDUCE) {
-			cluster = MiniCluster.buildCluster();
-			pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-		} else {
-			pigServer = new PigServer(ExecType.LOCAL);
-		}
-		
-		conf = new Configuration();
-		FileSystem fs = cluster.getFileSystem();
-		pathWorking = fs.getWorkingDirectory();
-		
-		pathTable1 = new Path(pathWorking, "table1");
-		pathTable2 = new Path(pathWorking, "table2");
-		pathTable3 = new Path(pathWorking, "table3");
-		pathTable4 = new Path(pathWorking, "table4");
-		pathTable5 = new Path(pathWorking, "table5");
-		pathTable6 = new Path(pathWorking, "table6");
-		pathTable7 = new Path(pathWorking, "table7");
-		
+  @BeforeClass
+  public static void setUp() throws Exception {
+      init();
+      pathTable1 = getTableFullPath("TestOrderPerserveProjectionNegative1");
+      pathTable2 = getTableFullPath("TestOrderPerserveProjectionNegative2");
+      pathTable3 = getTableFullPath("TestOrderPerserveProjectionNegative3");
+      pathTable4 = getTableFullPath("TestOrderPerserveProjectionNegative4");
+      pathTable5 = getTableFullPath("TestOrderPerserveProjectionNegative5");
+      pathTable6 = getTableFullPath("TestOrderPerserveProjectionNegative6");
+      pathTable7 = getTableFullPath("TestOrderPerserveProjectionNegative7");
+      
+      removeDir(pathTable1);
+      removeDir(pathTable2);
+      removeDir(pathTable3);
+      removeDir(pathTable4);
+      removeDir(pathTable5);
+      removeDir(pathTable6);
+      removeDir(pathTable7);
+
 		// Create table1 data
 		Map<String, String> m1 = new HashMap<String, String>();
 		m1.put("a","m1-a");
@@ -357,7 +338,7 @@ public class TestOrderPreserveProjection
 			{"a3",	"e",	3},
 			{"a4",	"a",	1} };
 		
-		Path pathTable1 = new Path(pathWorking, "table_test");
+//		Path pathTable1 = new Path(pathWorking, "table_test");
 		
 		try {
 			// Create table1
@@ -455,7 +436,7 @@ public class TestOrderPreserveProjection
 		} finally {
 			//System.out.println(getStackTrace(exception));
 			Assert.assertNotNull(exception);
-      Assert.assertTrue(getStackTrace(exception).contains("Input path does not exist: "));
+                        Assert.assertTrue(getStackTrace(exception).contains("Input path does not exist: "));
 		}
 	}
 	

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveSimple.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveSimple.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveSimple.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveSimple.java Mon Mar 22 07:54:51 2010
@@ -19,7 +19,6 @@
 package org.apache.hadoop.zebra.pig;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.HashMap;
@@ -30,8 +29,6 @@ import java.util.StringTokenizer;
 
 import junit.framework.Assert;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -39,61 +36,43 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.backend.executionengine.ExecJob;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 
-public class TestOrderPreserveSimple {
+public class TestOrderPreserveSimple extends BaseTestCase {
 	
 	final static String STR_SCHEMA1 = "a:int,b:float,c:long,d:double,e:string,f:bytes,m1:map(string)";
 	final static String STR_STORAGE1 = "[a, b, c]; [d, e, f]; [m1#{a}]";
 	
 	static int fileId = 0;
 	
-	protected static ExecType execType = ExecType.MAPREDUCE;
-	private static MiniCluster cluster;
-	protected static PigServer pigServer;
 	protected static ExecJob pigJob;
 	
 	private static Path pathTable1;
 	private static Path pathTable2;
-	private static Configuration conf;
 	
 	private static Object[][] table1;
 	private static Object[][] table2;
 	
 	private static Map<String, String> m1;
 	private static Map<String, String> m2;
-	
-	@BeforeClass
-	public static void setUp() throws Exception {
-		if (System.getProperty("hadoop.log.dir") == null) {
-			String base = new File(".").getPath(); // getAbsolutePath();
-			System.setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-		}
 
-		if (execType == ExecType.MAPREDUCE) {
-			cluster = MiniCluster.buildCluster();
-			pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-		} else {
-			pigServer = new PigServer(ExecType.LOCAL);
-		}
-		
-		conf = new Configuration();
-		FileSystem fs = cluster.getFileSystem();
-		Path pathWorking = fs.getWorkingDirectory();
-		
-		pathTable1 = new Path(pathWorking, "table1");
-		pathTable2 = new Path(pathWorking, "table2");
-		
+  @BeforeClass
+  public static void setUp() throws Exception {
+    init();
+    
+    pathTable1 = getTableFullPath("TestOrderPreserveSimple1");
+    pathTable2 = getTableFullPath("TestOrderPreserveSimple2");    
+    removeDir(pathTable1);
+    removeDir(pathTable2);
+    
 		// Create table1 data
 		m1 = new HashMap<String, String>();
 		m1.put("a","m1-a");
@@ -158,7 +137,7 @@ public class TestOrderPreserveSimple {
 	public static void tearDown() throws Exception {
 		pigServer.shutdown();
 	}
-	
+ 
 	private Iterator<Tuple> orderPreserveUnion(String sortkey, String columns) throws IOException {
 		//
 		// Test sorted union with two tables that are different

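A note on the pattern these diffs apply: the per-test bootstrap (hadoop.log.dir setup, MiniCluster.buildCluster(), PigServer construction, working-directory table paths) moves into the shared org.apache.hadoop.zebra.BaseTestCase, and each test now just calls init(), getTableFullPath() and removeDir() and uses the inherited conf/fs/pigServer members. BaseTestCase itself is not part of this portion of the diff, so the sketch below is only an illustration of what such a base class might look like for the mini-cluster case, reconstructed from how the tests call it; the committed class (which also has to cover the realCluster mode removed from TestTableLoaderPrune further down) may well differ.

package org.apache.hadoop.zebra;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.pig.ExecType;
import org.apache.pig.PigServer;
import org.apache.pig.test.MiniCluster;

// Illustrative sketch only -- not the BaseTestCase committed with this change.
public class BaseTestCase {
  protected static Configuration conf;
  protected static FileSystem fs;
  protected static PigServer pigServer;
  private static MiniCluster cluster;

  // One-time bootstrap shared by all subclasses (mini-cluster case only).
  protected static void init() throws Exception {
    cluster = MiniCluster.buildCluster();
    pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
    conf = new Configuration();
    fs = cluster.getFileSystem();
  }

  // Per-test table directory under the cluster's working directory.
  protected static Path getTableFullPath(String name) throws IOException {
    return new Path(fs.getWorkingDirectory(), name);
  }

  // Remove leftovers from a previous run so the writer can recreate the table.
  protected static void removeDir(Path path) throws IOException {
    fs.delete(path, true);
  }
}
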
Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveUnion.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveUnion.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveUnion.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveUnion.java Mon Mar 22 07:54:51 2010
@@ -19,17 +19,12 @@
 package org.apache.hadoop.zebra.pig;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.Iterator;
 import java.util.StringTokenizer;
 
 import junit.framework.Assert;
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -37,14 +32,10 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -55,32 +46,15 @@ import org.junit.Test;
  * app/debug configuration, when run this from inside the Eclipse.
  * 
  */
-public class TestOrderPreserveUnion {
-  protected static ExecType execType = ExecType.MAPREDUCE;
-  private static MiniCluster cluster;
-  protected static PigServer pigServer;
+public class TestOrderPreserveUnion extends BaseTestCase {
   private static Path pathTable;
 
   @BeforeClass
   public static void setUp() throws Exception {
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    if (execType == ExecType.MAPREDUCE) {
-      cluster = MiniCluster.buildCluster();
-      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-    } else {
-      pigServer = new PigServer(ExecType.LOCAL);
-    }
-
-    Configuration conf = new Configuration();
-    FileSystem fs = cluster.getFileSystem();
-    Path pathWorking = fs.getWorkingDirectory();
-    pathTable = new Path(pathWorking, "TestTableStorer");
-    System.out.println("pathTable =" + pathTable);
+    init();
+    
+    pathTable = getTableFullPath("TestOrderPreserveUnion");    
+    removeDir(pathTable);
     BasicTable.Writer writer = new BasicTable.Writer(pathTable,
         "SF_a:string,SF_b:string,SF_c,SF_d,SF_e,SF_f,SF_g",
         "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", conf);

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveUnionHDFS.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveUnionHDFS.java?rev=925988&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveUnionHDFS.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveUnionHDFS.java Mon Mar 22 07:54:51 2010
@@ -0,0 +1,268 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.pig;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.ArrayList;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.pig.TableStorer;
+import org.apache.hadoop.zebra.schema.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.backend.executionengine.ExecJob;
+import org.apache.pig.data.Tuple;
+import org.apache.hadoop.zebra.BaseTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+public class TestOrderPreserveUnionHDFS extends BaseTestCase {
+	
+	final static String STR_SCHEMA1 = "a:string,b:string,c:string";
+	final static String STR_STORAGE1 = "[a, b]; [c]";
+	
+	static int fileId = 0;
+	
+	protected static ExecJob pigJob;
+	
+	private static Path pathTable1;
+	private static Path pathTable2;
+	
+	private static Object[][] table1;
+	private static Object[][] table2;	
+	
+	@BeforeClass
+	public static void setUp() throws Exception {
+	  init();
+	  
+    pathTable1 = getTableFullPath("TestOrderPreserveUnionHDFS1");
+    pathTable2 = getTableFullPath("TestOrderPreserveUnionHDFS2");
+	  removeDir(pathTable1);
+		removeDir(pathTable2);
+				
+		// Create table1 data
+		table1 = new Object[][] {
+			{"a1",	"z",	"5"},
+			{"a2",	"r",	"4"},
+			{"a3",	"e",	"3"},
+			{"a4",	"a",	"1"} };
+		
+		// Create table1
+		createTable(pathTable1, STR_SCHEMA1, STR_STORAGE1, table1);
+		
+		// Create table2 data
+		table2 = new Object[][] {
+			{"b1",	"a",	"a"},
+			{"b2",	"a",	"a"},
+			{"b3",	"a",	"a"},
+			{"b4",	"a",	"a"} };
+		
+		// Create table2
+		createTable(pathTable2, STR_SCHEMA1, STR_STORAGE1, table2);
+		
+		// Load table1
+		String query1 = "table1 = LOAD '" + pathTable1.toString() + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
+		pigServer.registerQuery(query1);
+		
+		// Load table2
+		String query2 = "table2 = LOAD '" + pathTable2.toString() + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
+		pigServer.registerQuery(query2);
+	}
+	
+	private static void createTable(Path path, String schemaString, String storageString, Object[][] tableData)
+			throws IOException {
+		//
+		// Create table from tableData array
+		//
+		BasicTable.Writer writer = new BasicTable.Writer(path, schemaString, storageString, conf);
+		
+		Schema schema = writer.getSchema();
+		Tuple tuple = TypesUtils.createTuple(schema);
+		TableInserter inserter = writer.getInserter("ins", false);
+		
+		for (int i = 0; i < tableData.length; ++i) {
+			TypesUtils.resetTuple(tuple);
+			for (int k = 0; k < tableData[i].length; ++k) {
+				tuple.set(k, tableData[i][k]);
+				System.out.println("DEBUG: setting tuple k=" + k + " value=" + tableData[i][k]);
+			}
+			inserter.insert(new BytesWritable(("key" + i).getBytes()), tuple);
+		}
+		inserter.close();
+		writer.close();
+	}
+	
+	@AfterClass
+	public static void tearDown() throws Exception {
+		pigServer.shutdown();
+	}
+	
+	@Test
+	public void test_sorted_table_union_hdfs() throws ExecException, IOException {
+		//
+		// Test sorted union with two tables that are different and that use
+		// hdfs file URLs in Pig LOAD statement
+		//
+		
+		// Sort tables
+		String orderby1 = "sort1 = ORDER table1 BY " + "a" + " ;";
+		pigServer.registerQuery(orderby1);
+		
+		String orderby2 = "sort2 = ORDER table2 BY " + "a" + " ;";
+		pigServer.registerQuery(orderby2);
+		
+		// Store sorted tables
+		++fileId;  // increment filename suffix
+		String pathSort1 = pathTable1.toString() + Integer.toString(fileId);
+		pigJob = pigServer.store("sort1", pathSort1, TableStorer.class.getCanonicalName() +
+			"('[a, b]; [c]')");
+		Assert.assertNull(pigJob.getException());
+		
+		String pathSort2 = pathTable2.toString() + Integer.toString(fileId);
+		pigJob = pigServer.store("sort2", pathSort2, TableStorer.class.getCanonicalName() +
+			"('[a, b]; [c]')");
+		Assert.assertNull(pigJob.getException());
+		
+		String queryLoad = "records1 = LOAD '"
+	        + pathSort1 + ","
+	        + pathSort2
+	        +	"' USING org.apache.hadoop.zebra.pig.TableLoader('a,b,c', 'sorted');";
+		
+		System.out.println("queryLoad: " + queryLoad);
+		
+		pigServer.registerQuery(queryLoad);
+		
+		// Verify union table
+		ArrayList<ArrayList<Object>> resultTable = new ArrayList<ArrayList<Object>>();
+		
+		addResultRow(resultTable, "a1",	"z",	"5");
+		addResultRow(resultTable, "a2",	"r",	"4");
+		addResultRow(resultTable, "a3",	"e",	"3");
+		addResultRow(resultTable, "a4",	"a",	"1");
+		
+		addResultRow(resultTable, "b1",	"a",	"a");
+		addResultRow(resultTable, "b2",	"a",	"a");
+		addResultRow(resultTable, "b3",	"a",	"a");
+		addResultRow(resultTable, "b4",	"a",	"a");
+		
+		// Verify union table
+		Iterator<Tuple> it = pigServer.openIterator("records1");
+		int numbRows = verifyTable(resultTable, 0, it);
+		
+		Assert.assertEquals(numbRows, table1.length + table2.length);
+	}
+	
+	/**
+	 * Verify union output table with expected results
+	 * 
+	 */
+	private int verifyTable(ArrayList<ArrayList<Object>> resultTable, int keyColumn, Iterator<Tuple> it) throws IOException {
+		int numbRows = 0;
+		int index = 0;
+		Object value = resultTable.get(index).get(keyColumn);  // get value of primary key
+		
+		while (it.hasNext()) {
+			Tuple rowValues = it.next();
+			
+			// If last primary sort key does match then search for next matching key
+			if (! compareObj(value, rowValues.get(keyColumn))) {
+				int subIndex = index + 1;
+				while (subIndex < resultTable.size()) {
+					if ( ! compareObj(value, resultTable.get(subIndex).get(keyColumn)) ) {  // found new key
+						index = subIndex;
+						value = resultTable.get(index).get(keyColumn);
+						break;
+					}
+					++subIndex;
+				}
+				Assert.assertEquals("Table comparison error for row : " + numbRows + " - no key found for : "
+					+ rowValues.get(keyColumn), value, rowValues.get(keyColumn));
+			}
+			// Search for matching row with this primary key
+			int subIndex = index;
+			
+			while (subIndex < resultTable.size()) {
+				// Compare row
+				ArrayList<Object> resultRow = resultTable.get(subIndex);
+				if ( compareRow(rowValues, resultRow) )
+					break; // found matching row
+				++subIndex;
+				Assert.assertTrue("Table comparison error for row : " + numbRows + " - no matching row found for : "
+					+ rowValues.get(keyColumn), subIndex < resultTable.size() && compareObj(value, resultTable.get(subIndex).get(keyColumn)));
+			}
+			++numbRows;
+		}
+		Assert.assertEquals(resultTable.size(), numbRows);  // verify expected row count
+		return numbRows;
+	}
+	
+	/**
+	 * Compare table rows
+	 * 
+	 */
+	private boolean compareRow(Tuple rowValues, ArrayList<Object> resultRow) throws IOException {
+		boolean result = true;
+		Assert.assertEquals(resultRow.size(), rowValues.size());
+		for (int i = 0; i < rowValues.size(); ++i) {
+			if (! compareObj(rowValues.get(i), resultRow.get(i)) ) {
+				result = false;
+				break;
+			}
+		}
+		return result;
+	}
+	
+	/**
+	 * Compare table values
+	 * 
+	 */
+	private boolean compareObj(Object object1, Object object2) {
+		if (object1 == null) {
+			if (object2 == null)
+				return true;
+			else
+				return false;
+		} else if (object1.equals(object2))
+			return true;
+		else
+			return false;
+	}
+	
+	/**
+	 * Add a row to expected results table
+	 * 
+	 */
+	private void addResultRow(ArrayList<ArrayList<Object>> resultTable, Object ... values) {
+		ArrayList<Object> resultRow = new ArrayList<Object>();
+		
+		for (int i = 0; i < values.length; i++) {
+			resultRow.add(values[i]);
+		}
+		resultTable.add(resultRow);
+	}
+	
+}

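For reference, the new TestOrderPreserveUnionHDFS test above boils down to the following Pig script (the paths are placeholders; the test builds them from getTableFullPath() plus a fileId suffix, and issues the stores through pigServer.store() rather than a literal STORE statement): both Zebra tables are sorted on column a, stored with TableStorer, and then re-read as one comma-separated location with the 'sorted' option so that TableLoader produces an order-preserving union.

sort1 = ORDER table1 BY a;
sort2 = ORDER table2 BY a;
STORE sort1 INTO '<sorted-table-path-1>' USING org.apache.hadoop.zebra.pig.TableStorer('[a, b]; [c]');
STORE sort2 INTO '<sorted-table-path-2>' USING org.apache.hadoop.zebra.pig.TableStorer('[a, b]; [c]');
records1 = LOAD '<sorted-table-path-1>,<sorted-table-path-2>'
    USING org.apache.hadoop.zebra.pig.TableLoader('a,b,c', 'sorted');
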
Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveVariableTable.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveVariableTable.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveVariableTable.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestOrderPreserveVariableTable.java Mon Mar 22 07:54:51 2010
@@ -20,7 +20,6 @@
 package org.apache.hadoop.zebra.pig;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.HashMap;
@@ -30,9 +29,6 @@ import java.util.ArrayList;
 import java.util.StringTokenizer;
 
 import junit.framework.Assert;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
@@ -40,19 +36,17 @@ import org.apache.hadoop.zebra.io.TableI
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
 import org.apache.pig.backend.executionengine.ExecJob;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 
-public class TestOrderPreserveVariableTable {
+public class TestOrderPreserveVariableTable extends BaseTestCase {
 	
 	final static String TABLE1_SCHEMA = "a:int,b:float,c:long,d:double,e:string,f:bytes,m1:map(string)";
 	final static String TABLE1_STORAGE = "[a, b, c]; [d, e, f]; [m1#{a}]";
@@ -69,9 +63,6 @@ public class TestOrderPreserveVariableTa
 	static int fileId = 0;
 	static int sortId = 0;
 	
-	protected static ExecType execType = ExecType.MAPREDUCE;
-	private static MiniCluster cluster;
-	protected static PigServer pigServer;
 	protected static ExecJob pigJob;
 	
 	private static Path pathTable1;
@@ -80,8 +71,6 @@ public class TestOrderPreserveVariableTa
 	private static Path pathTable4;
 	private static HashMap<String, String> tableStorage;
 	
-	private static Configuration conf;
-	
 	private static Object[][] table1;
 	private static Object[][] table2;
 	private static Object[][] table3;
@@ -89,30 +78,21 @@ public class TestOrderPreserveVariableTa
 	
 	private static Map<String, String> m1;
 	private static Map<String, String> m2;
-	
-	@BeforeClass
-	public static void setUp() throws Exception {
-		if (System.getProperty("hadoop.log.dir") == null) {
-			String base = new File(".").getPath(); // getAbsolutePath();
-			System.setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-		}
 
-		if (execType == ExecType.MAPREDUCE) {
-			cluster = MiniCluster.buildCluster();
-			pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-		} else {
-			pigServer = new PigServer(ExecType.LOCAL);
-		}
-		
-		conf = new Configuration();
-		FileSystem fs = cluster.getFileSystem();
-		Path pathWorking = fs.getWorkingDirectory();
-		
-		pathTable1 = new Path(pathWorking, "table1");
-		pathTable2 = new Path(pathWorking, "table2");
-		pathTable3 = new Path(pathWorking, "table3");
-		pathTable4 = new Path(pathWorking, "table4");
-		
+  @BeforeClass
+  public static void setUp() throws Exception {
+    init();
+    
+    pathTable1 = getTableFullPath("TestOrderPreserveVariableTable1");
+    pathTable2 = getTableFullPath("TestOrderPreserveVariableTable2");
+    pathTable3 = getTableFullPath("TestOrderPreserveVariableTable3");
+    pathTable4 = getTableFullPath("TestOrderPreserveVariableTable4");
+  
+    removeDir(pathTable1);
+    removeDir(pathTable2);
+    removeDir(pathTable3);
+    removeDir(pathTable4);
+    
 		// Create table1 data
 		m1 = new HashMap<String, String>();
 		m1.put("a","m1-a");

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSimpleType.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSimpleType.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSimpleType.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSimpleType.java Mon Mar 22 07:54:51 2010
@@ -17,45 +17,26 @@
 package org.apache.hadoop.zebra.pig;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.util.HashMap;
 import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
 import java.util.StringTokenizer;
 
 import junit.framework.Assert;
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.zebra.io.BasicTable;
 import org.apache.hadoop.zebra.io.TableInserter;
-import org.apache.hadoop.zebra.io.TableScanner;
-import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
 import org.apache.hadoop.zebra.pig.TableStorer;
 import org.apache.hadoop.zebra.parser.ParseException;
-import org.apache.hadoop.zebra.types.Projection;
 import org.apache.hadoop.zebra.schema.Schema;
 import org.apache.hadoop.zebra.types.TypesUtils;
-import org.apache.pig.ExecType;
-import org.apache.pig.PigServer;
+import org.apache.hadoop.zebra.BaseTestCase;
 import org.apache.pig.backend.executionengine.ExecException;
-import org.apache.pig.data.DataBag;
 import org.apache.pig.data.DataByteArray;
 import org.apache.pig.data.Tuple;
-import org.apache.pig.test.MiniCluster;
 import org.apache.pig.backend.executionengine.ExecJob;
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -64,40 +45,19 @@ import org.junit.Test;
  * Test projections on complicated column types.
  * 
  */
-public class TestSimpleType {
+public class TestSimpleType extends BaseTestCase {
 
   final static String STR_SCHEMA = "s1:bool, s2:int, s3:long, s4:float, s5:string, s6:bytes";
   final static String STR_STORAGE = "[s1, s2]; [s3, s4]; [s5, s6]";
-  private static Configuration conf;
-  private static FileSystem fs;
-
-  protected static ExecType execType = ExecType.MAPREDUCE;
-  private static MiniCluster cluster;
-  protected static PigServer pigServer;
   private static Path path;
 
   @BeforeClass
-  public static void setUpOnce() throws IOException {
-    System.out.println("ONCE SETUP !! ---------");
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    if (execType == ExecType.MAPREDUCE) {
-      cluster = MiniCluster.buildCluster();
-      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-    } else {
-      pigServer = new PigServer(ExecType.LOCAL);
-    }
-
-    conf = new Configuration();
-    FileSystem fs = cluster.getFileSystem();
-    Path pathWorking = fs.getWorkingDirectory();
-    // path = new Path(pathWorking, this.getClass().getSimpleName());
-    path = fs.getWorkingDirectory();
-    System.out.println("path =" + path);
+  public static void setUpOnce() throws Exception {
+    init();
+
+
+    path = getTableFullPath("TestSimpleType");
+    removeDir(path);
 
     BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
         STR_STORAGE, conf);

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSortedTableUnion.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSortedTableUnion.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSortedTableUnion.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSortedTableUnion.java Mon Mar 22 07:54:51 2010
@@ -40,6 +40,7 @@ import org.apache.hadoop.zebra.types.Typ
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
 import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.test.MiniCluster;
 import org.junit.After;
@@ -47,6 +48,8 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.apache.hadoop.zebra.BaseTestCase;
+
 
 /**
  * Note:
@@ -55,32 +58,16 @@ import org.junit.Test;
  * app/debug configuration, when run this from inside the Eclipse.
  * 
  */
-public class TestSortedTableUnion {
-  protected static ExecType execType = ExecType.MAPREDUCE;
-  private static MiniCluster cluster;
-  protected static PigServer pigServer;
+public class TestSortedTableUnion extends BaseTestCase{
   private static Path pathTable;
 
   @BeforeClass
   public static void setUp() throws Exception {
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    if (execType == ExecType.MAPREDUCE) {
-      cluster = MiniCluster.buildCluster();
-      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-    } else {
-      pigServer = new PigServer(ExecType.LOCAL);
-    }
 
-    Configuration conf = new Configuration();
-    FileSystem fs = cluster.getFileSystem();
-    Path pathWorking = fs.getWorkingDirectory();
-    pathTable = new Path(pathWorking, "TestTableStorer");
-    System.out.println("pathTable =" + pathTable);
+    init();
+    pathTable = getTableFullPath("TestBasicTableUnionLoader1");
+    removeDir(pathTable);
+    
     BasicTable.Writer writer = new BasicTable.Writer(pathTable,
         "SF_a:string,SF_b:string,SF_c,SF_d,SF_e,SF_f,SF_g",
         "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", conf);
@@ -219,4 +206,5 @@ public class TestSortedTableUnion {
     }
     Assert.assertEquals(20, row);
   }
+  
 }

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSortedTableUnionMergeJoin.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSortedTableUnionMergeJoin.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSortedTableUnionMergeJoin.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestSortedTableUnionMergeJoin.java Mon Mar 22 07:54:51 2010
@@ -40,6 +40,7 @@ import org.apache.hadoop.zebra.types.Typ
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
 import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.test.MiniCluster;
 import org.junit.After;
@@ -47,6 +48,7 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.apache.hadoop.zebra.BaseTestCase;
 
 /**
  * Note:
@@ -55,32 +57,17 @@ import org.junit.Test;
  * app/debug configuration, when run this from inside the Eclipse.
  * 
  */
-public class TestSortedTableUnionMergeJoin {
-  protected static ExecType execType = ExecType.MAPREDUCE;
-  private static MiniCluster cluster;
-  protected static PigServer pigServer;
+public class TestSortedTableUnionMergeJoin extends BaseTestCase {
   private static Path pathTable;
 
   @BeforeClass
   public static void setUp() throws Exception {
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
 
-    if (execType == ExecType.MAPREDUCE) {
-      cluster = MiniCluster.buildCluster();
-      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-    } else {
-      pigServer = new PigServer(ExecType.LOCAL);
-    }
+    init();
+    pathTable = getTableFullPath("TestTableStorer");
+    removeDir(pathTable);
+
 
-    Configuration conf = new Configuration();
-    FileSystem fs = cluster.getFileSystem();
-    Path pathWorking = fs.getWorkingDirectory();
-    pathTable = new Path(pathWorking, "TestTableStorer");
-    System.out.println("pathTable =" + pathTable);
     BasicTable.Writer writer = new BasicTable.Writer(pathTable,
         "SF_a:string,SF_b:string,SF_c,SF_d,SF_e,SF_f,SF_g",
         "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", conf);

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoader.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoader.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoader.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoader.java Mon Mar 22 07:54:51 2010
@@ -33,12 +33,14 @@ import org.apache.hadoop.zebra.types.Typ
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
 import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.test.MiniCluster;
 import org.junit.Test;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
 import junit.framework.Assert;
+import org.apache.hadoop.zebra.BaseTestCase;
 
 /**
  * Note:
@@ -47,33 +49,15 @@ import junit.framework.Assert;
  * app/debug configuration, when run this from inside the Eclipse.
  * 
  */
-public class TestTableLoader {
-  static protected ExecType execType = ExecType.MAPREDUCE;
-  static private MiniCluster cluster;
-  static protected PigServer pigServer;
+public class TestTableLoader extends BaseTestCase {
   static private Path pathTable;
 
   @BeforeClass
   public static void setUp() throws Exception {
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    if (execType == ExecType.MAPREDUCE) {
-      cluster = MiniCluster.buildCluster();
-      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-    } else {
-      pigServer = new PigServer(ExecType.LOCAL);
-    }
 
-    Configuration conf = new Configuration();
-    FileSystem fs = cluster.getFileSystem();
-    Path pathWorking = fs.getWorkingDirectory();
-    // pathTable = new Path(pathWorking, this.getClass().getSimpleName());
-    pathTable = new Path(pathWorking, "TestTableLoader");
-    System.out.println("pathTable =" + pathTable);
+    init();
+    pathTable = getTableFullPath("TestTableLoader");
+    removeDir(pathTable);
 
     BasicTable.Writer writer = new BasicTable.Writer(pathTable,
         "a:string,b,c:string,d,e,f,g", "[a,b,c];[d,e,f,g]", conf);

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoaderPrune.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoaderPrune.java?rev=925988&r1=925987&r2=925988&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoaderPrune.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoaderPrune.java Mon Mar 22 07:54:51 2010
@@ -40,6 +40,8 @@ import org.junit.Test;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
 import junit.framework.Assert;
+import org.apache.hadoop.zebra.BaseTestCase;
+
 
 /**
  * Note:
@@ -48,78 +50,16 @@ import junit.framework.Assert;
  * app/debug configuration, when run this from inside the Eclipse.
  * 
  */
-public class TestTableLoaderPrune {
-  static protected ExecType execType = ExecType.MAPREDUCE;
-  static private MiniCluster cluster;
-  static protected PigServer pigServer;
+public class TestTableLoaderPrune extends BaseTestCase {
   static private Path pathTable;
 
-  private static Configuration conf;
-  private static FileSystem fs;
-  private static String zebraJar;
-  private static String whichCluster;
-
   @BeforeClass
   public static void setUp() throws Exception {
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System
-          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
-    }
-
-    // if whichCluster is not defined, or defined something other than
-    // "realCluster" or "miniCluster", set it to "miniCluster"
-    if (System.getProperty("whichCluster") == null
-        || ((!System.getProperty("whichCluster")
-            .equalsIgnoreCase("realCluster")) && (!System.getProperty(
-            "whichCluster").equalsIgnoreCase("miniCluster")))) {
-      System.setProperty("whichCluster", "miniCluster");
-      whichCluster = System.getProperty("whichCluster");
-    } else {
-      whichCluster = System.getProperty("whichCluster");
-    }
-
-    System.out.println("cluster: " + whichCluster);
-    if (whichCluster.equalsIgnoreCase("realCluster")
-        && System.getenv("HADOOP_HOME") == null) {
-      System.out.println("Please set HADOOP_HOME");
-      System.exit(0);
-    }
 
-    conf = new Configuration();
+    init();
+    pathTable = getTableFullPath("TestMapType");
+    removeDir(pathTable);
 
-    if (whichCluster.equalsIgnoreCase("realCluster")
-        && System.getenv("USER") == null) {
-      System.out.println("Please set USER");
-      System.exit(0);
-    }
-    zebraJar = System.getenv("HADOOP_HOME") + "/../jars/zebra.jar";
-    File file = new File(zebraJar);
-    if (!file.exists() && whichCluster.equalsIgnoreCase("realCulster")) {
-      System.out.println("Please put zebra.jar at hadoop_home/../jars");
-      System.exit(0);
-    }
-
-    if (whichCluster.equalsIgnoreCase("realCluster")) {
-      pigServer = new PigServer(ExecType.MAPREDUCE, ConfigurationUtil
-          .toProperties(conf));
-      pigServer.registerJar(zebraJar);
-      pathTable = new Path("/user/" + System.getenv("USER")
-          + "/TestMapType");
-      fs = pathTable.getFileSystem(conf);
-    }
-
-    if (whichCluster.equalsIgnoreCase("miniCluster")) {
-      if (execType == ExecType.MAPREDUCE) {
-        cluster = MiniCluster.buildCluster();
-        pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
-        fs = cluster.getFileSystem();
-        pathTable = new Path(fs.getWorkingDirectory() + "TesMapType");
-        System.out.println("path1 =" + pathTable);
-      } else {
-        pigServer = new PigServer(ExecType.LOCAL);
-      }
-    }
 
     BasicTable.Writer writer = new BasicTable.Writer(pathTable,
         "a:string,b,c:string,d,e,f,g", "[a,b,c];[d,e,f,g]", conf);