Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/12/12 01:27:06 UTC

[1/4] git commit: ACCUMULO-1981 Convert inert class to unit test

Updated Branches:
  refs/heads/1.6.0-SNAPSHOT 5f90d0b8a -> c1fbeac50


ACCUMULO-1981 Convert inert class to unit test


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/96956216
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/96956216
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/96956216

Branch: refs/heads/1.6.0-SNAPSHOT
Commit: 96956216c406eb7f2ba4f149933afcb5a02c9edd
Parents: 7b7521d
Author: Christopher Tubbs <ct...@apache.org>
Authored: Mon Dec 9 13:38:07 2013 -0500
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Wed Dec 11 16:59:48 2013 -0500

----------------------------------------------------------------------
 .../core/file/BloomFilterLayerLookupTest.java   | 121 ++++++++++---------
 1 file changed, 63 insertions(+), 58 deletions(-)
----------------------------------------------------------------------
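
For context, the conversion applied in this commit follows a common pattern for turning a main()-method driver into a JUnit test: hard-coded /tmp paths become a TemporaryFolder rule rooted under target/, System.out diagnostics become log4j debug calls, and printed "ERROR" messages become assertions. A minimal sketch of that shape (the class name and file suffix below are illustrative, not taken from the commit):

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;

import org.apache.log4j.Logger;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestName;

public class ConvertedDriverTest {

  private static final Logger LOG = Logger.getLogger(ConvertedDriverTest.class);

  // Replaces hard-coded /tmp paths; JUnit creates and cleans up the directory.
  @Rule
  public TemporaryFolder tempDir = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));

  // Lets output files be named after the running test method.
  @Rule
  public TestName testName = new TestName();

  @Test
  public void test() throws IOException {
    File output = new File(tempDir.getRoot(), testName.getMethodName() + ".rf");
    LOG.debug("writing to " + output.getAbsolutePath());
    // ... exercise the code under test, writing to 'output' ...
    // Conditions the old main() reported with System.out.println("ERROR ...")
    // become assertions, so failures break the build instead of just logging.
    assertTrue(output.getParentFile().exists());
  }
}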


http://git-wip-us.apache.org/repos/asf/accumulo/blob/96956216/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java b/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
index 3078daf..24bde38 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
@@ -16,8 +16,10 @@
  */
 package org.apache.accumulo.core.file;
 
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
@@ -27,114 +29,117 @@ import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor;
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
 
 public class BloomFilterLayerLookupTest {
-  public static void main(String[] args) throws IOException {
-    PrintStream out = System.out;
-    
-    Random r = new Random();
-    
+
+  private static final Logger LOG = Logger.getLogger(BloomFilterLayerLookupTest.class);
+  private static Random random = new Random();
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Rule
+  public TemporaryFolder tempDir = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
+
+  @Test
+  public void test() throws IOException {
     HashSet<Integer> valsSet = new HashSet<Integer>();
-    
     for (int i = 0; i < 100000; i++) {
-      valsSet.add(r.nextInt(Integer.MAX_VALUE));
+      valsSet.add(random.nextInt(Integer.MAX_VALUE));
     }
-    
+
     ArrayList<Integer> vals = new ArrayList<Integer>(valsSet);
     Collections.sort(vals);
-    
+
     ConfigurationCopy acuconf = new ConfigurationCopy(AccumuloConfiguration.getDefaultConfiguration());
     acuconf.set(Property.TABLE_BLOOM_ENABLED, "true");
-    acuconf.set(Property.TABLE_BLOOM_KEY_FUNCTOR, "accumulo.core.file.keyfunctor.ColumnFamilyFunctor");
+    acuconf.set(Property.TABLE_BLOOM_KEY_FUNCTOR, ColumnFamilyFunctor.class.getName());
     acuconf.set(Property.TABLE_FILE_TYPE, RFile.EXTENSION);
     acuconf.set(Property.TABLE_BLOOM_LOAD_THRESHOLD, "1");
     acuconf.set(Property.TSERV_BLOOM_LOAD_MAXCONCURRENT, "1");
-    
+
     Configuration conf = CachedConfiguration.getInstance();
     FileSystem fs = FileSystem.get(conf);
-    
+
+    // get output file name
     String suffix = FileOperations.getNewFileExtension(acuconf);
-    String fname = "/tmp/test." + suffix;
+    String fname = new File(tempDir.getRoot(), testName + "." + suffix).getAbsolutePath();
     FileSKVWriter bmfw = FileOperations.getInstance().openWriter(fname, fs, conf, acuconf);
-    
+
+    // write data to file
     long t1 = System.currentTimeMillis();
-    
     bmfw.startDefaultLocalityGroup();
-    
     for (Integer i : vals) {
       String fi = String.format("%010d", i);
-      bmfw.append(new org.apache.accumulo.core.data.Key(new Text("r" + fi), new Text("cf1")), new Value(("v" + fi).getBytes()));
-      bmfw.append(new org.apache.accumulo.core.data.Key(new Text("r" + fi), new Text("cf2")), new Value(("v" + fi).getBytes()));
+      bmfw.append(new Key(new Text("r" + fi), new Text("cf1")), new Value(("v" + fi).getBytes()));
+      bmfw.append(new Key(new Text("r" + fi), new Text("cf2")), new Value(("v" + fi).getBytes()));
     }
-    
     long t2 = System.currentTimeMillis();
-    
-    out.printf("write rate %6.2f%n", vals.size() / ((t2 - t1) / 1000.0));
-    
+
+    LOG.debug(String.format("write rate %6.2f%n", vals.size() / ((t2 - t1) / 1000.0)));
     bmfw.close();
-    
+
     t1 = System.currentTimeMillis();
     FileSKVIterator bmfr = FileOperations.getInstance().openReader(fname, false, fs, conf, acuconf);
     t2 = System.currentTimeMillis();
-    out.println("Opened " + fname + " in " + (t2 - t1));
-    
-    t1 = System.currentTimeMillis();
-    
+    LOG.debug("Opened " + fname + " in " + (t2 - t1));
+
     int hits = 0;
+    t1 = System.currentTimeMillis();
     for (int i = 0; i < 5000; i++) {
-      int row = r.nextInt(Integer.MAX_VALUE);
-      String fi = String.format("%010d", row);
-      // bmfr.seek(new Range(new Text("r"+fi)));
-      org.apache.accumulo.core.data.Key k1 = new org.apache.accumulo.core.data.Key(new Text("r" + fi), new Text("cf1"));
-      bmfr.seek(new Range(k1, true, k1.followingKey(PartialKey.ROW_COLFAM), false), new ArrayList<ByteSequence>(), false);
+      int row = random.nextInt(Integer.MAX_VALUE);
+      seek(bmfr, row);
       if (valsSet.contains(row)) {
         hits++;
-        if (!bmfr.hasTop()) {
-          out.println("ERROR " + row);
-        }
+        assertTrue(bmfr.hasTop());
       }
     }
-    
     t2 = System.currentTimeMillis();
-    
-    out.printf("random lookup rate : %6.2f%n", 5000 / ((t2 - t1) / 1000.0));
-    out.println("hits = " + hits);
-    
+
+    double rate1 = 5000 / ((t2 - t1) / 1000.0);
+    LOG.debug(String.format("random lookup rate : %6.2f%n", rate1));
+    LOG.debug("hits = " + hits);
+
     int count = 0;
-    
     t1 = System.currentTimeMillis();
-    
     for (Integer row : valsSet) {
-      String fi = String.format("%010d", row);
-      // bmfr.seek(new Range(new Text("r"+fi)));
-      
-      org.apache.accumulo.core.data.Key k1 = new org.apache.accumulo.core.data.Key(new Text("r" + fi), new Text("cf1"));
-      bmfr.seek(new Range(k1, true, k1.followingKey(PartialKey.ROW_COLFAM), false), new ArrayList<ByteSequence>(), false);
-      
-      if (!bmfr.hasTop()) {
-        out.println("ERROR 2 " + row);
-      }
-      
+      seek(bmfr, row);
+      assertTrue(bmfr.hasTop());
       count++;
-      
       if (count >= 500) {
         break;
       }
     }
-    
     t2 = System.currentTimeMillis();
-    
-    out.printf("existant lookup rate %6.2f%n", 500 / ((t2 - t1) / 1000.0));
-    out.println("expected hits 500.  Receive hits: " + count);
+
+    double rate2 = 500 / ((t2 - t1) / 1000.0);
+    LOG.debug(String.format("existant lookup rate %6.2f%n", rate2));
+    LOG.debug("expected hits 500.  Receive hits: " + count);
     bmfr.close();
+
+    assertTrue(rate1 > rate2);
+  }
+
+  private void seek(FileSKVIterator bmfr, int row) throws IOException {
+    String fi = String.format("%010d", row);
+    // bmfr.seek(new Range(new Text("r"+fi)));
+    Key k1 = new Key(new Text("r" + fi), new Text("cf1"));
+    bmfr.seek(new Range(k1, true, k1.followingKey(PartialKey.ROW_COLFAM), false), new ArrayList<ByteSequence>(), false);
   }
 
 }


[2/4] git commit: ACCUMULO-1992 Remove CachedConfiguration from examples

Posted by ct...@apache.org.
ACCUMULO-1992 Remove CachedConfiguration from examples


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/7b7521dd
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/7b7521dd
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/7b7521dd

Branch: refs/heads/1.6.0-SNAPSHOT
Commit: 7b7521dd92b81f4c91eae2415f6d835944b15355
Parents: 5f90d0b
Author: Christopher Tubbs <ct...@apache.org>
Authored: Mon Dec 9 13:22:21 2013 -0500
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Wed Dec 11 16:59:48 2013 -0500

----------------------------------------------------------------------
 .../simple/filedata/CharacterHistogram.java     | 28 +++---
 .../examples/simple/mapreduce/NGramIngest.java  | 36 ++++----
 .../examples/simple/mapreduce/RegexExample.java | 24 ++---
 .../examples/simple/mapreduce/RowHash.java      | 22 ++---
 .../examples/simple/mapreduce/TableToFile.java  | 28 +++---
 .../simple/mapreduce/TeraSortIngest.java        | 97 ++++++++++----------
 .../simple/mapreduce/TokenFileWordCount.java    | 33 +++----
 .../simple/mapreduce/UniqueColumns.java         | 44 ++++-----
 .../examples/simple/mapreduce/WordCount.java    | 29 +++---
 .../mapreduce/bulk/BulkIngestExample.java       | 51 +++++-----
 .../simple/mapreduce/bulk/GenerateTestData.java | 20 ++--
 .../simple/filedata/ChunkInputFormatTest.java   | 58 ++++++------
 12 files changed, 235 insertions(+), 235 deletions(-)
----------------------------------------------------------------------
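
The change repeated across these files is mechanical: each example's main() method stops using the Accumulo-internal CachedConfiguration.getInstance() and passes a plain Hadoop Configuration to ToolRunner instead. A minimal sketch of the before/after shape (ExampleTool is an illustrative name, not one of the classes touched here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ExampleTool extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    // Job setup elided; getConf() returns the Configuration handed to ToolRunner.run.
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // Before: ToolRunner.run(CachedConfiguration.getInstance(), new ExampleTool(), args);
    // After: construct a plain Configuration, removing the dependency on the
    // Accumulo-internal CachedConfiguration utility.
    int res = ToolRunner.run(new Configuration(), new ExampleTool(), args);
    if (res != 0)
      System.exit(res);
  }
}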


http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java
index 11eda3e..d0662b6 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java
@@ -29,8 +29,8 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.SummingArrayCombiner;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.examples.simple.mapreduce.JobUtil;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -46,14 +46,15 @@ import com.beust.jcommander.Parameter;
  */
 public class CharacterHistogram extends Configured implements Tool {
   public static final String VIS = "vis";
-  
+
   public static void main(String[] args) throws Exception {
-    System.exit(ToolRunner.run(CachedConfiguration.getInstance(), new CharacterHistogram(), args));
+    System.exit(ToolRunner.run(new Configuration(), new CharacterHistogram(), args));
   }
-  
+
   public static class HistMapper extends Mapper<List<Entry<Key,Value>>,InputStream,Text,Mutation> {
     private ColumnVisibility cv;
-    
+
+    @Override
     public void map(List<Entry<Key,Value>> k, InputStream v, Context context) throws IOException, InterruptedException {
       Long[] hist = new Long[256];
       for (int i = 0; i < hist.length; i++)
@@ -68,19 +69,18 @@ public class CharacterHistogram extends Configured implements Tool {
       m.put("info", "hist", cv, new Value(SummingArrayCombiner.STRING_ARRAY_ENCODER.encode(Arrays.asList(hist))));
       context.write(new Text(), m);
     }
-    
+
     @Override
     protected void setup(Context context) throws IOException, InterruptedException {
       cv = new ColumnVisibility(context.getConfiguration().get(VIS, ""));
     }
   }
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--vis")
+    @Parameter(names = "--vis")
     String visibilities = "";
   }
-  
-  
+
   @Override
   public int run(String[] args) throws Exception {
     Job job = JobUtil.getJob(getConf());
@@ -93,15 +93,15 @@ public class CharacterHistogram extends Configured implements Tool {
     job.setInputFormatClass(ChunkInputFormat.class);
     opts.setAccumuloConfigs(job);
     job.getConfiguration().set(VIS, opts.visibilities.toString());
-    
+
     job.setMapperClass(HistMapper.class);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(Mutation.class);
-    
+
     job.setNumReduceTasks(0);
-    
+
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    
+
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
index 2f9b01a..93b589d 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
@@ -24,7 +24,7 @@ import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
@@ -39,19 +39,18 @@ import org.apache.log4j.Logger;
 import com.beust.jcommander.Parameter;
 
 /**
- * Map job to ingest n-gram files from 
- * http://storage.googleapis.com/books/ngrams/books/datasetsv2.html
+ * Map job to ingest n-gram files from http://storage.googleapis.com/books/ngrams/books/datasetsv2.html
  */
-public class NGramIngest extends Configured implements Tool  {
-  
+public class NGramIngest extends Configured implements Tool {
+
   private static final Logger log = Logger.getLogger(NGramIngest.class);
-  
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--input", required=true)
+    @Parameter(names = "--input", required = true)
     String inputDirectory;
   }
-  static class NGramMapper extends Mapper<LongWritable, Text, Text, Mutation> {
+
+  static class NGramMapper extends Mapper<LongWritable,Text,Text,Mutation> {
 
     @Override
     protected void map(LongWritable location, Text value, Context context) throws IOException, InterruptedException {
@@ -75,19 +74,18 @@ public class NGramIngest extends Configured implements Tool  {
     Job job = JobUtil.getJob(getConf());
     job.setJobName(getClass().getSimpleName());
     job.setJarByClass(getClass());
-    
+
     opts.setAccumuloConfigs(job);
     job.setInputFormatClass(TextInputFormat.class);
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-   
+
     job.setMapperClass(NGramMapper.class);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(Mutation.class);
-    
+
     job.setNumReduceTasks(0);
     job.setSpeculativeExecution(false);
-    
-    
+
     if (!opts.getConnector().tableOperations().exists(opts.tableName)) {
       log.info("Creating table " + opts.tableName);
       opts.getConnector().tableOperations().create(opts.tableName);
@@ -95,23 +93,23 @@ public class NGramIngest extends Configured implements Tool  {
       String numbers[] = "1 2 3 4 5 6 7 8 9".split("\\s");
       String lower[] = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split("\\s");
       String upper[] = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z".split("\\s");
-      for (String[] array : new String[][]{numbers, lower, upper}) {
+      for (String[] array : new String[][] {numbers, lower, upper}) {
         for (String s : array) {
           splits.add(new Text(s));
         }
       }
       opts.getConnector().tableOperations().addSplits(opts.tableName, splits);
     }
-      
+
     TextInputFormat.addInputPath(job, new Path(opts.inputDirectory));
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;
   }
-  
+
   public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new NGramIngest(), args);
+    int res = ToolRunner.run(new Configuration(), new NGramIngest(), args);
     if (res != 0)
       System.exit(res);
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
index 9acc694..47e5879 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
@@ -24,7 +24,7 @@ import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.Job;
@@ -42,7 +42,7 @@ public class RegexExample extends Configured implements Tool {
       context.write(row, data);
     }
   }
-  
+
   static class Opts extends ClientOnRequiredTable {
     @Parameter(names = "--rowRegex")
     String rowRegex;
@@ -55,7 +55,7 @@ public class RegexExample extends Configured implements Tool {
     @Parameter(names = "--output", required = true)
     String destination;
   }
-  
+
   @Override
   public int run(String[] args) throws Exception {
     Opts opts = new Opts();
@@ -64,34 +64,34 @@ public class RegexExample extends Configured implements Tool {
     Job job = JobUtil.getJob(getConf());
     job.setJobName(getClass().getSimpleName());
     job.setJarByClass(getClass());
-    
+
     job.setInputFormatClass(AccumuloInputFormat.class);
     opts.setAccumuloConfigs(job);
-    
+
     IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
     RegExFilter.setRegexs(regex, opts.rowRegex, opts.columnFamilyRegex, opts.columnQualifierRegex, opts.valueRegex, false);
     AccumuloInputFormat.addIterator(job, regex);
-    
+
     job.setMapperClass(RegexMapper.class);
     job.setMapOutputKeyClass(Key.class);
     job.setMapOutputValueClass(Value.class);
-    
+
     job.setNumReduceTasks(0);
-    
+
     job.setOutputFormatClass(TextOutputFormat.class);
     TextOutputFormat.setOutputPath(job, new Path(opts.destination));
-    
+
     System.out.println("setRowRegex: " + opts.rowRegex);
     System.out.println("setColumnFamilyRegex: " + opts.columnFamilyRegex);
     System.out.println("setColumnQualifierRegex: " + opts.columnQualifierRegex);
     System.out.println("setValueRegex: " + opts.valueRegex);
-    
+
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;
   }
-  
+
   public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new RegexExample(), args);
+    int res = ToolRunner.run(new Configuration(), new RegexExample(), args);
     if (res != 0)
       System.exit(res);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
index 2ca3587..1fa9b8f 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
@@ -25,9 +25,9 @@ import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
@@ -50,16 +50,16 @@ public class RowHash extends Configured implements Tool {
       context.write(null, m);
       context.progress();
     }
-    
+
     @Override
     public void setup(Context job) {}
   }
-  
+
   private static class Opts extends ClientOnRequiredTable {
     @Parameter(names = "--column", required = true)
     String column = null;
   }
-  
+
   @Override
   public int run(String[] args) throws Exception {
     Job job = JobUtil.getJob(getConf());
@@ -69,27 +69,27 @@ public class RowHash extends Configured implements Tool {
     opts.parseArgs(RowHash.class.getName(), args);
     job.setInputFormatClass(AccumuloInputFormat.class);
     opts.setAccumuloConfigs(job);
-    
+
     String col = opts.column;
     int idx = col.indexOf(":");
     Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
     Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
     if (cf.getLength() > 0)
       AccumuloInputFormat.fetchColumns(job, Collections.singleton(new Pair<Text,Text>(cf, cq)));
-    
+
     job.setMapperClass(HashDataMapper.class);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(Mutation.class);
-    
+
     job.setNumReduceTasks(0);
-    
+
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    
+
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;
   }
-  
+
   public static void main(String[] args) throws Exception {
-    ToolRunner.run(CachedConfiguration.getInstance(), new RowHash(), args);
+    ToolRunner.run(new Configuration(), new RowHash(), args);
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
index 8bdc195..3a211e2 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
@@ -25,9 +25,9 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.format.DefaultFormatter;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.NullWritable;
@@ -45,14 +45,14 @@ import com.beust.jcommander.Parameter;
  * <tablename> <column> <hdfs-output-path>
  */
 public class TableToFile extends Configured implements Tool {
-  
+
   static class Opts extends ClientOnRequiredTable {
     @Parameter(names = "--output", description = "output directory", required = true)
     String output;
     @Parameter(names = "--columns", description = "columns to extract, in cf:cq{,cf:cq,...} form")
     String columns = "";
   }
-  
+
   /**
    * The Mapper class that given a row number, will generate the appropriate output line.
    */
@@ -66,12 +66,12 @@ public class TableToFile extends Configured implements Tool {
         public Key getKey() {
           return r;
         }
-        
+
         @Override
         public Value getValue() {
           return v;
         }
-        
+
         @Override
         public Value setValue(Value value) {
           return null;
@@ -81,7 +81,7 @@ public class TableToFile extends Configured implements Tool {
       context.setStatus("Outputed Value");
     }
   }
-  
+
   @Override
   public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException, AccumuloSecurityException {
     Job job = JobUtil.getJob(getConf());
@@ -89,10 +89,10 @@ public class TableToFile extends Configured implements Tool {
     job.setJarByClass(this.getClass());
     Opts opts = new Opts();
     opts.parseArgs(getClass().getName(), args);
-    
+
     job.setInputFormatClass(AccumuloInputFormat.class);
     opts.setAccumuloConfigs(job);
-    
+
     HashSet<Pair<Text,Text>> columnsToFetch = new HashSet<Pair<Text,Text>>();
     for (String col : opts.columns.split(",")) {
       int idx = col.indexOf(":");
@@ -103,20 +103,20 @@ public class TableToFile extends Configured implements Tool {
     }
     if (!columnsToFetch.isEmpty())
       AccumuloInputFormat.fetchColumns(job, columnsToFetch);
-    
+
     job.setMapperClass(TTFMapper.class);
     job.setMapOutputKeyClass(NullWritable.class);
     job.setMapOutputValueClass(Text.class);
-    
+
     job.setNumReduceTasks(0);
-    
+
     job.setOutputFormatClass(TextOutputFormat.class);
     TextOutputFormat.setOutputPath(job, new Path(opts.output));
-    
+
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;
   }
-  
+
   /**
    * 
    * @param args
@@ -124,6 +124,6 @@ public class TableToFile extends Configured implements Tool {
    * @throws Exception
    */
   public static void main(String[] args) throws Exception {
-    ToolRunner.run(CachedConfiguration.getInstance(), new TableToFile(), args);
+    ToolRunner.run(new Configuration(), new TableToFile(), args);
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
index dd2fea4..f9f2d39 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
@@ -30,7 +30,6 @@ import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -77,37 +76,37 @@ public class TeraSortIngest extends Configured implements Tool {
     static class RangeInputSplit extends InputSplit implements Writable {
       long firstRow;
       long rowCount;
-      
+
       public RangeInputSplit() {}
-      
+
       public RangeInputSplit(long offset, long length) {
         firstRow = offset;
         rowCount = length;
       }
-      
+
       @Override
       public long getLength() throws IOException {
         return 0;
       }
-      
+
       @Override
       public String[] getLocations() throws IOException {
         return new String[] {};
       }
-      
+
       @Override
       public void readFields(DataInput in) throws IOException {
         firstRow = WritableUtils.readVLong(in);
         rowCount = WritableUtils.readVLong(in);
       }
-      
+
       @Override
       public void write(DataOutput out) throws IOException {
         WritableUtils.writeVLong(out, firstRow);
         WritableUtils.writeVLong(out, rowCount);
       }
     }
-    
+
     /**
      * A record reader that will generate a range of numbers.
      */
@@ -115,36 +114,36 @@ public class TeraSortIngest extends Configured implements Tool {
       long startRow;
       long finishedRows;
       long totalRows;
-      
+
       LongWritable currentKey;
-      
+
       public RangeRecordReader(RangeInputSplit split) {
         startRow = split.firstRow;
         finishedRows = 0;
         totalRows = split.rowCount;
       }
-      
+
       @Override
       public void close() throws IOException {}
-      
+
       @Override
       public float getProgress() throws IOException {
         return finishedRows / (float) totalRows;
       }
-      
+
       @Override
       public LongWritable getCurrentKey() throws IOException, InterruptedException {
         return new LongWritable(startRow + finishedRows);
       }
-      
+
       @Override
       public NullWritable getCurrentValue() throws IOException, InterruptedException {
         return NullWritable.get();
       }
-      
+
       @Override
       public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {}
-      
+
       @Override
       public boolean nextKeyValue() throws IOException, InterruptedException {
         if (finishedRows < totalRows) {
@@ -154,13 +153,13 @@ public class TeraSortIngest extends Configured implements Tool {
         return false;
       }
     }
-    
+
     @Override
     public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
       // reporter.setStatus("Creating record reader");
       return new RangeRecordReader((RangeInputSplit) split);
     }
-    
+
     /**
      * Create the desired number of splits, dividing the number of rows between the mappers.
      */
@@ -180,12 +179,12 @@ public class TeraSortIngest extends Configured implements Tool {
       System.out.println("Done Generating.");
       return splits;
     }
-    
+
   }
-  
+
   private static String NUMSPLITS = "terasort.overridesplits";
   private static String NUMROWS = "terasort.numrows";
-  
+
   static class RandomGenerator {
     private long seed = 0;
     private static final long mask32 = (1l << 32) - 1;
@@ -199,7 +198,7 @@ public class TeraSortIngest extends Configured implements Tool {
     private static final long[] seeds = new long[] {0L, 4160749568L, 4026531840L, 3892314112L, 3758096384L, 3623878656L, 3489660928L, 3355443200L, 3221225472L,
         3087007744L, 2952790016L, 2818572288L, 2684354560L, 2550136832L, 2415919104L, 2281701376L, 2147483648L, 2013265920L, 1879048192L, 1744830464L,
         1610612736L, 1476395008L, 1342177280L, 1207959552L, 1073741824L, 939524096L, 805306368L, 671088640L, 536870912L, 402653184L, 268435456L, 134217728L,};
-    
+
     /**
      * Start the random number generator on the given iteration.
      * 
@@ -213,17 +212,17 @@ public class TeraSortIngest extends Configured implements Tool {
         next();
       }
     }
-    
+
     RandomGenerator() {
       this(0);
     }
-    
+
     long next() {
       seed = (seed * 3141592621l + 663896637) & mask32;
       return seed;
     }
   }
-  
+
   /**
    * The Mapper class that given a row number, will generate the appropriate output line.
    */
@@ -233,7 +232,7 @@ public class TeraSortIngest extends Configured implements Tool {
     private int maxkeylength = 0;
     private int minvaluelength = 0;
     private int maxvaluelength = 0;
-    
+
     private Text key = new Text();
     private Text value = new Text();
     private RandomGenerator rand;
@@ -248,18 +247,18 @@ public class TeraSortIngest extends Configured implements Tool {
         }
       }
     }
-    
+
     /**
      * Add a random key to the text
      */
     private Random random = new Random();
-    
+
     private void addKey() {
       int range = random.nextInt(maxkeylength - minkeylength + 1);
       int keylen = range + minkeylength;
       int keyceil = keylen + (4 - (keylen % 4));
       keyBytes = new byte[keyceil];
-      
+
       long temp = 0;
       for (int i = 0; i < keyceil / 4; i++) {
         temp = rand.next() / 52;
@@ -273,7 +272,7 @@ public class TeraSortIngest extends Configured implements Tool {
       }
       key.set(keyBytes, 0, keylen);
     }
-    
+
     /**
      * Add the rowid to the row.
      * 
@@ -289,7 +288,7 @@ public class TeraSortIngest extends Configured implements Tool {
       paddedRowIdString.append(rowid, 0, Math.min(rowid.length, 10));
       return paddedRowIdString;
     }
-    
+
     /**
      * Add the required filler bytes. Each row consists of 7 blocks of 10 characters and 1 block of 8 characters.
      * 
@@ -298,22 +297,22 @@ public class TeraSortIngest extends Configured implements Tool {
      */
     private void addFiller(long rowId) {
       int base = (int) ((rowId * 8) % 26);
-      
+
       // Get Random var
       Random random = new Random(rand.seed);
-      
+
       int range = random.nextInt(maxvaluelength - minvaluelength + 1);
       int valuelen = range + minvaluelength;
-      
+
       while (valuelen > 10) {
         value.append(filler[(base + valuelen) % 26], 0, 10);
         valuelen -= 10;
       }
-      
+
       if (valuelen > 0)
         value.append(filler[(base + valuelen) % 26], 0, valuelen);
     }
-    
+
     @Override
     public void map(LongWritable row, NullWritable ignored, Context context) throws IOException, InterruptedException {
       context.setStatus("Entering");
@@ -326,18 +325,18 @@ public class TeraSortIngest extends Configured implements Tool {
       value.clear();
       // addRowId(rowId);
       addFiller(rowId);
-      
+
       // New
       Mutation m = new Mutation(key);
       m.put(new Text("c"), // column family
           getRowIdString(rowId), // column qual
           new Value(value.toString().getBytes())); // data
-      
+
       context.setStatus("About to add to accumulo");
       context.write(table, m);
       context.setStatus("Added to accumulo " + key.toString());
     }
-    
+
     @Override
     public void setup(Context job) {
       minkeylength = job.getConfiguration().getInt("cloudgen.minkeylength", 0);
@@ -347,11 +346,11 @@ public class TeraSortIngest extends Configured implements Tool {
       table = new Text(job.getConfiguration().get("cloudgen.tablename"));
     }
   }
-  
+
   public static void main(String[] args) throws Exception {
-    ToolRunner.run(CachedConfiguration.getInstance(), new TeraSortIngest(), args);
+    ToolRunner.run(new Configuration(), new TeraSortIngest(), args);
   }
-  
+
   static class Opts extends ClientOnRequiredTable {
     @Parameter(names = "--count", description = "number of rows to ingest", required = true)
     long numRows;
@@ -366,7 +365,7 @@ public class TeraSortIngest extends Configured implements Tool {
     @Parameter(names = "--splits", description = "number of splits to create in the table")
     int splits = 0;
   }
-  
+
   @Override
   public int run(String[] args) throws Exception {
     Job job = JobUtil.getJob(getConf());
@@ -374,19 +373,19 @@ public class TeraSortIngest extends Configured implements Tool {
     job.setJarByClass(this.getClass());
     Opts opts = new Opts();
     opts.parseArgs(TeraSortIngest.class.getName(), args);
-    
+
     job.setInputFormatClass(RangeInputFormat.class);
     job.setMapperClass(SortGenMapper.class);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(Mutation.class);
-    
+
     job.setNumReduceTasks(0);
-    
+
     job.setOutputFormatClass(AccumuloOutputFormat.class);
     opts.setAccumuloConfigs(job);
     BatchWriterConfig bwConfig = new BatchWriterConfig().setMaxMemory(10L * 1000 * 1000);
     AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);
-    
+
     Configuration conf = job.getConfiguration();
     conf.setLong(NUMROWS, opts.numRows);
     conf.setInt("cloudgen.minkeylength", opts.minKeyLength);
@@ -394,10 +393,10 @@ public class TeraSortIngest extends Configured implements Tool {
     conf.setInt("cloudgen.minvaluelength", opts.minValueLength);
     conf.setInt("cloudgen.maxvaluelength", opts.maxValueLength);
     conf.set("cloudgen.tablename", opts.tableName);
-    
+
     if (args.length > 10)
       conf.setInt(NUMSPLITS, opts.splits);
-    
+
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
index fc4b27f..c3f6cdb 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
@@ -22,7 +22,7 @@ import org.apache.accumulo.core.client.ClientConfiguration;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
@@ -38,17 +38,17 @@ import org.apache.hadoop.util.ToolRunner;
  * 
  */
 public class TokenFileWordCount extends Configured implements Tool {
-  
+
   public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {
     @Override
     public void map(LongWritable key, Text value, Context output) throws IOException {
       String[] words = value.toString().split("\\s+");
-      
+
       for (String word : words) {
-        
+
         Mutation mutation = new Mutation(new Text(word));
         mutation.put(new Text("count"), new Text("20080906"), new Value("1".getBytes()));
-        
+
         try {
           output.write(null, mutation);
         } catch (InterruptedException e) {
@@ -57,43 +57,44 @@ public class TokenFileWordCount extends Configured implements Tool {
       }
     }
   }
-  
+
+  @Override
   public int run(String[] args) throws Exception {
-    
+
     String instance = args[0];
     String zookeepers = args[1];
     String user = args[2];
     String tokenFile = args[3];
     String input = args[4];
     String tableName = args[5];
-    
+
     Job job = JobUtil.getJob(getConf());
     job.setJobName(TokenFileWordCount.class.getName());
     job.setJarByClass(this.getClass());
-    
+
     job.setInputFormatClass(TextInputFormat.class);
     TextInputFormat.setInputPaths(job, input);
-    
+
     job.setMapperClass(MapClass.class);
-    
+
     job.setNumReduceTasks(0);
-    
+
     job.setOutputFormatClass(AccumuloOutputFormat.class);
     job.setOutputKeyClass(Text.class);
     job.setOutputValueClass(Mutation.class);
-    
+
     // AccumuloInputFormat not used here, but it uses the same functions.
     AccumuloOutputFormat.setZooKeeperInstance(job, ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
     AccumuloOutputFormat.setConnectorInfo(job, user, tokenFile);
     AccumuloOutputFormat.setCreateTables(job, true);
     AccumuloOutputFormat.setDefaultTableName(job, tableName);
-    
+
     job.waitForCompletion(true);
     return 0;
   }
-  
+
   public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new TokenFileWordCount(), args);
+    int res = ToolRunner.run(new Configuration(), new TokenFileWordCount(), args);
     System.exit(res);
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
index 23d9d47..e0e29ce 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
@@ -26,7 +26,7 @@ import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
@@ -44,35 +44,35 @@ import com.beust.jcommander.Parameter;
  * table.
  */
 public class UniqueColumns extends Configured implements Tool {
-  
+
   private static final Text EMPTY = new Text();
-  
+
   public static class UMapper extends Mapper<Key,Value,Text,Text> {
     private Text temp = new Text();
     private static final Text CF = new Text("cf:");
     private static final Text CQ = new Text("cq:");
-    
+
     @Override
     public void map(Key key, Value value, Context context) throws IOException, InterruptedException {
       temp.set(CF);
       ByteSequence cf = key.getColumnFamilyData();
       temp.append(cf.getBackingArray(), cf.offset(), cf.length());
       context.write(temp, EMPTY);
-      
+
       temp.set(CQ);
       ByteSequence cq = key.getColumnQualifierData();
       temp.append(cq.getBackingArray(), cq.offset(), cq.length());
       context.write(temp, EMPTY);
     }
   }
-  
+
   public static class UReducer extends Reducer<Text,Text,Text,Text> {
     @Override
     public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
       context.write(key, EMPTY);
     }
   }
-  
+
   static class Opts extends ClientOnRequiredTable {
     @Parameter(names = "--output", description = "output directory")
     String output;
@@ -81,21 +81,21 @@ public class UniqueColumns extends Configured implements Tool {
     @Parameter(names = "--offline", description = "run against an offline table")
     boolean offline = false;
   }
-  
+
   @Override
   public int run(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(UniqueColumns.class.getName(), args);
-    
+
     String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
 
     Job job = JobUtil.getJob(getConf());
     job.setJobName(jobName);
     job.setJarByClass(this.getClass());
-    
+
     String clone = opts.tableName;
     Connector conn = null;
-    
+
     opts.setAccumuloConfigs(job);
 
     if (opts.offline) {
@@ -103,41 +103,41 @@ public class UniqueColumns extends Configured implements Tool {
        * this example clones the table and takes it offline. If you plan to run map reduce jobs over a table many times, it may be more efficient to compact the
        * table, clone it, and then keep using the same clone as input for map reduce.
        */
-      
+
       conn = opts.getConnector();
       clone = opts.tableName + "_" + jobName;
       conn.tableOperations().clone(opts.tableName, clone, true, new HashMap<String,String>(), new HashSet<String>());
       conn.tableOperations().offline(clone);
-      
+
       AccumuloInputFormat.setOfflineTableScan(job, true);
       AccumuloInputFormat.setInputTableName(job, clone);
     }
-    
+
     job.setInputFormatClass(AccumuloInputFormat.class);
 
     job.setMapperClass(UMapper.class);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(Text.class);
-    
+
     job.setCombinerClass(UReducer.class);
     job.setReducerClass(UReducer.class);
-    
+
     job.setNumReduceTasks(opts.reducers);
-    
+
     job.setOutputFormatClass(TextOutputFormat.class);
     TextOutputFormat.setOutputPath(job, new Path(opts.output));
-    
+
     job.waitForCompletion(true);
-    
+
     if (opts.offline) {
       conn.tableOperations().delete(clone);
     }
-    
+
     return job.isSuccessful() ? 0 : 1;
   }
-  
+
   public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new UniqueColumns(), args);
+    int res = ToolRunner.run(new Configuration(), new UniqueColumns(), args);
     System.exit(res);
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
index 8ca8cbc..220b85c 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
@@ -22,7 +22,7 @@ import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
@@ -40,22 +40,22 @@ import com.beust.jcommander.Parameter;
  * 
  */
 public class WordCount extends Configured implements Tool {
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--input", description="input directory")
+    @Parameter(names = "--input", description = "input directory")
     String inputDirectory;
   }
-  
+
   public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {
     @Override
     public void map(LongWritable key, Text value, Context output) throws IOException {
       String[] words = value.toString().split("\\s+");
-      
+
       for (String word : words) {
-        
+
         Mutation mutation = new Mutation(new Text(word));
         mutation.put(new Text("count"), new Text("20080906"), new Value("1".getBytes()));
-        
+
         try {
           output.write(null, mutation);
         } catch (InterruptedException e) {
@@ -64,7 +64,8 @@ public class WordCount extends Configured implements Tool {
       }
     }
   }
-  
+
+  @Override
   public int run(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(WordCount.class.getName(), args);
@@ -72,14 +73,14 @@ public class WordCount extends Configured implements Tool {
     Job job = JobUtil.getJob(getConf());
     job.setJobName(WordCount.class.getName());
     job.setJarByClass(this.getClass());
-    
+
     job.setInputFormatClass(TextInputFormat.class);
     TextInputFormat.setInputPaths(job, new Path(opts.inputDirectory));
-    
+
     job.setMapperClass(MapClass.class);
-    
+
     job.setNumReduceTasks(0);
-    
+
     job.setOutputFormatClass(AccumuloOutputFormat.class);
     job.setOutputKeyClass(Text.class);
     job.setOutputValueClass(Mutation.class);
@@ -87,8 +88,8 @@ public class WordCount extends Configured implements Tool {
     job.waitForCompletion(true);
     return 0;
   }
-  
+
   public static void main(String[] args) throws Exception {
-    ToolRunner.run(CachedConfiguration.getInstance(), new WordCount(), args);
+    ToolRunner.run(new Configuration(), new WordCount(), args);
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
index 5f9b975..72bd7eb 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
@@ -27,7 +27,6 @@ import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
 import org.apache.accumulo.core.client.mapreduce.lib.partition.RangePartitioner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.examples.simple.mapreduce.JobUtil;
 import org.apache.commons.codec.binary.Base64;
@@ -53,7 +52,7 @@ public class BulkIngestExample extends Configured implements Tool {
   public static class MapClass extends Mapper<LongWritable,Text,Text,Text> {
     private Text outputKey = new Text();
     private Text outputValue = new Text();
-    
+
     @Override
     public void map(LongWritable key, Text value, Context output) throws IOException, InterruptedException {
       // split on tab
@@ -64,7 +63,7 @@ public class BulkIngestExample extends Configured implements Tool {
           break;
         }
       }
-      
+
       if (index > 0) {
         outputKey.set(value.getBytes(), 0, index);
         outputValue.set(value.getBytes(), index + 1, value.getLength() - (index + 1));
@@ -72,8 +71,9 @@ public class BulkIngestExample extends Configured implements Tool {
       }
     }
   }
-  
+
   public static class ReduceClass extends Reducer<Text,Text,Key,Value> {
+    @Override
     public void reduce(Text key, Iterable<Text> values, Context output) throws IOException, InterruptedException {
       // be careful with the timestamp... if you run on a cluster
       // where the time is whacked you may not see your updates in
@@ -82,82 +82,83 @@ public class BulkIngestExample extends Configured implements Tool {
       // cluster or consider using logical time... one options is
       // to let accumulo set the time
       long timestamp = System.currentTimeMillis();
-      
+
       int index = 0;
       for (Text value : values) {
         Key outputKey = new Key(key, new Text("colf"), new Text(String.format("col_%07d", index)), timestamp);
         index++;
-        
+
         Value outputValue = new Value(value.getBytes(), 0, value.getLength());
         output.write(outputKey, outputValue);
       }
     }
   }
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--inputDir", required=true)
+    @Parameter(names = "--inputDir", required = true)
     String inputDir;
-    @Parameter(names="--workDir", required=true)
+    @Parameter(names = "--workDir", required = true)
     String workDir;
   }
-  
+
+  @Override
   public int run(String[] args) {
     Opts opts = new Opts();
     opts.parseArgs(BulkIngestExample.class.getName(), args);
-    
+
     Configuration conf = getConf();
     PrintStream out = null;
     try {
       Job job = JobUtil.getJob(conf);
       job.setJobName("bulk ingest example");
       job.setJarByClass(this.getClass());
-      
+
       job.setInputFormatClass(TextInputFormat.class);
-      
+
       job.setMapperClass(MapClass.class);
       job.setMapOutputKeyClass(Text.class);
       job.setMapOutputValueClass(Text.class);
-      
+
       job.setReducerClass(ReduceClass.class);
       job.setOutputFormatClass(AccumuloFileOutputFormat.class);
       opts.setAccumuloConfigs(job);
-      
+
       Connector connector = opts.getConnector();
-      
+
       TextInputFormat.setInputPaths(job, new Path(opts.inputDir));
       AccumuloFileOutputFormat.setOutputPath(job, new Path(opts.workDir + "/files"));
-      
+
       FileSystem fs = FileSystem.get(conf);
       out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.workDir + "/splits.txt"))));
-      
+
       Collection<Text> splits = connector.tableOperations().listSplits(opts.tableName, 100);
       for (Text split : splits)
         out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));
-      
+
       job.setNumReduceTasks(splits.size() + 1);
       out.close();
-      
+
       job.setPartitionerClass(RangePartitioner.class);
       RangePartitioner.setSplitFile(job, opts.workDir + "/splits.txt");
-      
+
       job.waitForCompletion(true);
       Path failures = new Path(opts.workDir, "failures");
       fs.delete(failures, true);
       fs.mkdirs(new Path(opts.workDir, "failures"));
       connector.tableOperations().importDirectory(opts.tableName, opts.workDir + "/files", opts.workDir + "/failures", false);
-      
+
     } catch (Exception e) {
       throw new RuntimeException(e);
     } finally {
       if (out != null)
         out.close();
     }
-    
+
     return 0;
   }
-  
+
   public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new BulkIngestExample(), args);
+    int res = ToolRunner.run(new Configuration(), new BulkIngestExample(), args);
     System.exit(res);
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
index c1a13b3..5cb4a0b 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
@@ -20,34 +20,34 @@ import java.io.BufferedOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
 
-import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
 import com.beust.jcommander.Parameter;
 
 public class GenerateTestData {
-  
+
   static class Opts extends org.apache.accumulo.core.cli.Help {
-    @Parameter(names="--start-row", required=true)
+    @Parameter(names = "--start-row", required = true)
     int startRow = 0;
-    @Parameter(names="--count", required=true)
+    @Parameter(names = "--count", required = true)
     int numRows = 0;
-    @Parameter(names="--output", required=true)
+    @Parameter(names = "--output", required = true)
     String outputFile;
   }
-  
+
   public static void main(String[] args) throws IOException {
     Opts opts = new Opts();
     opts.parseArgs(GenerateTestData.class.getName(), args);
-    
-    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+
+    FileSystem fs = FileSystem.get(new Configuration());
     PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.outputFile))));
-    
+
     for (int i = 0; i < opts.numRows; i++) {
       out.println(String.format("row_%010d\tvalue_%010d", i + opts.startRow, i + opts.startRow));
     }
     out.close();
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b7521dd/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
index 3d99838..dab1e10 100644
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
+++ b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
@@ -34,8 +34,8 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.examples.simple.mapreduce.JobUtil;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
@@ -48,12 +48,12 @@ public class ChunkInputFormatTest extends TestCase {
   private static AssertionError e1 = null;
   private static AssertionError e2 = null;
   private static IOException e3 = null;
-  
+
   private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
-  
+
   private static List<Entry<Key,Value>> data;
   private static List<Entry<Key,Value>> baddata;
-  
+
   {
     data = new ArrayList<Entry<Key,Value>>();
     ChunkInputStreamTest.addData(data, "a", "refs", "ida\0ext", "A&B", "ext");
@@ -71,16 +71,16 @@ public class ChunkInputFormatTest extends TestCase {
     ChunkInputStreamTest.addData(baddata, "c", "refs", "ida\0ext", "A&B", "ext");
     ChunkInputStreamTest.addData(baddata, "c", "refs", "ida\0name", "A&B", "name");
   }
-  
+
   public static void entryEquals(Entry<Key,Value> e1, Entry<Key,Value> e2) {
     assertEquals(e1.getKey(), e2.getKey());
     assertEquals(e1.getValue(), e2.getValue());
   }
-  
+
   public static class CIFTester extends Configured implements Tool {
     public static class TestMapper extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
       int count = 0;
-      
+
       @Override
       protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
         byte[] b = new byte[20];
@@ -113,7 +113,7 @@ public class ChunkInputFormatTest extends TestCase {
         }
         count++;
       }
-      
+
       @Override
       protected void cleanup(Context context) throws IOException, InterruptedException {
         try {
@@ -123,10 +123,10 @@ public class ChunkInputFormatTest extends TestCase {
         }
       }
     }
-    
+
     public static class TestNoClose extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
       int count = 0;
-      
+
       @Override
       protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
         byte[] b = new byte[5];
@@ -152,7 +152,7 @@ public class ChunkInputFormatTest extends TestCase {
         }
       }
     }
-    
+
     public static class TestBadData extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
       @Override
       protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
@@ -182,13 +182,13 @@ public class ChunkInputFormatTest extends TestCase {
         } catch (Exception e) {}
       }
     }
-    
+
     @Override
     public int run(String[] args) throws Exception {
       if (args.length != 5) {
         throw new IllegalArgumentException("Usage : " + CIFTester.class.getName() + " <instance name> <user> <pass> <table> <mapperClass>");
       }
-      
+
       String instance = args[0];
       String user = args[1];
       String pass = args[2];
@@ -197,39 +197,39 @@ public class ChunkInputFormatTest extends TestCase {
       Job job = JobUtil.getJob(getConf());
       job.setJobName(this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
       job.setJarByClass(this.getClass());
-      
+
       job.setInputFormatClass(ChunkInputFormat.class);
-      
+
       ChunkInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
       ChunkInputFormat.setInputTableName(job, table);
       ChunkInputFormat.setScanAuthorizations(job, AUTHS);
       ChunkInputFormat.setMockInstance(job, instance);
-      
+
       @SuppressWarnings("unchecked")
       Class<? extends Mapper<?,?,?,?>> forName = (Class<? extends Mapper<?,?,?,?>>) Class.forName(args[4]);
       job.setMapperClass(forName);
       job.setMapOutputKeyClass(Key.class);
       job.setMapOutputValueClass(Value.class);
       job.setOutputFormatClass(NullOutputFormat.class);
-      
+
       job.setNumReduceTasks(0);
-      
+
       job.waitForCompletion(true);
-      
+
       return job.isSuccessful() ? 0 : 1;
     }
-    
+
     public static int main(String[] args) throws Exception {
-      return ToolRunner.run(CachedConfiguration.getInstance(), new CIFTester(), args);
+      return ToolRunner.run(new Configuration(), new CIFTester(), args);
     }
   }
-  
+
   public void test() throws Exception {
     MockInstance instance = new MockInstance("instance1");
     Connector conn = instance.getConnector("root", new PasswordToken(""));
     conn.tableOperations().create("test");
     BatchWriter bw = conn.createBatchWriter("test", new BatchWriterConfig());
-    
+
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
       Mutation m = new Mutation(k.getRow());
@@ -237,18 +237,18 @@ public class ChunkInputFormatTest extends TestCase {
       bw.addMutation(m);
     }
     bw.close();
-    
+
     assertEquals(0, CIFTester.main(new String[] {"instance1", "root", "", "test", CIFTester.TestMapper.class.getName()}));
     assertNull(e1);
     assertNull(e2);
   }
-  
+
   public void testErrorOnNextWithoutClose() throws Exception {
     MockInstance instance = new MockInstance("instance2");
     Connector conn = instance.getConnector("root", new PasswordToken(""));
     conn.tableOperations().create("test");
     BatchWriter bw = conn.createBatchWriter("test", new BatchWriterConfig());
-    
+
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
       Mutation m = new Mutation(k.getRow());
@@ -256,13 +256,13 @@ public class ChunkInputFormatTest extends TestCase {
       bw.addMutation(m);
     }
     bw.close();
-    
+
     assertEquals(1, CIFTester.main(new String[] {"instance2", "root", "", "test", CIFTester.TestNoClose.class.getName()}));
     assertNull(e1);
     assertNull(e2);
     assertNotNull(e3);
   }
-  
+
   public void testInfoWithoutChunks() throws Exception {
     MockInstance instance = new MockInstance("instance3");
     Connector conn = instance.getConnector("root", new PasswordToken(""));
@@ -275,7 +275,7 @@ public class ChunkInputFormatTest extends TestCase {
       bw.addMutation(m);
     }
     bw.close();
-    
+
     assertEquals(0, CIFTester.main(new String[] {"instance3", "root", "", "test", CIFTester.TestBadData.class.getName()}));
     assertNull(e0);
     assertNull(e1);


[3/4] ACCUMULO-1599 Stop using /tmp wherever possible

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterTest.java b/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterTest.java
deleted file mode 100644
index b92eede..0000000
--- a/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterTest.java
+++ /dev/null
@@ -1,565 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MultiTableBatchWriter;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.TableOfflineException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.client.impl.MultiTableBatchWriterImpl;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.accumulo.minicluster.MiniAccumuloConfig;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.common.collect.Maps;
-
-public class MultiTableBatchWriterTest {
-  public static TemporaryFolder folder = new TemporaryFolder();
-  public static MiniAccumuloCluster cluster;
-  private static final PasswordToken password = new PasswordToken("secret");
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    folder.create();
-    MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder("miniAccumulo"), new String(password.getPassword()));
-    cluster = new MiniAccumuloCluster(cfg);
-    cluster.start();
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    cluster.stop();
-    folder.delete();
-  }
-
-  @Test
-  public void testTableRenameDataValidation() throws Exception {
-    ZooKeeperInstance instance = new ZooKeeperInstance(new ClientConfiguration().withInstance(cluster.getInstanceName()).withZkHosts(cluster.getZooKeepers()));
-    Connector connector = instance.getConnector("root", password);
-
-    BatchWriterConfig config = new BatchWriterConfig();
-
-    Credentials creds = new Credentials("root", password);
-    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
-
-    try {
-      final String table1 = "testTableRenameDataValidation_table1", table2 = "testTableRenameDataValidation_table2";
-
-      TableOperations tops = connector.tableOperations();
-      tops.create(table1);
-
-      BatchWriter bw1 = mtbw.getBatchWriter(table1);
-
-      Mutation m1 = new Mutation("foo");
-      m1.put("col1", "", "val1");
-
-      bw1.addMutation(m1);
-
-      tops.rename(table1, table2);
-      tops.create(table1);
-
-      BatchWriter bw2 = mtbw.getBatchWriter(table1);
-
-      Mutation m2 = new Mutation("bar");
-      m2.put("col1", "", "val1");
-
-      bw1.addMutation(m2);
-      bw2.addMutation(m2);
-
-      mtbw.close();
-
-      Map<Entry<String,String>,String> table1Expectations = new HashMap<Entry<String,String>,String>();
-      table1Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
-
-      Map<Entry<String,String>,String> table2Expectations = new HashMap<Entry<String,String>,String>();
-      table2Expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
-      table2Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
-
-      Scanner s = connector.createScanner(table1, new Authorizations());
-      s.setRange(new Range());
-      Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
-      for (Entry<Key,Value> entry : s) {
-        actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
-      }
-
-      Assert.assertEquals("Differing results for " + table1, table1Expectations, actual);
-
-      s = connector.createScanner(table2, new Authorizations());
-      s.setRange(new Range());
-      actual = new HashMap<Entry<String,String>,String>();
-      for (Entry<Key,Value> entry : s) {
-        actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
-      }
-
-      Assert.assertEquals("Differing results for " + table2, table2Expectations, actual);
-
-    } finally {
-      if (null != mtbw) {
-        mtbw.close();
-      }
-    }
-  }
-
-  @Test
-  public void testTableRenameSameWriters() throws Exception {
-    ZooKeeperInstance instance = new ZooKeeperInstance(new ClientConfiguration().withInstance(cluster.getInstanceName()).withZkHosts(cluster.getZooKeepers()));
-    Connector connector = instance.getConnector("root", password);
-
-    BatchWriterConfig config = new BatchWriterConfig();
-
-    Credentials creds = new Credentials("root", password);
-    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
-
-    try {
-      final String table1 = "testTableRenameSameWriters_table1", table2 = "testTableRenameSameWriters_table2";
-      final String newTable1 = "testTableRenameSameWriters_newTable1", newTable2 = "testTableRenameSameWriters_newTable2";
-
-      TableOperations tops = connector.tableOperations();
-      tops.create(table1);
-      tops.create(table2);
-
-      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
-      Mutation m1 = new Mutation("foo");
-      m1.put("col1", "", "val1");
-      m1.put("col2", "", "val2");
-
-      bw1.addMutation(m1);
-      bw2.addMutation(m1);
-
-      tops.rename(table1, newTable1);
-      tops.rename(table2, newTable2);
-
-      Mutation m2 = new Mutation("bar");
-      m2.put("col1", "", "val1");
-      m2.put("col2", "", "val2");
-
-      bw1.addMutation(m2);
-      bw2.addMutation(m2);
-
-      mtbw.close();
-
-      Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
-      expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
-      expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
-      expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
-      expectations.put(Maps.immutableEntry("bar", "col2"), "val2");
-
-      for (String table : Arrays.asList(newTable1, newTable2)) {
-        Scanner s = connector.createScanner(table, new Authorizations());
-        s.setRange(new Range());
-        Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
-        for (Entry<Key,Value> entry : s) {
-          actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
-        }
-
-        Assert.assertEquals("Differing results for " + table, expectations, actual);
-      }
-    } finally {
-      if (null != mtbw) {
-        mtbw.close();
-      }
-    }
-  }
-
-  @Test
-  public void testTableRenameNewWriters() throws Exception {
-    ZooKeeperInstance instance = new ZooKeeperInstance(new ClientConfiguration().withInstance(cluster.getInstanceName()).withZkHosts(cluster.getZooKeepers()));
-    Connector connector = instance.getConnector("root", password);
-
-    BatchWriterConfig config = new BatchWriterConfig();
-
-    Credentials creds = new Credentials("root", password);
-    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
-
-    try {
-      final String table1 = "testTableRenameNewWriters_table1", table2 = "testTableRenameNewWriters_table2";
-      final String newTable1 = "testTableRenameNewWriters_newTable1", newTable2 = "testTableRenameNewWriters_newTable2";
-
-      TableOperations tops = connector.tableOperations();
-      tops.create(table1);
-      tops.create(table2);
-
-      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
-      Mutation m1 = new Mutation("foo");
-      m1.put("col1", "", "val1");
-      m1.put("col2", "", "val2");
-
-      bw1.addMutation(m1);
-      bw2.addMutation(m1);
-
-      tops.rename(table1, newTable1);
-
-      // MTBW is still caching this name to the correct table, but we should invalidate its cache
-      // after seeing the rename
-      try {
-        bw1 = mtbw.getBatchWriter(table1);
-        Assert.fail("Should not be able to find this table");
-      } catch (TableNotFoundException e) {
-        // pass
-      }
-
-      tops.rename(table2, newTable2);
-
-      try {
-        bw2 = mtbw.getBatchWriter(table2);
-        Assert.fail("Should not be able to find this table");
-      } catch (TableNotFoundException e) {
-        // pass
-      }
-
-      bw1 = mtbw.getBatchWriter(newTable1);
-      bw2 = mtbw.getBatchWriter(newTable2);
-
-      Mutation m2 = new Mutation("bar");
-      m2.put("col1", "", "val1");
-      m2.put("col2", "", "val2");
-
-      bw1.addMutation(m2);
-      bw2.addMutation(m2);
-
-      mtbw.close();
-
-      Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
-      expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
-      expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
-      expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
-      expectations.put(Maps.immutableEntry("bar", "col2"), "val2");
-
-      for (String table : Arrays.asList(newTable1, newTable2)) {
-        Scanner s = connector.createScanner(table, new Authorizations());
-        s.setRange(new Range());
-        Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
-        for (Entry<Key,Value> entry : s) {
-          actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
-        }
-
-        Assert.assertEquals("Differing results for " + table, expectations, actual);
-      }
-    } finally {
-      if (null != mtbw) {
-        mtbw.close();
-      }
-    }
-  }
-
-  @Test
-  public void testTableRenameNewWritersNoCaching() throws Exception {
-    ZooKeeperInstance instance = new ZooKeeperInstance(new ClientConfiguration().withInstance(cluster.getInstanceName()).withZkHosts(cluster.getZooKeepers()));
-    Connector connector = instance.getConnector("root", password);
-
-    BatchWriterConfig config = new BatchWriterConfig();
-
-    Credentials creds = new Credentials("root", password);
-    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 0, TimeUnit.SECONDS);
-
-    try {
-      final String table1 = "testTableRenameNewWritersNoCaching_table1", table2 = "testTableRenameNewWritersNoCaching_table2";
-      final String newTable1 = "testTableRenameNewWritersNoCaching_newTable1", newTable2 = "testTableRenameNewWritersNoCaching_newTable2";
-
-      TableOperations tops = connector.tableOperations();
-      tops.create(table1);
-      tops.create(table2);
-
-      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
-      Mutation m1 = new Mutation("foo");
-      m1.put("col1", "", "val1");
-      m1.put("col2", "", "val2");
-
-      bw1.addMutation(m1);
-      bw2.addMutation(m1);
-
-      tops.rename(table1, newTable1);
-      tops.rename(table2, newTable2);
-
-      try {
-        bw1 = mtbw.getBatchWriter(table1);
-        Assert.fail("Should not have gotten batchwriter for " + table1);
-      } catch (TableNotFoundException e) {
-        // Pass
-      }
-
-      try {
-        bw2 = mtbw.getBatchWriter(table2);
-      } catch (TableNotFoundException e) {
-        // Pass
-      }
-    } finally {
-      if (null != mtbw) {
-        mtbw.close();
-      }
-    }
-  }
-
-  @Test
-  public void testTableDelete() throws Exception {
-    ZooKeeperInstance instance = new ZooKeeperInstance(new ClientConfiguration().withInstance(cluster.getInstanceName()).withZkHosts(cluster.getZooKeepers()));
-    Connector connector = instance.getConnector("root", password);
-
-    BatchWriterConfig config = new BatchWriterConfig();
-
-    Credentials creds = new Credentials("root", password);
-    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
-    boolean mutationsRejected = false;
-    
-    try {
-      final String table1 = "testTableDelete_table1", table2 = "testTableDelete_table2";
-
-      TableOperations tops = connector.tableOperations();
-      tops.create(table1);
-      tops.create(table2);
-
-      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
-      Mutation m1 = new Mutation("foo");
-      m1.put("col1", "", "val1");
-      m1.put("col2", "", "val2");
-
-      bw1.addMutation(m1);
-      bw2.addMutation(m1);
-
-      tops.delete(table1);
-      tops.delete(table2);
-
-      Mutation m2 = new Mutation("bar");
-      m2.put("col1", "", "val1");
-      m2.put("col2", "", "val2");
-
-      try {
-        bw1.addMutation(m2);
-        bw2.addMutation(m2);
-      } catch (MutationsRejectedException e) {
-        // Pass - Mutations might flush immediately
-        mutationsRejected = true;
-      }
-
-    } finally {
-      if (null != mtbw) {
-        try {
-          // Mutations might have flushed before the table offline occurred
-          mtbw.close();
-        } catch (MutationsRejectedException e) {
-          // Pass
-          mutationsRejected = true;
-        }
-      }
-    }
-    
-    Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
-  }
-
-  @Test
-  public void testOfflineTable() throws Exception {
-    ZooKeeperInstance instance = new ZooKeeperInstance(new ClientConfiguration().withInstance(cluster.getInstanceName()).withZkHosts(cluster.getZooKeepers()));
-    Connector connector = instance.getConnector("root", password);
-
-    BatchWriterConfig config = new BatchWriterConfig();
-
-    Credentials creds = new Credentials("root", password);
-    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
-    boolean mutationsRejected = false;
-
-    try {
-      final String table1 = "testOfflineTable_table1", table2 = "testOfflineTable_table2";
-
-      TableOperations tops = connector.tableOperations();
-      tops.create(table1);
-      tops.create(table2);
-
-      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
-      Mutation m1 = new Mutation("foo");
-      m1.put("col1", "", "val1");
-      m1.put("col2", "", "val2");
-
-      bw1.addMutation(m1);
-      bw2.addMutation(m1);
-
-      tops.offline(table1, true);
-      tops.offline(table2, true);
-
-      Mutation m2 = new Mutation("bar");
-      m2.put("col1", "", "val1");
-      m2.put("col2", "", "val2");
-
-      try {
-        bw1.addMutation(m2);
-        bw2.addMutation(m2);
-      } catch (MutationsRejectedException e) {
-        // Pass -- Mutations might flush immediately and fail because of offline table
-        mutationsRejected = true;
-      }
-    } finally {
-      if (null != mtbw) {
-        try {
-          mtbw.close();
-        } catch (MutationsRejectedException e) {
-          // Pass
-          mutationsRejected = true;
-        }
-      }
-    }
-    
-    Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
-  }
-
-  @Test
-  public void testOfflineTableWithCache() throws Exception {
-    ZooKeeperInstance instance = new ZooKeeperInstance(new ClientConfiguration().withInstance(cluster.getInstanceName()).withZkHosts(cluster.getZooKeepers()));
-    Connector connector = instance.getConnector("root", password);
-
-    BatchWriterConfig config = new BatchWriterConfig();
-
-    Credentials creds = new Credentials("root", password);
-    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 60, TimeUnit.SECONDS);
-    boolean mutationsRejected = false;
-
-    try {
-      final String table1 = "testOfflineTableWithCache_table1", table2 = "testOfflineTableWithCache_table2";
-
-      TableOperations tops = connector.tableOperations();
-      tops.create(table1);
-      tops.create(table2);
-
-      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
-      Mutation m1 = new Mutation("foo");
-      m1.put("col1", "", "val1");
-      m1.put("col2", "", "val2");
-
-      bw1.addMutation(m1);
-      bw2.addMutation(m1);
-
-      tops.offline(table1);
-
-      try {
-        bw1 = mtbw.getBatchWriter(table1);
-      } catch (TableOfflineException e) {
-        // pass
-        mutationsRejected = true;
-      }
-
-      tops.offline(table2);
-
-      try {
-        bw2 = mtbw.getBatchWriter(table2);
-      } catch (TableOfflineException e) {
-        // pass
-        mutationsRejected = true;
-      }
-    } finally {
-      if (null != mtbw) {
-        try {
-          // Mutations might have flushed before the table offline occurred
-          mtbw.close();
-        } catch (MutationsRejectedException e) {
-          // Pass
-          mutationsRejected = true;
-        }
-      }
-    }
-
-    Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
-  }
-
-  @Test
-  public void testOfflineTableWithoutCache() throws Exception {
-    ZooKeeperInstance instance = new ZooKeeperInstance(new ClientConfiguration().withInstance(cluster.getInstanceName()).withZkHosts(cluster.getZooKeepers()));
-    Connector connector = instance.getConnector("root", password);
-
-    BatchWriterConfig config = new BatchWriterConfig();
-
-    Credentials creds = new Credentials("root", password);
-    MultiTableBatchWriter mtbw = new MultiTableBatchWriterImpl(instance, creds, config, 0, TimeUnit.SECONDS);
-    boolean mutationsRejected = false;
-
-    try {
-      final String table1 = "testOfflineTableWithoutCache_table1", table2 = "testOfflineTableWithoutCache_table2";
-
-      TableOperations tops = connector.tableOperations();
-      tops.create(table1);
-      tops.create(table2);
-
-      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
-      Mutation m1 = new Mutation("foo");
-      m1.put("col1", "", "val1");
-      m1.put("col2", "", "val2");
-
-      bw1.addMutation(m1);
-      bw2.addMutation(m1);
-
-      // Mutations might or might not flush before tables goes offline
-      tops.offline(table1);
-      tops.offline(table2);
-
-      try {
-        bw1 = mtbw.getBatchWriter(table1);
-        Assert.fail(table1 + " should be offline");
-      } catch (TableOfflineException e) {
-        // pass
-        mutationsRejected = true;
-      }
-
-      try {
-        bw2 = mtbw.getBatchWriter(table2);
-        Assert.fail(table1 + " should be offline");
-      } catch (TableOfflineException e) {
-        // pass
-        mutationsRejected = true;
-      }
-    } finally {
-      if (null != mtbw) {
-        try {
-          // Mutations might have flushed before the table offline occurred
-          mtbw.close();
-        } catch (MutationsRejectedException e) {
-          // Pass
-          mutationsRejected = true;
-        }
-      }
-    }
-
-    Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
index 4f0c14c..21b56b7 100644
--- a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
@@ -147,6 +147,7 @@ public class ShellServerIT extends SimpleMacIT {
   public static void setUpBeforeClass() throws Exception {
     // history file is updated in $HOME
     System.setProperty("HOME", getFolder().getAbsolutePath());
+    System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
 
     // start the shell
     output = new TestOutputStream();
@@ -768,11 +769,11 @@ public class ShellServerIT extends SimpleMacIT {
 
   @Test(timeout = 30 * 1000)
   public void testPertableClasspath() throws Exception {
-    File fooFilterJar = File.createTempFile("FooFilter", ".jar");
+    File fooFilterJar = File.createTempFile("FooFilter", ".jar", getFolder());
     FileUtils.copyURLToFile(this.getClass().getResource("/FooFilter.jar"), fooFilterJar);
     fooFilterJar.deleteOnExit();
 
-    File fooConstraintJar = File.createTempFile("FooConstraint", ".jar");
+    File fooConstraintJar = File.createTempFile("FooConstraint", ".jar", getFolder());
     FileUtils.copyURLToFile(this.getClass().getResource("/FooConstraint.jar"), fooConstraintJar);
     fooConstraintJar.deleteOnExit();
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacIT.java
index 54585fe..a7abcb1 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacIT.java
@@ -64,7 +64,7 @@ public class ConfigurableMacIT extends AbstractMacIT {
     return getCluster().getConnector("root", ROOT_PASSWORD);
   }
 
-  public Process exec(Class<? extends Object> clazz, String... args) throws IOException {
+  public Process exec(Class<?> clazz, String... args) throws IOException {
     return getCluster().exec(clazz, args);
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
index 16b425f..dcf72c8 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
@@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -89,7 +90,7 @@ public class ExamplesIT extends ConfigurableMacIT {
     cfg.setDefaultMemory(cfg.getDefaultMemory() * 2, MemoryUnit.BYTE);
   }
 
-  @Test(timeout = 10 * 60 * 1000)
+  @Test(timeout = 15 * 60 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     String instance = c.getInstance().getInstanceName();
@@ -104,13 +105,12 @@ public class ExamplesIT extends ConfigurableMacIT {
     String dir = cluster.getConfig().getDir().getAbsolutePath();
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
 
-    Process trace = cluster.exec(TraceServer.class);
+    Process trace = exec(TraceServer.class);
     while (!c.tableOperations().exists("trace"))
       UtilWaitThread.sleep(500);
 
     log.info("trace example");
-    Process p = cluster.exec(TracingExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-C", "-D", "-c");
-    assertEquals(0, p.waitFor());
+    Process p = goodExec(TracingExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-C", "-D", "-c");
     for (LogWriter writer : cluster.getLogWriters()) {
       writer.flush();
     }
@@ -119,8 +119,7 @@ public class ExamplesIT extends ConfigurableMacIT {
     Matcher matcher = pattern.matcher(result);
     int count = 0;
     while (matcher.find()) {
-      p = cluster.exec(TraceDumpExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--traceid", matcher.group(1));
-      assertEquals(0, p.waitFor());
+      p = goodExec(TraceDumpExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--traceid", matcher.group(1));
       count++;
     }
     assertTrue(count > 0);
@@ -130,13 +129,10 @@ public class ExamplesIT extends ConfigurableMacIT {
 
     log.info("testing dirlist example (a little)");
     c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(",")));
-    assertEquals(
-        0,
-        cluster.exec(Ingest.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", "dirTable", "--indexTable", "indexTable",
-            "--dataTable", "dataTable", "--vis", visibility, "--chunkSize", 10000 + "", cluster.getConfig().getDir().getAbsolutePath()).waitFor());
-    p = cluster.exec(QueryUtil.class, "-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", "indexTable", "--auths", auths, "--search", "--path",
+    goodExec(Ingest.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", "dirTable", "--indexTable", "indexTable", "--dataTable",
+        "dataTable", "--vis", visibility, "--chunkSize", 10000 + "", cluster.getConfig().getDir().getAbsolutePath());
+    p = goodExec(QueryUtil.class, "-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", "indexTable", "--auths", auths, "--search", "--path",
         "accumulo-site.xml");
-    assertEquals(0, p.waitFor());
     for (LogWriter writer : cluster.getLogWriters()) {
       writer.flush();
     }
@@ -163,25 +159,19 @@ public class ExamplesIT extends ConfigurableMacIT {
     log.info("Testing bloom filters are fast for missing data");
     c.tableOperations().create("bloom_test");
     c.tableOperations().setProperty("bloom_test", Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    assertEquals(
-        0,
-        cluster.exec(RandomBatchWriter.class, "--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", ROOT_PASSWORD, "--num", "100000", "--min", "0",
-            "--max", "1000000000", "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", "bloom_test").waitFor());
+    goodExec(RandomBatchWriter.class, "--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", ROOT_PASSWORD, "--num", "100000", "--min", "0", "--max",
+        "1000000000", "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", "bloom_test");
     c.tableOperations().flush("bloom_test", null, null, true);
     long diff = 0, diff2 = 0;
     // try the speed test a couple times in case the system is loaded with other tests
     for (int i = 0; i < 2; i++) {
       long now = System.currentTimeMillis();
-      assertEquals(
-          0,
-          cluster.exec(RandomBatchScanner.class, "--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", ROOT_PASSWORD, "--num", "10000", "--min", "0",
-              "--max", "1000000000", "--size", "50", "--scanThreads", "4", "-t", "bloom_test").waitFor());
+      goodExec(RandomBatchScanner.class, "--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", ROOT_PASSWORD, "--num", "10000", "--min", "0",
+          "--max", "1000000000", "--size", "50", "--scanThreads", "4", "-t", "bloom_test");
       diff = System.currentTimeMillis() - now;
       now = System.currentTimeMillis();
-      assertEquals(
-          1,
-          cluster.exec(RandomBatchScanner.class, "--seed", "8", "-i", instance, "-z", keepers, "-u", user, "-p", ROOT_PASSWORD, "--num", "10000", "--min", "0",
-              "--max", "1000000000", "--size", "50", "--scanThreads", "4", "-t", "bloom_test").waitFor());
+      expectExec(1, RandomBatchScanner.class, "--seed", "8", "-i", instance, "-z", keepers, "-u", user, "-p", ROOT_PASSWORD, "--num", "10000", "--min", "0",
+          "--max", "1000000000", "--size", "50", "--scanThreads", "4", "-t", "bloom_test");
       diff2 = System.currentTimeMillis() - now;
       if (diff2 < diff)
         break;
@@ -206,13 +196,10 @@ public class ExamplesIT extends ConfigurableMacIT {
     assertTrue(thisFile);
     // create a reverse index
     c.tableOperations().create("doc2Term");
-    assertEquals(0, cluster.exec(Reverse.class, "-i", instance, "-z", keepers, "--shardTable", "shard", "--doc2Term", "doc2Term", "-u", "root", "-p", passwd)
-        .waitFor());
+    goodExec(Reverse.class, "-i", instance, "-z", keepers, "--shardTable", "shard", "--doc2Term", "doc2Term", "-u", "root", "-p", passwd);
     // run some queries
-    assertEquals(
-        0,
-        cluster.exec(ContinuousQuery.class, "-i", instance, "-z", keepers, "--shardTable", "shard", "--doc2Term", "doc2Term", "-u", "root", "-p", passwd,
-            "--terms", "5", "--count", "1000").waitFor());
+    goodExec(ContinuousQuery.class, "-i", instance, "-z", keepers, "--shardTable", "shard", "--doc2Term", "doc2Term", "-u", "root", "-p", passwd, "--terms",
+        "5", "--count", "1000");
 
     log.info("Testing MaxMutation constraint");
     c.tableOperations().create("test_ingest");
@@ -227,23 +214,21 @@ public class ExamplesIT extends ConfigurableMacIT {
     }
 
     log.info("Starting bulk ingest example");
-    assertEquals(0, cluster.exec(GenerateTestData.class, "--start-row", "0", "--count", "10000", "--output", dir + "/tmp/input/data").waitFor());
-    assertEquals(0, cluster.exec(SetupTable.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "bulkTable").waitFor());
-    assertEquals(
-        0,
-        cluster.exec(BulkIngestExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "bulkTable", "--inputDir",
-            dir + "/tmp/input", "--workDir", dir + "/tmp").waitFor());
+    goodExec(GenerateTestData.class, "--start-row", "0", "--count", "10000", "--output", dir + "/tmp/input/data");
+    goodExec(SetupTable.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "bulkTable");
+    goodExec(BulkIngestExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "bulkTable", "--inputDir", dir + "/tmp/input",
+        "--workDir", dir + "/tmp");
 
     log.info("Running TeraSortIngest example");
-    exec(TeraSortIngest.class, new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", "sorted", "-i", instance,
-        "-z", keepers, "-u", user, "-p", passwd, "--splits", "4"});
+    goodExec(TeraSortIngest.class, "--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", "sorted", "-i", instance, "-z",
+        keepers, "-u", user, "-p", passwd, "--splits", "4");
     log.info("Running Regex example");
-    exec(RegexExample.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "sorted", "--rowRegex", ".*999.*", "--output",
-        dir + "/tmp/nines"});
+    goodExec(RegexExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "sorted", "--rowRegex", ".*999.*", "--output", dir
+        + "/tmp/nines");
     log.info("Running RowHash example");
-    exec(RowHash.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "sorted", "--column", "c:"});
+    goodExec(RowHash.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "sorted", "--column", "c:");
     log.info("Running TableToFile example");
-    exec(TableToFile.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "sorted", "--output", dir + "/tmp/tableFile"});
+    goodExec(TableToFile.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "sorted", "--output", dir + "/tmp/tableFile");
 
     log.info("Running word count example");
     c.tableOperations().create("wordCount");
@@ -252,39 +237,48 @@ public class ExamplesIT extends ConfigurableMacIT {
     SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
     c.tableOperations().attachIterator("wordCount", is);
     fs.copyFromLocalFile(new Path(new Path(System.getProperty("user.dir")).getParent(), "README"), new Path(dir + "/tmp/wc/README"));
-    exec(WordCount.class, new String[] {"-i", instance, "-u", user, "-p", passwd, "-z", keepers, "--input", dir + "/tmp/wc", "-t", "wordCount"});
+    goodExec(WordCount.class, "-i", instance, "-u", user, "-p", passwd, "-z", keepers, "--input", dir + "/tmp/wc", "-t", "wordCount");
 
     log.info("Inserting data with a batch writer");
-    exec(InsertWithBatchWriter.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "helloBatch"});
+    goodExec(InsertWithBatchWriter.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "helloBatch");
     log.info("Reading data");
-    exec(ReadData.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "helloBatch"});
+    goodExec(ReadData.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "helloBatch");
     log.info("Running isolated scans");
-    exec(InterferenceTest.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "itest1", "--iterations", "100000", "--isolated"});
+    goodExec(InterferenceTest.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "itest1", "--iterations", "100000", "--isolated");
     log.info("Running scans without isolation");
-    exec(InterferenceTest.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "itest2", "--iterations", "100000",});
+    goodExec(InterferenceTest.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "itest2", "--iterations", "100000");
     log.info("Performing some row operations");
-    exec(RowOperations.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd,});
+    goodExec(RowOperations.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd);
     log.info("Using the batch writer");
     c.tableOperations().create("test");
-    exec(SequentialBatchWriter.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "test", "--start", "0", "--num", "100000",
-        "--size", "50", "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility});
+    goodExec(SequentialBatchWriter.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", "test", "--start", "0", "--num", "100000", "--size",
+        "50", "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility);
 
     log.info("Reading and writing some data");
-    exec(ReadWriteExample.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", "test2", "--createtable",
-        "-c", "--debug"});
+    goodExec(ReadWriteExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", "test2", "--createtable", "-c",
+        "--debug");
     log.info("Deleting some data");
-    exec(ReadWriteExample.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", "test2", "-d", "--debug"});
+    goodExec(ReadWriteExample.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", "test2", "-d", "--debug");
     log.info("Writing some data with the batch writer");
     c.tableOperations().create("test3");
-    exec(RandomBatchWriter.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "test3", "--num", "100000", "--min", "0",
-        "--max", "99999", "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility});
+    goodExec(RandomBatchWriter.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "test3", "--num", "100000", "--min", "0", "--max",
+        "100000", "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility);
     log.info("Reading some data with the batch scanner");
-    exec(RandomBatchScanner.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "test3", "--num", "10000", "--min", "0",
-        "--max", "99999", "--size", "100", "--scanThreads", "4", "--auths", auths});
+    goodExec(RandomBatchScanner.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "test3", "--num", "10000", "--min", "0", "--max",
+        "100000", "--size", "100", "--scanThreads", "4", "--auths", auths);
     log.info("Running an example table operation (Flush)");
-    exec(Flush.class, new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "test3",});
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+    goodExec(Flush.class, "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", "test3");
+    goodExec(Admin.class, "stopAll");
 
   }
 
+  private Process goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException {
+    return expectExec(0, theClass, args);
+  }
+
+  private Process expectExec(int exitCode, Class<?> theClass, String... args) throws InterruptedException, IOException {
+    Process p = null;
+    assertEquals(exitCode, (p = cluster.exec(theClass, Collections.singletonList(MapReduceIT.hadoopTmpDirArg), args)).waitFor());
+    return p;
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
index 8644440..08a6f51 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
@@ -23,9 +23,9 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.SortedSet;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -50,34 +50,34 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
 public class FunctionalTestUtils {
-  
+
   static void checkRFiles(Connector c, String tableName, int minTablets, int maxTablets, int minRFiles, int maxRFiles) throws Exception {
     Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     String tableId = c.tableOperations().tableIdMap().get(tableName);
     scanner.setRange(new Range(new Text(tableId + ";"), true, new Text(tableId + "<"), true));
     scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
     MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
-    
+
     HashMap<Text,Integer> tabletFileCounts = new HashMap<Text,Integer>();
-    
+
     for (Entry<Key,Value> entry : scanner) {
-      
+
       Text row = entry.getKey().getRow();
-      
+
       Integer count = tabletFileCounts.get(row);
       if (count == null)
         count = 0;
       if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
         count = count + 1;
       }
-      
+
       tabletFileCounts.put(row, count);
     }
-    
+
     if (tabletFileCounts.size() < minTablets || tabletFileCounts.size() > maxTablets) {
       throw new Exception("Did not find expected number of tablets " + tabletFileCounts.size());
     }
-    
+
     Set<Entry<Text,Integer>> es = tabletFileCounts.entrySet();
     for (Entry<Text,Integer> entry : es) {
       if (entry.getValue() > maxRFiles || entry.getValue() < minRFiles) {
@@ -85,28 +85,28 @@ public class FunctionalTestUtils {
       }
     }
   }
-  
+
   static public void bulkImport(Connector c, FileSystem fs, String table, String dir) throws Exception {
     String failDir = dir + "_failures";
     Path failPath = new Path(failDir);
     fs.delete(failPath, true);
     fs.mkdirs(failPath);
-    
+
     c.tableOperations().importDirectory(table, dir, failDir, false);
-    
+
     if (fs.listStatus(failPath).length > 0) {
       throw new Exception("Some files failed to bulk import");
     }
-    
+
   }
-  
+
   static public void checkSplits(Connector c, String table, int min, int max) throws Exception {
     Collection<Text> splits = c.tableOperations().listSplits(table);
     if (splits.size() < min || splits.size() > max) {
       throw new Exception("# of table splits points out of range, #splits=" + splits.size() + " table=" + table + " min=" + min + " max=" + max);
     }
   }
-  
+
   static public void createRFiles(final Connector c, FileSystem fs, String path, int rows, int splits, int threads) throws Exception {
     fs.delete(new Path(path), true);
     ExecutorService threadPool = Executors.newFixedThreadPool(threads);
@@ -135,7 +135,7 @@ public class FunctionalTestUtils {
     threadPool.awaitTermination(1, TimeUnit.HOURS);
     assertFalse(fail.get());
   }
-  
+
   static public String readAll(InputStream is) throws IOException {
     byte[] buffer = new byte[4096];
     StringBuffer result = new StringBuffer();
@@ -147,27 +147,28 @@ public class FunctionalTestUtils {
     }
     return result.toString();
   }
-  
-  static String readAll(MiniAccumuloCluster c, Class<? extends Object> klass, Process p) throws Exception {
+
+  static String readAll(MiniAccumuloCluster c, Class<?> klass, Process p) throws Exception {
     for (LogWriter writer : c.getLogWriters())
       writer.flush();
     return readAll(new FileInputStream(c.getConfig().getLogDir() + "/" + klass.getSimpleName() + "_" + p.hashCode() + ".out"));
   }
-  
+
   static Mutation nm(String row, String cf, String cq, Value value) {
     Mutation m = new Mutation(new Text(row));
     m.put(new Text(cf), new Text(cq), value);
     return m;
   }
-  
+
   static Mutation nm(String row, String cf, String cq, String value) {
     return nm(row, cf, cq, new Value(value.getBytes()));
   }
-  public static SortedSet<Text> splits(String [] splits) {
+
+  public static SortedSet<Text> splits(String[] splits) {
     SortedSet<Text> result = new TreeSet<Text>();
     for (String split : splits)
       result.add(new Text(split));
     return result;
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
index 0867e73..bb3a9e5 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.Collections;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.client.AccumuloException;
@@ -43,6 +44,7 @@ import org.codehaus.plexus.util.Base64;
 import org.junit.Test;
 
 public class MapReduceIT extends ConfigurableMacIT {
+  public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir") + "/target/hadoop-tmp";
 
   static final String tablename = "mapredf";
   static final String input_cf = "cf-HASHTYPE";
@@ -66,8 +68,8 @@ public class MapReduceIT extends ConfigurableMacIT {
       bw.addMutation(m);
     }
     bw.close();
-    Process hash = cluster.exec(RowHash.class, "-i", c.getInstance().getInstanceName(), "-z", c.getInstance().getZooKeepers(), "-u", "root", "-p",
-        ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
+    Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i", c.getInstance().getInstanceName(), "-z", c.getInstance()
+        .getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
     assertEquals(0, hash.waitFor());
 
     Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
@@ -81,5 +83,4 @@ public class MapReduceIT extends ConfigurableMacIT {
     }
 
   }
-
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
index 10db515..8293fe8 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
@@ -22,6 +22,7 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.accumulo.minicluster.MiniAccumuloInstance;
@@ -41,6 +42,8 @@ public class SimpleMacIT extends AbstractMacIT {
     if (getInstanceOneConnector() == null && cluster == null) {
       folder = createSharedTestDir(SimpleMacIT.class.getName());
       MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder, ROOT_PASSWORD);
+      cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
+      cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
       configureForEnvironment(cfg, SimpleMacIT.class, createSharedTestDir(SimpleMacIT.class.getName() + "-ssl"));
       cluster = new MiniAccumuloCluster(cfg);
       cluster.start();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
index ba5f44b..9b089a1 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
@@ -18,17 +18,27 @@ package org.apache.accumulo.test.functional;
 
 import static org.junit.Assert.assertEquals;
 
+import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class ZooCacheIT extends ConfigurableMacIT {
 
+  private static String pathName = "/zcTest-42";
+  private static File testDir;
+
+  @BeforeClass
+  public static void createTestDirectory() {
+    testDir = createSharedTestDir(ZooCacheIT.class.getName() + pathName);
+  }
+
   @Test(timeout = 2 * 60 * 1000)
   public void test() throws Exception {
-    assertEquals(0, exec(CacheTestClean.class, "/zcTest-42", "/tmp/zcTest-42").waitFor());
+    assertEquals(0, exec(CacheTestClean.class, pathName, testDir.getAbsolutePath()).waitFor());
     final AtomicReference<Exception> ref = new AtomicReference<Exception>();
     List<Thread> threads = new ArrayList<Thread>();
     for (int i = 0; i < 3; i++) {
@@ -36,7 +46,7 @@ public class ZooCacheIT extends ConfigurableMacIT {
         @Override
         public void run() {
           try {
-            CacheTestReader.main(new String[] {"/zcTest-42", "/tmp/zcTest-42", getConnector().getInstance().getZooKeepers()});
+            CacheTestReader.main(new String[] {pathName, testDir.getAbsolutePath(), getConnector().getInstance().getZooKeepers()});
           } catch (Exception ex) {
             ref.set(ex);
           }
@@ -45,7 +55,7 @@ public class ZooCacheIT extends ConfigurableMacIT {
       reader.start();
       threads.add(reader);
     }
-    assertEquals(0, exec(CacheTestWriter.class, "/zcTest-42", "/tmp/zcTest-42", "3", "50").waitFor());
+    assertEquals(0, exec(CacheTestWriter.class, pathName, testDir.getAbsolutePath(), "3", "50").waitFor());
     for (Thread t : threads) {
       t.join();
       if (ref.get() != null)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/util/CertUtilsTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/util/CertUtilsTest.java b/test/src/test/java/org/apache/accumulo/test/util/CertUtilsTest.java
index bb2a933..eea9ac2 100644
--- a/test/src/test/java/org/apache/accumulo/test/util/CertUtilsTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/util/CertUtilsTest.java
@@ -37,7 +37,7 @@ public class CertUtilsTest {
   private static final String RDN_STRING = "o=Apache Accumulo,cn=CertUtilsTest";
 
   @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
+  public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
 
   private CertUtils getUtils() {
     return new CertUtils(KEYSTORE_TYPE, RDN_STRING, "RSA", 2048, "sha1WithRSAEncryption");


[4/4] git commit: ACCUMULO-1599 Stop using /tmp wherever possible

Posted by ct...@apache.org.
ACCUMULO-1599 Stop using /tmp wherever possible

  The only obvious remaining uses I found were the Jetty temporary
  directories created by Hadoop's MiniDFSCluster. Those get cleaned up
  automatically when the tests pass and don't offer much value for
  debugging test failures, so I don't consider them a high priority.


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/c1fbeac5
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/c1fbeac5
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/c1fbeac5

Branch: refs/heads/1.6.0-SNAPSHOT
Commit: c1fbeac505ad1d01da656f597487173dfb511ded
Parents: 9695621
Author: Christopher Tubbs <ct...@apache.org>
Authored: Wed Dec 11 18:20:01 2013 -0500
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Wed Dec 11 19:24:17 2013 -0500

----------------------------------------------------------------------
 .../org/apache/accumulo/core/conf/Property.java |  11 +-
 .../apache/accumulo/core/data/KeyExtent.java    | 276 ++++-----
 .../accumulo/core/util/ArgumentChecker.java     |  18 +-
 .../client/mapred/AccumuloInputFormatTest.java  |  31 +-
 .../core/client/mock/MockNamespacesTest.java    |   3 +-
 .../simple/client/RandomBatchWriter.java        |  48 +-
 .../simple/filedata/ChunkInputFormatTest.java   |  28 +-
 .../minicluster/MiniAccumuloCluster.java        |  26 +-
 .../security/AuditedSecurityOperation.java      | 120 ++--
 .../tserver/log/TestUpgradePathForWALogs.java   |  44 +-
 .../accumulo/test/MultiTableBatchWriterIT.java  | 505 +++++++++++++++++
 .../test/MultiTableBatchWriterTest.java         | 565 -------------------
 .../org/apache/accumulo/test/ShellServerIT.java |   5 +-
 .../test/functional/ConfigurableMacIT.java      |   2 +-
 .../accumulo/test/functional/ExamplesIT.java    | 112 ++--
 .../test/functional/FunctionalTestUtils.java    |  45 +-
 .../accumulo/test/functional/MapReduceIT.java   |   7 +-
 .../accumulo/test/functional/SimpleMacIT.java   |   3 +
 .../accumulo/test/functional/ZooCacheIT.java    |  16 +-
 .../accumulo/test/util/CertUtilsTest.java       |   2 +-
 20 files changed, 916 insertions(+), 951 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 47f9c37..2ab3a20 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -84,16 +84,15 @@ public enum Property {
   @Sensitive
   RPC_SSL_KEYSTORE_PASSWORD("rpc.javax.net.ssl.keyStorePassword", "", PropertyType.STRING,
       "Password used to encrypt the SSL private keystore.  Leave blank to use the Accumulo instance secret"),
-  RPC_SSL_KEYSTORE_TYPE("rpc.javax.net.ssl.keyStoreType", "jks", PropertyType.STRING,
-      "Type of SSL keystore"),
+  RPC_SSL_KEYSTORE_TYPE("rpc.javax.net.ssl.keyStoreType", "jks", PropertyType.STRING, "Type of SSL keystore"),
   RPC_SSL_TRUSTSTORE_PATH("rpc.javax.net.ssl.trustStore", "$ACCUMULO_CONF_DIR/ssl/truststore.jks", PropertyType.PATH,
       "Path of the truststore file for the root cert"),
   @Sensitive
   RPC_SSL_TRUSTSTORE_PASSWORD("rpc.javax.net.ssl.trustStorePassword", "", PropertyType.STRING,
       "Password used to encrypt the SSL truststore.  Leave blank to use no password"),
-  RPC_SSL_TRUSTSTORE_TYPE("rpc.javax.net.ssl.trustStoreType", "jks", PropertyType.STRING,
-        "Type of SSL truststore"),
-  RPC_USE_JSSE("rpc.useJsse", "false", PropertyType.BOOLEAN, "Use JSSE system properties to configure SSL rather than general.javax.net.ssl.* Accumulo properties"),
+  RPC_SSL_TRUSTSTORE_TYPE("rpc.javax.net.ssl.trustStoreType", "jks", PropertyType.STRING, "Type of SSL truststore"),
+  RPC_USE_JSSE("rpc.useJsse", "false", PropertyType.BOOLEAN,
+      "Use JSSE system properties to configure SSL rather than general.javax.net.ssl.* Accumulo properties"),
   // instance properties (must be the same for every node in an instance)
   INSTANCE_PREFIX("instance.", null, PropertyType.PREFIX,
       "Properties in this category must be consistent throughout a cloud. This is enforced and servers won't be able to communicate if these differ."),
@@ -598,7 +597,7 @@ public enum Property {
   }
 
   // This is not a cache for loaded classes, just a way to avoid spamming the debug log
-  static Map<String, Class<? extends Object>> loaded = Collections.synchronizedMap(new HashMap<String, Class<? extends Object>>()); 
+  static Map<String,Class<?>> loaded = Collections.synchronizedMap(new HashMap<String,Class<?>>());
 
   public static <T> T createInstanceFromPropertyName(AccumuloConfiguration conf, Property property, Class<T> base, T defaultInstance) {
     String clazzName = conf.get(property);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java b/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
index fc766fe..9dc8db5 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
@@ -51,9 +51,9 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
 
 public class KeyExtent implements WritableComparable<KeyExtent> {
-  
+
   private static final WeakHashMap<Text,WeakReference<Text>> tableIds = new WeakHashMap<Text,WeakReference<Text>>();
-  
+
   private static Text dedupeTableId(Text tableId) {
     synchronized (tableIds) {
       WeakReference<Text> etir = tableIds.get(tableId);
@@ -63,30 +63,30 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
           return eti;
         }
       }
-      
+
       tableId = new Text(tableId);
       tableIds.put(tableId, new WeakReference<Text>(tableId));
       return tableId;
     }
   }
-  
+
   private Text textTableId;
   private Text textEndRow;
   private Text textPrevEndRow;
-  
+
   private void check() {
-    
+
     if (getTableId() == null)
       throw new IllegalArgumentException("null table id not allowed");
-    
+
     if (getEndRow() == null || getPrevEndRow() == null)
       return;
-    
+
     if (getPrevEndRow().compareTo(getEndRow()) >= 0) {
       throw new IllegalArgumentException("prevEndRow (" + getPrevEndRow() + ") >= endRow (" + getEndRow() + ")");
     }
   }
-  
+
   /**
    * Default constructor
    * 
@@ -96,32 +96,32 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
     this.setEndRow(new Text(), false, false);
     this.setPrevEndRow(new Text(), false, false);
   }
-  
+
   public KeyExtent(Text table, Text endRow, Text prevEndRow) {
     this.setTableId(table);
     this.setEndRow(endRow, false, true);
     this.setPrevEndRow(prevEndRow, false, true);
-    
+
     check();
   }
-  
+
   public KeyExtent(KeyExtent extent) {
     // extent has already deduped table id, so there is no need to do it again
     this.textTableId = extent.textTableId;
     this.setEndRow(extent.getEndRow(), false, true);
     this.setPrevEndRow(extent.getPrevEndRow(), false, true);
-    
+
     check();
   }
-  
+
   public KeyExtent(TKeyExtent tke) {
     this.setTableId(new Text(ByteBufferUtil.toBytes(tke.table)));
     this.setEndRow(tke.endRow == null ? null : new Text(ByteBufferUtil.toBytes(tke.endRow)), false, false);
     this.setPrevEndRow(tke.prevEndRow == null ? null : new Text(ByteBufferUtil.toBytes(tke.prevEndRow)), false, false);
-    
+
     check();
   }
-  
+
   /**
    * Returns a String representing this extent's entry in the Metadata table
    * 
@@ -129,48 +129,48 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   public Text getMetadataEntry() {
     return getMetadataEntry(getTableId(), getEndRow());
   }
-  
+
   public static Text getMetadataEntry(Text tableId, Text endRow) {
     return MetadataSchema.TabletsSection.getRow(tableId, endRow);
   }
-  
+
   // constructor for loading extents from metadata rows
   public KeyExtent(Text flattenedExtent, Value prevEndRow) {
     decodeMetadataRow(flattenedExtent);
-    
+
     // decode the prev row
     this.setPrevEndRow(decodePrevEndRow(prevEndRow), false, true);
-    
+
     check();
   }
-  
+
   // recreates an encoded extent from a string representation
   // this encoding is what is stored as the row id of the metadata table
   public KeyExtent(Text flattenedExtent, Text prevEndRow) {
-    
+
     decodeMetadataRow(flattenedExtent);
-    
+
     this.setPrevEndRow(null, false, false);
     if (prevEndRow != null)
       this.setPrevEndRow(prevEndRow, false, true);
-    
+
     check();
   }
-  
+
   /**
    * Sets the extents table id
    * 
    */
   public void setTableId(Text tId) {
-    
+
     if (tId == null)
       throw new IllegalArgumentException("null table name not allowed");
-    
+
     this.textTableId = dedupeTableId(tId);
-    
+
     hashCode = 0;
   }
-  
+
   /**
    * Returns the extent's table id
    * 
@@ -178,7 +178,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   public Text getTableId() {
     return textTableId;
   }
-  
+
   private void setEndRow(Text endRow, boolean check, boolean copy) {
     if (endRow != null)
       if (copy)
@@ -187,12 +187,12 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
         this.textEndRow = endRow;
     else
       this.textEndRow = null;
-    
+
     hashCode = 0;
     if (check)
       check();
   }
-  
+
   /**
    * Sets this extent's end row
    * 
@@ -200,7 +200,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   public void setEndRow(Text endRow) {
     setEndRow(endRow, true, true);
   }
-  
+
   /**
    * Returns this extent's end row
    * 
@@ -208,7 +208,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   public Text getEndRow() {
     return textEndRow;
   }
-  
+
   /**
    * Return the previous extent's end row
    * 
@@ -216,7 +216,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   public Text getPrevEndRow() {
     return textPrevEndRow;
   }
-  
+
   private void setPrevEndRow(Text prevEndRow, boolean check, boolean copy) {
     if (prevEndRow != null)
       if (copy)
@@ -225,12 +225,12 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
         this.textPrevEndRow = prevEndRow;
     else
       this.textPrevEndRow = null;
-    
+
     hashCode = 0;
     if (check)
       check();
   }
-  
+
   /**
    * Sets the previous extent's end row
    * 
@@ -238,7 +238,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   public void setPrevEndRow(Text prevEndRow) {
     setPrevEndRow(prevEndRow, true, true);
   }
-  
+
   /**
    * Populates the extents data fields from a DataInput object
    * 
@@ -264,11 +264,11 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
     } else {
       setPrevEndRow((Text) null);
     }
-    
+
     hashCode = 0;
     check();
   }
-  
+
   /**
    * Writes this extent's data fields to a DataOutput object
    * 
@@ -289,7 +289,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
       out.writeBoolean(false);
     }
   }
-  
+
   /**
    * Returns a String representing the previous extent's entry in the Metadata table
    * 
@@ -297,13 +297,13 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   public Mutation getPrevRowUpdateMutation() {
     return getPrevRowUpdateMutation(this);
   }
-  
+
   /**
    * Empty start or end rows tell the method there are no start or end rows, and to use all the keyextents that are before the end row if no start row etc.
    * 
    * @return all the key extents that the rows cover
    */
-  
+
   public static Collection<KeyExtent> getKeyExtentsForRange(Text startRow, Text endRow, Set<KeyExtent> kes) {
     if (kes == null)
       return Collections.emptyList();
@@ -340,7 +340,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
           // tablet in the middle
           if (startRow.getLength() == 0) {
             // no start row
-            
+
             if (endRow.getLength() == 0) {
               // no start & end row
               keys.add(ckes);
@@ -361,24 +361,24 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
               keys.add(ckes);
             }
           }
-          
+
         }
       }
     }
     return keys;
   }
-  
+
   public static Text decodePrevEndRow(Value ibw) {
     Text per = null;
-    
+
     if (ibw.get()[0] != 0) {
       per = new Text();
       per.set(ibw.get(), 1, ibw.get().length - 1);
     }
-    
+
     return per;
   }
-  
+
   public static Value encodePrevEndRow(Text per) {
     if (per == null)
       return new Value(new byte[] {0});
@@ -387,31 +387,31 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
     System.arraycopy(per.getBytes(), 0, b, 1, per.getLength());
     return new Value(b);
   }
-  
+
   public static Mutation getPrevRowUpdateMutation(KeyExtent ke) {
     Mutation m = new Mutation(ke.getMetadataEntry());
     TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, encodePrevEndRow(ke.getPrevEndRow()));
     return m;
   }
-  
+
   /**
    * Compares extents based on rows
    * 
    */
   @Override
   public int compareTo(KeyExtent other) {
-    
+
     int result = getTableId().compareTo(other.getTableId());
     if (result != 0)
       return result;
-    
+
     if (this.getEndRow() == null) {
       if (other.getEndRow() != null)
         return 1;
     } else {
       if (other.getEndRow() == null)
         return -1;
-      
+
       result = getEndRow().compareTo(other.getEndRow());
       if (result != 0)
         return result;
@@ -425,35 +425,35 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
       return 1;
     return this.getPrevEndRow().compareTo(other.getPrevEndRow());
   }
-  
+
   private int hashCode = 0;
-  
+
   @Override
   public int hashCode() {
     if (hashCode != 0)
       return hashCode;
-    
+
     int prevEndRowHash = 0;
     int endRowHash = 0;
     if (this.getEndRow() != null) {
       endRowHash = this.getEndRow().hashCode();
     }
-    
+
     if (this.getPrevEndRow() != null) {
       prevEndRowHash = this.getPrevEndRow().hashCode();
     }
-    
+
     hashCode = getTableId().hashCode() + endRowHash + prevEndRowHash;
     return hashCode;
   }
-  
+
   private boolean equals(Text t1, Text t2) {
     if (t1 == null || t2 == null)
       return t1 == t2;
-    
+
     return t1.equals(t2);
   }
-  
+
   @Override
   public boolean equals(Object o) {
     if (o == this)
@@ -463,47 +463,47 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
     KeyExtent oke = (KeyExtent) o;
     return textTableId.equals(oke.textTableId) && equals(textEndRow, oke.textEndRow) && equals(textPrevEndRow, oke.textPrevEndRow);
   }
-  
+
   @Override
   public String toString() {
     String endRowString;
     String prevEndRowString;
     String tableIdString = getTableId().toString().replaceAll(";", "\\\\;").replaceAll("\\\\", "\\\\\\\\");
-    
+
     if (getEndRow() == null)
       endRowString = "<";
     else
       endRowString = ";" + TextUtil.truncate(getEndRow()).toString().replaceAll(";", "\\\\;").replaceAll("\\\\", "\\\\\\\\");
-    
+
     if (getPrevEndRow() == null)
       prevEndRowString = "<";
     else
       prevEndRowString = ";" + TextUtil.truncate(getPrevEndRow()).toString().replaceAll(";", "\\\\;").replaceAll("\\\\", "\\\\\\\\");
-    
+
     return tableIdString + endRowString + prevEndRowString;
   }
-  
+
   public UUID getUUID() {
     try {
-      
+
       ByteArrayOutputStream baos = new ByteArrayOutputStream();
       DataOutputStream dos = new DataOutputStream(baos);
-      
+
       // to get a unique hash it is important to encode the data
       // like it is being serialized
-      
+
       this.write(dos);
-      
+
       dos.close();
-      
+
       return UUID.nameUUIDFromBytes(baos.toByteArray());
-      
+
     } catch (IOException e) {
       // should not happen since we are writing to memory
       throw new RuntimeException(e);
     }
   }
-  
+
   // note: this is only the encoding of the table id and the last row, not the prev row
   /**
    * Populates the extent's fields based on a flatted extent
@@ -512,183 +512,183 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   private void decodeMetadataRow(Text flattenedExtent) {
     int semiPos = -1;
     int ltPos = -1;
-    
+
     for (int i = 0; i < flattenedExtent.getLength(); i++) {
       if (flattenedExtent.getBytes()[i] == ';' && semiPos < 0) {
         // want the position of the first semicolon
         semiPos = i;
       }
-      
+
       if (flattenedExtent.getBytes()[i] == '<') {
         ltPos = i;
       }
     }
-    
+
     if (semiPos < 0 && ltPos < 0) {
       throw new IllegalArgumentException("Metadata row does not contain ; or <  " + flattenedExtent);
     }
-    
+
     if (semiPos < 0) {
-      
+
       if (ltPos != flattenedExtent.getLength() - 1) {
         throw new IllegalArgumentException("< must come at end of Metadata row  " + flattenedExtent);
       }
-      
+
       Text tableId = new Text();
       tableId.set(flattenedExtent.getBytes(), 0, flattenedExtent.getLength() - 1);
       this.setTableId(tableId);
       this.setEndRow(null, false, false);
     } else {
-      
+
       Text tableId = new Text();
       tableId.set(flattenedExtent.getBytes(), 0, semiPos);
-      
+
       Text endRow = new Text();
       endRow.set(flattenedExtent.getBytes(), semiPos + 1, flattenedExtent.getLength() - (semiPos + 1));
-      
+
       this.setTableId(tableId);
-      
+
       this.setEndRow(endRow, false, false);
     }
   }
-  
+
   public static byte[] tableOfMetadataRow(Text row) {
     KeyExtent ke = new KeyExtent();
     ke.decodeMetadataRow(row);
     return TextUtil.getBytes(ke.getTableId());
   }
-  
+
   public boolean contains(final ByteSequence bsrow) {
     if (bsrow == null) {
       throw new IllegalArgumentException("Passing null to contains is ambiguous, could be in first or last extent of table");
     }
-    
+
     BinaryComparable row = new BinaryComparable() {
-      
+
       @Override
       public int getLength() {
         return bsrow.length();
       }
-      
+
       @Override
       public byte[] getBytes() {
         if (bsrow.isBackedByArray() && bsrow.offset() == 0)
           return bsrow.getBackingArray();
-        
+
         return bsrow.toArray();
       }
     };
-    
+
     if ((this.getPrevEndRow() == null || this.getPrevEndRow().compareTo(row) < 0) && (this.getEndRow() == null || this.getEndRow().compareTo(row) >= 0)) {
       return true;
     }
     return false;
   }
-  
+
   public boolean contains(BinaryComparable row) {
     if (row == null) {
       throw new IllegalArgumentException("Passing null to contains is ambiguous, could be in first or last extent of table");
     }
-    
+
     if ((this.getPrevEndRow() == null || this.getPrevEndRow().compareTo(row) < 0) && (this.getEndRow() == null || this.getEndRow().compareTo(row) >= 0)) {
       return true;
     }
     return false;
   }
-  
+
   public Range toDataRange() {
     return new Range(getPrevEndRow(), false, getEndRow(), true);
   }
-  
+
   public Range toMetadataRange() {
     Text metadataPrevRow = new Text(getTableId());
     metadataPrevRow.append(new byte[] {';'}, 0, 1);
     if (getPrevEndRow() != null) {
       metadataPrevRow.append(getPrevEndRow().getBytes(), 0, getPrevEndRow().getLength());
     }
-    
+
     Range range = new Range(metadataPrevRow, getPrevEndRow() == null, getMetadataEntry(), true);
     return range;
   }
-  
+
   public static SortedSet<KeyExtent> findChildren(KeyExtent ke, SortedSet<KeyExtent> tablets) {
-    
+
     SortedSet<KeyExtent> children = null;
-    
+
     for (KeyExtent tabletKe : tablets) {
-      
+
       if (ke.getPrevEndRow() == tabletKe.getPrevEndRow() || ke.getPrevEndRow() != null && tabletKe.getPrevEndRow() != null
           && tabletKe.getPrevEndRow().compareTo(ke.getPrevEndRow()) == 0) {
         children = new TreeSet<KeyExtent>();
       }
-      
+
       if (children != null) {
         children.add(tabletKe);
       }
-      
+
       if (ke.getEndRow() == tabletKe.getEndRow() || ke.getEndRow() != null && tabletKe.getEndRow() != null
           && tabletKe.getEndRow().compareTo(ke.getEndRow()) == 0) {
         return children;
       }
     }
-    
+
     return new TreeSet<KeyExtent>();
   }
-  
+
   public static KeyExtent findContainingExtent(KeyExtent extent, SortedSet<KeyExtent> extents) {
-    
+
     KeyExtent lookupExtent = new KeyExtent(extent);
     lookupExtent.setPrevEndRow((Text) null);
-    
+
     SortedSet<KeyExtent> tailSet = extents.tailSet(lookupExtent);
-    
+
     if (tailSet.isEmpty()) {
       return null;
     }
-    
+
     KeyExtent first = tailSet.first();
-    
+
     if (first.getTableId().compareTo(extent.getTableId()) != 0) {
       return null;
     }
-    
+
     if (first.getPrevEndRow() == null) {
       return first;
     }
-    
+
     if (extent.getPrevEndRow() == null) {
       return null;
     }
-    
+
     if (extent.getPrevEndRow().compareTo(first.getPrevEndRow()) >= 0)
       return first;
     return null;
   }
-  
+
   private static boolean startsAfter(KeyExtent nke, KeyExtent ke) {
-    
+
     int tiCmp = ke.getTableId().compareTo(nke.getTableId());
-    
+
     if (tiCmp > 0) {
       return true;
     }
-    
+
     return ke.getPrevEndRow() != null && nke.getEndRow() != null && ke.getPrevEndRow().compareTo(nke.getEndRow()) >= 0;
   }
-  
+
   private static Text rowAfterPrevRow(KeyExtent nke) {
     Text row = new Text(nke.getPrevEndRow());
     row.append(new byte[] {0}, 0, 1);
     return row;
   }
-  
+
   // Some duplication with TabletLocatorImpl
   public static Set<KeyExtent> findOverlapping(KeyExtent nke, SortedSet<KeyExtent> extents) {
     if (nke == null || extents == null || extents.isEmpty())
       return Collections.emptySet();
-    
+
     SortedSet<KeyExtent> start;
-    
+
     if (nke.getPrevEndRow() != null) {
       Text row = rowAfterPrevRow(nke);
       KeyExtent lookupKey = new KeyExtent(nke.getTableId(), row, null);
@@ -697,7 +697,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
       KeyExtent lookupKey = new KeyExtent(nke.getTableId(), new Text(), null);
       start = extents.tailSet(lookupKey);
     }
-    
+
     TreeSet<KeyExtent> result = new TreeSet<KeyExtent>();
     for (KeyExtent ke : start) {
       if (startsAfter(nke, ke)) {
@@ -707,20 +707,20 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
     }
     return result;
   }
-  
+
   public boolean overlaps(KeyExtent other) {
     SortedSet<KeyExtent> set = new TreeSet<KeyExtent>();
     set.add(other);
     return !findOverlapping(this, set).isEmpty();
   }
-  
+
   // Specialization of findOverlapping(KeyExtent, SortedSet<KeyExtent> to work with SortedMap
-  public static Set<KeyExtent> findOverlapping(KeyExtent nke, SortedMap<KeyExtent,? extends Object> extents) {
+  public static Set<KeyExtent> findOverlapping(KeyExtent nke, SortedMap<KeyExtent,?> extents) {
     if (nke == null || extents == null || extents.isEmpty())
       return Collections.emptySet();
-    
-    SortedMap<KeyExtent,? extends Object> start;
-    
+
+    SortedMap<KeyExtent,?> start;
+
     if (nke.getPrevEndRow() != null) {
       Text row = rowAfterPrevRow(nke);
       KeyExtent lookupKey = new KeyExtent(nke.getTableId(), row, null);
@@ -729,9 +729,9 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
       KeyExtent lookupKey = new KeyExtent(nke.getTableId(), new Text(), null);
       start = extents.tailMap(lookupKey);
     }
-    
+
     TreeSet<KeyExtent> result = new TreeSet<KeyExtent>();
-    for (Entry<KeyExtent,? extends Object> entry : start.entrySet()) {
+    for (Entry<KeyExtent,?> entry : start.entrySet()) {
       KeyExtent ke = entry.getKey();
       if (startsAfter(nke, ke)) {
         break;
@@ -740,36 +740,36 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
     }
     return result;
   }
-  
+
   public static Text getMetadataEntry(KeyExtent extent) {
     return getMetadataEntry(extent.getTableId(), extent.getEndRow());
   }
-  
+
   public TKeyExtent toThrift() {
     return new TKeyExtent(TextUtil.getByteBuffer(textTableId), textEndRow == null ? null : TextUtil.getByteBuffer(textEndRow), textPrevEndRow == null ? null
         : TextUtil.getByteBuffer(textPrevEndRow));
   }
-  
+
   public boolean isPreviousExtent(KeyExtent prevExtent) {
     if (prevExtent == null)
       return getPrevEndRow() == null;
-    
+
     if (!prevExtent.getTableId().equals(getTableId()))
       throw new IllegalArgumentException("Cannot compare accross tables " + prevExtent + " " + this);
-    
+
     if (prevExtent.getEndRow() == null)
       return false;
-    
+
     if (getPrevEndRow() == null)
       return false;
-    
+
     return prevExtent.getEndRow().equals(getPrevEndRow());
   }
-  
+
   public boolean isMeta() {
     return getTableId().toString().equals(MetadataTable.ID) || isRootTablet();
   }
-  
+
   public boolean isRootTablet() {
     return getTableId().toString().equals(RootTable.ID);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/core/src/main/java/org/apache/accumulo/core/util/ArgumentChecker.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/ArgumentChecker.java b/core/src/main/java/org/apache/accumulo/core/util/ArgumentChecker.java
index 8966299..20a1373 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/ArgumentChecker.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/ArgumentChecker.java
@@ -25,43 +25,43 @@ package org.apache.accumulo.core.util;
  */
 public class ArgumentChecker {
   private static final String NULL_ARG_MSG = "argument was null";
-  
+
   public static final void notNull(final Object arg1) {
     if (arg1 == null)
       throw new IllegalArgumentException(NULL_ARG_MSG + ":Is null- arg1? " + (arg1 == null));
   }
-  
+
   public static final void notNull(final Object arg1, final Object arg2) {
     if (arg1 == null || arg2 == null)
       throw new IllegalArgumentException(NULL_ARG_MSG + ":Is null- arg1? " + (arg1 == null) + " arg2? " + (arg2 == null));
   }
-  
+
   public static final void notNull(final Object arg1, final Object arg2, final Object arg3) {
     if (arg1 == null || arg2 == null || arg3 == null)
       throw new IllegalArgumentException(NULL_ARG_MSG + ":Is null- arg1? " + (arg1 == null) + " arg2? " + (arg2 == null) + " arg3? " + (arg3 == null));
   }
-  
+
   public static final void notNull(final Object arg1, final Object arg2, final Object arg3, final Object arg4) {
     if (arg1 == null || arg2 == null || arg3 == null || arg4 == null)
       throw new IllegalArgumentException(NULL_ARG_MSG + ":Is null- arg1? " + (arg1 == null) + " arg2? " + (arg2 == null) + " arg3? " + (arg3 == null)
           + " arg4? " + (arg4 == null));
   }
-  
+
   public static final void notNull(final Object[] args) {
     if (args == null)
       throw new IllegalArgumentException(NULL_ARG_MSG + ":arg array is null");
-    
+
     for (int i = 0; i < args.length; i++)
       if (args[i] == null)
         throw new IllegalArgumentException(NULL_ARG_MSG + ":arg" + i + " is null");
   }
-  
+
   public static final void strictlyPositive(final int i) {
     if (i <= 0)
       throw new IllegalArgumentException("integer should be > 0, was " + i);
   }
-  
-  public static final void notEmpty(Iterable<? extends Object> arg) {
+
+  public static final void notEmpty(Iterable<?> arg) {
     if (!arg.iterator().hasNext())
       throw new IllegalArgumentException("Argument should not be empty");
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
index 2822c50..e83453e 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
@@ -36,8 +36,8 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.iterators.user.WholeRowIterator;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobClient;
@@ -48,6 +48,8 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.lib.NullOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class AccumuloInputFormatTest {
@@ -56,6 +58,18 @@ public class AccumuloInputFormatTest {
   private static final String INSTANCE_NAME = PREFIX + "_mapred_instance";
   private static final String TEST_TABLE_1 = PREFIX + "_mapred_table_1";
 
+  private JobConf job;
+
+  @BeforeClass
+  public static void setupClass() {
+    System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
+  }
+
+  @Before
+  public void createJob() {
+    job = new JobConf();
+  }
+
   /**
    * Check that the iterator configuration is getting stored in the Job conf correctly.
    * 
@@ -63,8 +77,6 @@ public class AccumuloInputFormatTest {
    */
   @Test
   public void testSetIterator() throws IOException {
-    JobConf job = new JobConf();
-
     IteratorSetting is = new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator");
     AccumuloInputFormat.addIterator(job, is);
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -75,8 +87,6 @@ public class AccumuloInputFormatTest {
 
   @Test
   public void testAddIterator() throws IOException {
-    JobConf job = new JobConf();
-
     AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", WholeRowIterator.class));
     AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
     IteratorSetting iter = new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator");
@@ -123,7 +133,6 @@ public class AccumuloInputFormatTest {
     String value = "comma,delimited,value";
     IteratorSetting someSetting = new IteratorSetting(1, "iterator", "Iterator.class");
     someSetting.addOption(key, value);
-    JobConf job = new JobConf();
     AccumuloInputFormat.addIterator(job, someSetting);
 
     List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
@@ -151,8 +160,6 @@ public class AccumuloInputFormatTest {
    */
   @Test
   public void testGetIteratorSettings() throws IOException {
-    JobConf job = new JobConf();
-
     AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator"));
     AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
     AccumuloInputFormat.addIterator(job, new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator"));
@@ -182,8 +189,6 @@ public class AccumuloInputFormatTest {
 
   @Test
   public void testSetRegex() throws IOException {
-    JobConf job = new JobConf();
-
     String regex = ">\"*%<>\'\\";
 
     IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
@@ -259,8 +264,8 @@ public class AccumuloInputFormatTest {
       return JobClient.runJob(job).isSuccessful() ? 0 : 1;
     }
 
-    public static void main(String[] args) throws Exception {
-      assertEquals(0, ToolRunner.run(CachedConfiguration.getInstance(), new MRTester(), args));
+    public static void main(String... args) throws Exception {
+      assertEquals(0, ToolRunner.run(new Configuration(), new MRTester(), args));
     }
   }
 
@@ -277,7 +282,7 @@ public class AccumuloInputFormatTest {
     }
     bw.close();
 
-    MRTester.main(new String[] {"root", "", TEST_TABLE_1});
+    MRTester.main("root", "", TEST_TABLE_1);
     assertNull(e1);
     assertNull(e2);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java b/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java
index ad03551..50a9fbe 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mock/MockNamespacesTest.java
@@ -20,6 +20,7 @@ package org.apache.accumulo.core.client.mock;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.Map.Entry;
@@ -52,7 +53,7 @@ import org.junit.rules.TemporaryFolder;
 public class MockNamespacesTest {
 
   Random random = new Random();
-  public static TemporaryFolder folder = new TemporaryFolder();
+  public static TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
 
   /**
    * This test creates a table without specifying a namespace. In this case, it puts the table into the default namespace.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
index ce91da6..689f07c 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
@@ -46,7 +46,7 @@ import com.beust.jcommander.Parameter;
  * qualifiers will be "1". The values will be random byte arrays of a specified size.
  */
 public class RandomBatchWriter {
-  
+
   /**
    * Creates a random byte array of specified size using the specified seed.
    * 
@@ -59,17 +59,17 @@ public class RandomBatchWriter {
   public static byte[] createValue(long rowid, int dataSize) {
     Random r = new Random(rowid);
     byte value[] = new byte[dataSize];
-    
+
     r.nextBytes(value);
-    
+
     // transform to printable chars
     for (int j = 0; j < value.length; j++) {
       value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
     }
-    
+
     return value;
   }
-  
+
   /**
    * Creates a mutation on a specified row with column family "foo", column qualifier "1", specified visibility, and a random value of specified size.
    * 
@@ -83,33 +83,33 @@ public class RandomBatchWriter {
    */
   public static Mutation createMutation(long rowid, int dataSize, ColumnVisibility visibility) {
     Text row = new Text(String.format("row_%010d", rowid));
-    
+
     Mutation m = new Mutation(row);
-    
+
     // create a random value that is a function of the
     // row id for verification purposes
     byte value[] = createValue(rowid, dataSize);
-    
+
     m.put(new Text("foo"), new Text("1"), visibility, new Value(value));
-    
+
     return m;
   }
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--num", required=true)
+    @Parameter(names = "--num", required = true)
     int num = 0;
-    @Parameter(names="--min")
+    @Parameter(names = "--min")
     long min = 0;
-    @Parameter(names="--max")
+    @Parameter(names = "--max")
     long max = Long.MAX_VALUE;
-    @Parameter(names="--size", required=true, description="size of the value to write")
+    @Parameter(names = "--size", required = true, description = "size of the value to write")
     int size = 0;
-    @Parameter(names="--vis", converter=VisibilityConverter.class)
+    @Parameter(names = "--vis", converter = VisibilityConverter.class)
     ColumnVisibility visiblity = new ColumnVisibility("");
-    @Parameter(names="--seed", description="seed for pseudo-random number generator")
+    @Parameter(names = "--seed", description = "seed for pseudo-random number generator")
     Long seed = null;
   }
- 
+
   /**
    * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
    * 
@@ -122,7 +122,11 @@ public class RandomBatchWriter {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
     if ((opts.max - opts.min) < opts.num) {
-      System.err.println(String.format("You must specify a min and a max that allow for at least num possible values. For example, you requested %d rows, but a min of %d and a max of %d only allows for %d rows.", opts.num, opts.min, opts.max, (opts.max - opts.min)));
+      System.err
+          .println(String
+              .format(
+                  "You must specify a min and a max that allow for at least num possible values. For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.",
+                  opts.num, opts.min, opts.max, (opts.max - opts.min)));
       System.exit(1);
     }
     Random r;
@@ -133,10 +137,10 @@ public class RandomBatchWriter {
     }
     Connector connector = opts.getConnector();
     BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
-    
+
     // reuse the ColumnVisibility object to improve performance
     ColumnVisibility cv = opts.visiblity;
-   
+
     // Generate num unique row ids in the given range
     HashSet<Long> rowids = new HashSet<Long>(opts.num);
     while (rowids.size() < opts.num) {
@@ -146,7 +150,7 @@ public class RandomBatchWriter {
       Mutation m = createMutation(rowid, opts.size, cv);
       bw.addMutation(m);
     }
-    
+
     try {
       bw.close();
     } catch (MutationsRejectedException e) {
@@ -162,7 +166,7 @@ public class RandomBatchWriter {
         }
         System.err.println("ERROR : Not authorized to write to tables : " + tables);
       }
-      
+
       if (e.getConstraintViolationSummaries().size() > 0) {
         System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
       }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
index dab1e10..3f57119 100644
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
+++ b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
@@ -16,14 +16,17 @@
  */
 package org.apache.accumulo.examples.simple.filedata;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map.Entry;
 
-import junit.framework.TestCase;
-
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
@@ -42,8 +45,11 @@ import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class ChunkInputFormatTest {
 
-public class ChunkInputFormatTest extends TestCase {
   private static AssertionError e0 = null;
   private static AssertionError e1 = null;
   private static AssertionError e2 = null;
@@ -54,7 +60,10 @@ public class ChunkInputFormatTest extends TestCase {
   private static List<Entry<Key,Value>> data;
   private static List<Entry<Key,Value>> baddata;
 
-  {
+  @BeforeClass
+  public static void setupClass() {
+    System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
+
     data = new ArrayList<Entry<Key,Value>>();
     ChunkInputStreamTest.addData(data, "a", "refs", "ida\0ext", "A&B", "ext");
     ChunkInputStreamTest.addData(data, "a", "refs", "ida\0name", "A&B", "name");
@@ -219,11 +228,12 @@ public class ChunkInputFormatTest extends TestCase {
       return job.isSuccessful() ? 0 : 1;
     }
 
-    public static int main(String[] args) throws Exception {
+    public static int main(String... args) throws Exception {
       return ToolRunner.run(new Configuration(), new CIFTester(), args);
     }
   }
 
+  @Test
   public void test() throws Exception {
     MockInstance instance = new MockInstance("instance1");
     Connector conn = instance.getConnector("root", new PasswordToken(""));
@@ -238,11 +248,12 @@ public class ChunkInputFormatTest extends TestCase {
     }
     bw.close();
 
-    assertEquals(0, CIFTester.main(new String[] {"instance1", "root", "", "test", CIFTester.TestMapper.class.getName()}));
+    assertEquals(0, CIFTester.main("instance1", "root", "", "test", CIFTester.TestMapper.class.getName()));
     assertNull(e1);
     assertNull(e2);
   }
 
+  @Test
   public void testErrorOnNextWithoutClose() throws Exception {
     MockInstance instance = new MockInstance("instance2");
     Connector conn = instance.getConnector("root", new PasswordToken(""));
@@ -257,12 +268,13 @@ public class ChunkInputFormatTest extends TestCase {
     }
     bw.close();
 
-    assertEquals(1, CIFTester.main(new String[] {"instance2", "root", "", "test", CIFTester.TestNoClose.class.getName()}));
+    assertEquals(1, CIFTester.main("instance2", "root", "", "test", CIFTester.TestNoClose.class.getName()));
     assertNull(e1);
     assertNull(e2);
     assertNotNull(e3);
   }
 
+  @Test
   public void testInfoWithoutChunks() throws Exception {
     MockInstance instance = new MockInstance("instance3");
     Connector conn = instance.getConnector("root", new PasswordToken(""));
@@ -276,7 +288,7 @@ public class ChunkInputFormatTest extends TestCase {
     }
     bw.close();
 
-    assertEquals(0, CIFTester.main(new String[] {"instance3", "root", "", "test", CIFTester.TestBadData.class.getName()}));
+    assertEquals(0, CIFTester.main("instance3", "root", "", "test", CIFTester.TestBadData.class.getName()));
     assertNull(e0);
     assertNull(e1);
     assertNull(e2);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 09f60c2..2f8f433 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -150,8 +150,16 @@ public class MiniAccumuloCluster {
   private MiniDFSCluster miniDFS = null;
   private List<Process> cleanup = new ArrayList<Process>();
 
-  public Process exec(Class<? extends Object> clazz, String... args) throws IOException {
-    Process proc = exec(clazz, Collections.singletonList("-Xmx" + config.getDefaultMemory()), args);
+  public Process exec(Class<?> clazz, String... args) throws IOException {
+    return exec(clazz, null, args);
+  }
+
+  public Process exec(Class<?> clazz, List<String> jvmArgs, String... args) throws IOException {
+    ArrayList<String> jvmArgs2 = new ArrayList<String>(1 + (jvmArgs == null ? 0 : jvmArgs.size()));
+    jvmArgs2.add("-Xmx" + config.getDefaultMemory());
+    if (jvmArgs != null)
+      jvmArgs2.addAll(jvmArgs);
+    Process proc = _exec(clazz, jvmArgs2, args);
     cleanup.add(proc);
     return proc;
   }
@@ -226,7 +234,7 @@ public class MiniAccumuloCluster {
     }
   }
 
-  private Process exec(Class<? extends Object> clazz, List<String> extraJvmOpts, String... args) throws IOException {
+  private Process _exec(Class<?> clazz, List<String> extraJvmOpts, String... args) throws IOException {
     String javaHome = System.getProperty("java.home");
     String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
     String classpath = getClasspath();
@@ -275,7 +283,7 @@ public class MiniAccumuloCluster {
     return process;
   }
 
-  private Process exec(Class<? extends Object> clazz, ServerType serverType, String... args) throws IOException {
+  private Process _exec(Class<?> clazz, ServerType serverType, String... args) throws IOException {
 
     List<String> jvmOpts = new ArrayList<String>();
     jvmOpts.add("-Xmx" + config.getMemory(serverType));
@@ -285,7 +293,7 @@ public class MiniAccumuloCluster {
       jvmOpts.addAll(buildRemoteDebugParams(port));
       debugPorts.add(new Pair<ServerType,Integer>(serverType, port));
     }
-    return exec(clazz, jvmOpts, args);
+    return _exec(clazz, jvmOpts, args);
   }
 
   /**
@@ -425,7 +433,7 @@ public class MiniAccumuloCluster {
     }
 
     if (zooKeeperProcess == null) {
-      zooKeeperProcess = exec(ZooKeeperServerMain.class, ServerType.ZOOKEEPER, zooCfgFile.getAbsolutePath());
+      zooKeeperProcess = _exec(ZooKeeperServerMain.class, ServerType.ZOOKEEPER, zooCfgFile.getAbsolutePath());
     }
 
     if (!initialized) {
@@ -456,7 +464,7 @@ public class MiniAccumuloCluster {
     }
     synchronized (tabletServerProcesses) {
       for (int i = tabletServerProcesses.size(); i < config.getNumTservers(); i++) {
-        tabletServerProcesses.add(exec(TabletServer.class, ServerType.TABLET_SERVER));
+        tabletServerProcesses.add(_exec(TabletServer.class, ServerType.TABLET_SERVER));
       }
     }
     int ret = 0;
@@ -470,10 +478,10 @@ public class MiniAccumuloCluster {
       throw new RuntimeException("Could not set master goal state, process returned " + ret + ". Check the logs in " + config.getLogDir() + " for errors.");
     }
     if (masterProcess == null) {
-      masterProcess = exec(Master.class, ServerType.MASTER);
+      masterProcess = _exec(Master.class, ServerType.MASTER);
     }
     if (config.shouldRunGC()) {
-      gcProcess = exec(SimpleGarbageCollector.class, ServerType.GARBAGE_COLLECTOR);
+      gcProcess = _exec(SimpleGarbageCollector.class, ServerType.GARBAGE_COLLECTOR);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
index 7ec9fd2..eca7dfe 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
@@ -54,19 +54,19 @@ import org.apache.log4j.Logger;
  *
  */
 public class AuditedSecurityOperation extends SecurityOperation {
-  
+
   public static final String AUDITLOG = "Audit";
   public static final Logger audit = Logger.getLogger(AUDITLOG);
-  
+
   public AuditedSecurityOperation(Authorizor author, Authenticator authent, PermissionHandler pm, String instanceId) {
     super(author, authent, pm, instanceId);
   }
-  
+
   public static synchronized SecurityOperation getInstance() {
     String instanceId = HdfsZooInstance.getInstance().getInstanceID();
     return getInstance(instanceId, false);
   }
-  
+
   public static synchronized SecurityOperation getInstance(String instanceId, boolean initialize) {
     if (instance == null) {
       instance = new AuditedSecurityOperation(getAuthorizor(instanceId, initialize), getAuthenticator(instanceId, initialize), getPermHandler(instanceId,
@@ -74,7 +74,7 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
     return instance;
   }
-  
+
   private static String getTableName(String tableId) {
     try {
       return Tables.getTableName(HdfsZooInstance.getInstance(), tableId);
@@ -82,7 +82,7 @@ public class AuditedSecurityOperation extends SecurityOperation {
       return "Unknown Table with ID " + tableId;
     }
   }
-  
+
   public static StringBuilder getAuthString(List<ByteBuffer> authorizations) {
     StringBuilder auths = new StringBuilder();
     for (ByteBuffer bb : authorizations) {
@@ -90,16 +90,16 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
     return auths;
   }
-  
+
   private static boolean shouldAudit(TCredentials credentials, String tableId) {
     return !tableId.equals(MetadataTable.ID) && shouldAudit(credentials);
   }
-  
+
   // Is INFO the right level to check? Do we even need that check?
   private static boolean shouldAudit(TCredentials credentials) {
     return !SystemCredentials.get().getToken().getClass().getName().equals(credentials.getTokenClassName());
   }
-  
+
   /*
    * Three auditing methods try to capture the 4 states we might have here. audit is in response to a thrown exception, the operation failed (perhaps due to
    * insufficient privs, or some other reason) audit(credentials, template, args) is a successful operation audit(credentials, permitted, template, args) is a
@@ -109,24 +109,24 @@ public class AuditedSecurityOperation extends SecurityOperation {
   private void audit(TCredentials credentials, ThriftSecurityException ex, String template, Object... args) {
     audit.warn("operation: failed; user: " + credentials.getPrincipal() + "; " + String.format(template, args) + "; exception: " + ex.toString());
   }
-  
+
   private void audit(TCredentials credentials, String template, Object... args) {
     if (shouldAudit(credentials)) {
       audit.info("operation: success; user: " + credentials.getPrincipal() + ": " + String.format(template, args));
     }
   }
-  
+
   private void audit(TCredentials credentials, boolean permitted, String template, Object... args) {
     if (shouldAudit(credentials)) {
       String prefix = permitted ? "permitted" : "denied";
       audit.info("operation: " + prefix + "; user: " + credentials.getPrincipal() + "; " + String.format(template, args));
     }
   }
-  
+
   public static final String CAN_SCAN_AUDIT_TEMPLATE = "action: scan; targetTable: %s; authorizations: %s; range: %s; columns: %s; iterators: %s; iteratorOptions: %s;";
   private static final int MAX_ELEMENTS_TO_LOG = 10;
-  
-  private static List<String> truncate(Collection<? extends Object> list) {
+
+  private static List<String> truncate(Collection<?> list) {
     List<String> result = new ArrayList<String>();
     int i = 0;
     for (Object obj : list) {
@@ -138,7 +138,7 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
     return result;
   }
-  
+
   @Override
   public boolean canScan(TCredentials credentials, String tableId, TRange range, List<TColumn> columns, List<IterInfo> ssiList,
       Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
@@ -146,11 +146,11 @@ public class AuditedSecurityOperation extends SecurityOperation {
       Range convertedRange = new Range(range);
       List<String> convertedColumns = truncate(Translator.translate(columns, new Translator.TColumnTranslator()));
       String tableName = getTableName(tableId);
-      
+
       try {
         boolean canScan = super.canScan(credentials, tableId);
         audit(credentials, canScan, CAN_SCAN_AUDIT_TEMPLATE, tableName, getAuthString(authorizations), convertedRange, convertedColumns, ssiList, ssio);
-        
+
         return canScan;
       } catch (ThriftSecurityException ex) {
         audit(credentials, ex, CAN_SCAN_AUDIT_TEMPLATE, getAuthString(authorizations), tableId, convertedRange, convertedColumns, ssiList, ssio);
@@ -160,9 +160,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       return super.canScan(credentials, tableId);
     }
   }
-  
+
   public static final String CAN_SCAN_BATCH_AUDIT_TEMPLATE = "action: scan; targetTable: %s; authorizations: %s; range: %s; columns: %s; iterators: %s; iteratorOptions: %s;";
-  
+
   @Override
   public boolean canScan(TCredentials credentials, String tableId, Map<TKeyExtent,List<TRange>> tbatch, List<TColumn> tcolumns, List<IterInfo> ssiList,
       Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
@@ -170,17 +170,17 @@ public class AuditedSecurityOperation extends SecurityOperation {
       @SuppressWarnings({"unchecked", "rawtypes"})
       Map<KeyExtent,List<Range>> convertedBatch = Translator.translate(tbatch, new Translator.TKeyExtentTranslator(), new Translator.ListTranslator(
           new Translator.TRangeTranslator()));
-      Map<KeyExtent, List<String>> truncated = new HashMap<KeyExtent, List<String>>();
+      Map<KeyExtent,List<String>> truncated = new HashMap<KeyExtent,List<String>>();
       for (Entry<KeyExtent,List<Range>> entry : convertedBatch.entrySet()) {
-          truncated.put(entry.getKey(), truncate(entry.getValue()));
+        truncated.put(entry.getKey(), truncate(entry.getValue()));
       }
       List<Column> convertedColumns = Translator.translate(tcolumns, new Translator.TColumnTranslator());
       String tableName = getTableName(tableId);
-      
+
       try {
         boolean canScan = super.canScan(credentials, tableId);
         audit(credentials, canScan, CAN_SCAN_BATCH_AUDIT_TEMPLATE, tableName, getAuthString(authorizations), truncated, convertedColumns, ssiList, ssio);
-        
+
         return canScan;
       } catch (ThriftSecurityException ex) {
         audit(credentials, ex, CAN_SCAN_BATCH_AUDIT_TEMPLATE, getAuthString(authorizations), tableId, truncated, convertedColumns, ssiList, ssio);
@@ -190,9 +190,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       return super.canScan(credentials, tableId);
     }
   }
-  
+
   public static final String CHANGE_AUTHORIZATIONS_AUDIT_TEMPLATE = "action: changeAuthorizations; targetUser: %s; authorizations: %s";
-  
+
   @Override
   public void changeAuthorizations(TCredentials credentials, String user, Authorizations authorizations) throws ThriftSecurityException {
     try {
@@ -203,9 +203,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CHANGE_PASSWORD_AUDIT_TEMPLATE = "action: changePassword; targetUser: %s;";
-  
+
   @Override
   public void changePassword(TCredentials credentials, Credentials newInfo) throws ThriftSecurityException {
     try {
@@ -216,9 +216,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CREATE_USER_AUDIT_TEMPLATE = "action: createUser; targetUser: %s; Authorizations: %s;";
-  
+
   @Override
   public void createUser(TCredentials credentials, Credentials newUser, Authorizations authorizations) throws ThriftSecurityException {
     try {
@@ -229,9 +229,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_CREATE_TABLE_AUDIT_TEMPLATE = "action: createTable; targetTable: %s;";
-  
+
   @Override
   public boolean canCreateTable(TCredentials c, String tableName) throws ThriftSecurityException {
     try {
@@ -243,9 +243,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_DELETE_TABLE_AUDIT_TEMPLATE = "action: deleteTable; targetTable: %s;";
-  
+
   @Override
   public boolean canDeleteTable(TCredentials c, String tableId) throws ThriftSecurityException {
     String tableName = getTableName(tableId);
@@ -258,9 +258,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_RENAME_TABLE_AUDIT_TEMPLATE = "action: renameTable; targetTable: %s; newTableName: %s;";
-  
+
   @Override
   public boolean canRenameTable(TCredentials c, String tableId, String oldTableName, String newTableName) throws ThriftSecurityException {
     try {
@@ -272,9 +272,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_CLONE_TABLE_AUDIT_TEMPLATE = "action: cloneTable; targetTable: %s; newTableName: %s";
-  
+
   @Override
   public boolean canCloneTable(TCredentials c, String tableId, String tableName) throws ThriftSecurityException {
     String oldTableName = getTableName(tableId);
@@ -287,9 +287,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_DELETE_RANGE_AUDIT_TEMPLATE = "action: deleteData; targetTable: %s; startRange: %s; endRange: %s;";
-  
+
   @Override
   public boolean canDeleteRange(TCredentials c, String tableId, String tableName, Text startRow, Text endRow) throws ThriftSecurityException {
     try {
@@ -301,9 +301,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_BULK_IMPORT_AUDIT_TEMPLATE = "action: bulkImport; targetTable: %s; dataDir: %s; failDir: %s;";
-  
+
   @Override
   public boolean canBulkImport(TCredentials c, String tableId, String tableName, String dir, String failDir) throws ThriftSecurityException {
     try {
@@ -315,12 +315,12 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_IMPORT_AUDIT_TEMPLATE = "action: import; targetTable: %s; dataDir: %s;";
-  
+
   @Override
   public boolean canImport(TCredentials credentials, String tableName, String importDir) throws ThriftSecurityException {
-    
+
     try {
       boolean result = super.canImport(credentials, tableName, importDir);
       audit(credentials, result, CAN_IMPORT_AUDIT_TEMPLATE, tableName, importDir);
@@ -330,12 +330,12 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_EXPORT_AUDIT_TEMPLATE = "action: export; targetTable: %s; dataDir: %s;";
-  
+
   @Override
   public boolean canExport(TCredentials credentials, String tableId, String tableName, String exportDir) throws ThriftSecurityException {
-    
+
     try {
       boolean result = super.canExport(credentials, tableId, tableName, exportDir);
       audit(credentials, result, CAN_EXPORT_AUDIT_TEMPLATE, tableName, exportDir);
@@ -345,9 +345,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String DROP_USER_AUDIT_TEMPLATE = "action: dropUser; targetUser: %s;";
-  
+
   @Override
   public void dropUser(TCredentials credentials, String user) throws ThriftSecurityException {
     try {
@@ -358,9 +358,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String GRANT_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action: grantSystemPermission; permission: %s; targetUser: %s;";
-  
+
   @Override
   public void grantSystemPermission(TCredentials credentials, String user, SystemPermission permission) throws ThriftSecurityException {
     try {
@@ -371,9 +371,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String GRANT_TABLE_PERMISSION_AUDIT_TEMPLATE = "action: grantTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
-  
+
   @Override
   public void grantTablePermission(TCredentials credentials, String user, String tableId, TablePermission permission) throws ThriftSecurityException {
     String tableName = getTableName(tableId);
@@ -385,12 +385,12 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String REVOKE_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action: revokeSystemPermission; permission: %s; targetUser: %s;";
-  
+
   @Override
   public void revokeSystemPermission(TCredentials credentials, String user, SystemPermission permission) throws ThriftSecurityException {
-    
+
     try {
       super.revokeSystemPermission(credentials, user, permission);
       audit(credentials, REVOKE_SYSTEM_PERMISSION_AUDIT_TEMPLATE, permission, user);
@@ -399,9 +399,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String REVOKE_TABLE_PERMISSION_AUDIT_TEMPLATE = "action: revokeTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
-  
+
   @Override
   public void revokeTablePermission(TCredentials credentials, String user, String tableId, TablePermission permission) throws ThriftSecurityException {
     String tableName = getTableName(tableId);
@@ -413,9 +413,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
       throw ex;
     }
   }
-  
+
   public static final String CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE = "action: %s; targetTable: %s;";
-  
+
   @Override
   public boolean canOnlineOfflineTable(TCredentials credentials, String tableId, TableOperation op) throws ThriftSecurityException {
     String tableName = getTableName(tableId);
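
A minimal sketch of how the permitted/denied audit line above is assembled, following the String.format pattern visible in audit(credentials, permitted, template, args); the template string is the one shown above, while the principal and table name are hypothetical values for illustration:

  // Hypothetical inputs, for illustration only
  String template = "action: createTable; targetTable: %s;"; // CAN_CREATE_TABLE_AUDIT_TEMPLATE above
  String principal = "root";                                 // assumed principal
  boolean permitted = true;
  String prefix = permitted ? "permitted" : "denied";
  String line = "operation: " + prefix + "; user: " + principal + "; " + String.format(template, "trades");
  // -> "operation: permitted; user: root; action: createTable; targetTable: trades;"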

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/server/tserver/src/test/java/org/apache/accumulo/tserver/log/TestUpgradePathForWALogs.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/log/TestUpgradePathForWALogs.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/log/TestUpgradePathForWALogs.java
index c03aa17..f1ceb3b 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/log/TestUpgradePathForWALogs.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/log/TestUpgradePathForWALogs.java
@@ -26,12 +26,14 @@ import java.io.OutputStream;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
-import org.junit.After;
 import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
@@ -39,22 +41,24 @@ public class TestUpgradePathForWALogs {
 
   private static final String WALOG_FROM_15 = "/walog-from-15.walog";
   private static final String WALOG_FROM_16 = "/walog-from-16.walog";
+  private static File testDir;
 
   VolumeManager fs;
 
-  TemporaryFolder root;
+  @BeforeClass
+  public static void createTestDirectory() {
+    File baseDir = new File(System.getProperty("user.dir") + "/target/upgrade-tests");
+    baseDir.mkdirs();
+    testDir = new File(baseDir, TestUpgradePathForWALogs.class.getName());
+    FileUtils.deleteQuietly(testDir);
+    testDir.mkdir();
+  }
+
+  @Rule
+  public TemporaryFolder root = new TemporaryFolder(testDir);
 
   @Before
   public void setUp() throws Exception {
-    File tempFile = File.createTempFile("TestUpgradePathForWALogs", "");
-    String tempDirName = tempFile.getAbsolutePath() + "Dir";
-    tempFile.delete();
-
-    File tempDir = new File(tempDirName);
-    tempDir.mkdirs();
-
-    root = new TemporaryFolder(new File(tempDirName));
-
     // quiet log messages about compress.CodecPool
     Logger.getRootLogger().setLevel(Level.ERROR);
     fs = VolumeManagerImpl.getLocal();
@@ -63,20 +67,6 @@ public class TestUpgradePathForWALogs {
     Path manyMapsPath = new Path("file://" + path + "/manyMaps");
     fs.mkdirs(manyMapsPath);
     fs.create(new Path(manyMapsPath, "finished")).close();
-    // FileSystem ns = fs.getDefaultVolume();
-    // Writer writer = new Writer(ns.getConf(), ns, new Path(root, "odd").toString(), IntWritable.class, BytesWritable.class);
-    // BytesWritable value = new BytesWritable("someValue".getBytes());
-    // for (int i = 1; i < 1000; i += 2) {
-    // writer.append(new IntWritable(i), value);
-    // }
-    // writer.close();
-    // writer = new Writer(ns.getConf(), ns, new Path(root, "even").toString(), IntWritable.class, BytesWritable.class);
-    // for (int i = 0; i < 1000; i += 2) {
-    // if (i == 10)
-    // continue;
-    // writer.append(new IntWritable(i), value);
-    // }
-    // writer.close();
   }
 
   @Test
@@ -147,8 +137,4 @@ public class TestUpgradePathForWALogs {
     }
   }
 
-  @After
-  public void tearDown() throws Exception {
-    // root.delete();
-  }
 }
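
The diff above replaces the hand-rolled temporary directory with a static directory created once in @BeforeClass plus a JUnit TemporaryFolder rule rooted in it. A standalone sketch of that pattern, assuming JUnit 4 and commons-io on the classpath (the class and directory names are placeholders):

  import java.io.File;
  import org.apache.commons.io.FileUtils;
  import org.junit.BeforeClass;
  import org.junit.Rule;
  import org.junit.Test;
  import org.junit.rules.TemporaryFolder;

  public class TempFolderPatternExample {
    private static File testDir;

    @BeforeClass
    public static void createTestDirectory() {
      // Build a per-class directory under target/ and start from a clean slate
      File baseDir = new File(System.getProperty("user.dir") + "/target/example-tests");
      baseDir.mkdirs();
      testDir = new File(baseDir, TempFolderPatternExample.class.getName());
      FileUtils.deleteQuietly(testDir);
      testDir.mkdir();
    }

    // Each test method gets a fresh folder under testDir; JUnit deletes it afterwards,
    // so no @After cleanup is needed.
    @Rule
    public TemporaryFolder root = new TemporaryFolder(testDir);

    @Test
    public void usesFolder() throws Exception {
      File scratch = root.newFile("scratch");
      // work with scratch ...
    }
  }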

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c1fbeac5/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java b/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
new file mode 100644
index 0000000..427b12f
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
@@ -0,0 +1,505 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MultiTableBatchWriter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.TableOfflineException;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.impl.MultiTableBatchWriterImpl;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.Credentials;
+import org.apache.accumulo.test.functional.SimpleMacIT;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class MultiTableBatchWriterIT extends SimpleMacIT {
+
+  private Connector connector;
+  private MultiTableBatchWriter mtbw;
+
+  @Before
+  public void setUpArgs() throws AccumuloException, AccumuloSecurityException {
+    connector = getConnector();
+    mtbw = getMultiTableBatchWriter(60);
+  }
+
+  public MultiTableBatchWriter getMultiTableBatchWriter(long cacheTimeoutInSeconds) {
+    return new MultiTableBatchWriterImpl(connector.getInstance(), new Credentials("root", new PasswordToken(getStaticCluster().getConfig().getRootPassword())),
+        new BatchWriterConfig(), cacheTimeoutInSeconds, TimeUnit.SECONDS);
+  }
+
+  @Test
+  public void testTableRenameDataValidation() throws Exception {
+
+    try {
+      final String table1 = "testTableRenameDataValidation_table1", table2 = "testTableRenameDataValidation_table2";
+
+      TableOperations tops = connector.tableOperations();
+      tops.create(table1);
+
+      BatchWriter bw1 = mtbw.getBatchWriter(table1);
+
+      Mutation m1 = new Mutation("foo");
+      m1.put("col1", "", "val1");
+
+      bw1.addMutation(m1);
+
+      tops.rename(table1, table2);
+      tops.create(table1);
+
+      BatchWriter bw2 = mtbw.getBatchWriter(table1);
+
+      Mutation m2 = new Mutation("bar");
+      m2.put("col1", "", "val1");
+
+      bw1.addMutation(m2);
+      bw2.addMutation(m2);
+
+      mtbw.close();
+
+      Map<Entry<String,String>,String> table1Expectations = new HashMap<Entry<String,String>,String>();
+      table1Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
+
+      Map<Entry<String,String>,String> table2Expectations = new HashMap<Entry<String,String>,String>();
+      table2Expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
+      table2Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
+
+      Scanner s = connector.createScanner(table1, new Authorizations());
+      s.setRange(new Range());
+      Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
+      for (Entry<Key,Value> entry : s) {
+        actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
+      }
+
+      Assert.assertEquals("Differing results for " + table1, table1Expectations, actual);
+
+      s = connector.createScanner(table2, new Authorizations());
+      s.setRange(new Range());
+      actual = new HashMap<Entry<String,String>,String>();
+      for (Entry<Key,Value> entry : s) {
+        actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
+      }
+
+      Assert.assertEquals("Differing results for " + table2, table2Expectations, actual);
+
+    } finally {
+      if (null != mtbw) {
+        mtbw.close();
+      }
+    }
+  }
+
+  @Test
+  public void testTableRenameSameWriters() throws Exception {
+
+    try {
+      final String table1 = "testTableRenameSameWriters_table1", table2 = "testTableRenameSameWriters_table2";
+      final String newTable1 = "testTableRenameSameWriters_newTable1", newTable2 = "testTableRenameSameWriters_newTable2";
+
+      TableOperations tops = connector.tableOperations();
+      tops.create(table1);
+      tops.create(table2);
+
+      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+      Mutation m1 = new Mutation("foo");
+      m1.put("col1", "", "val1");
+      m1.put("col2", "", "val2");
+
+      bw1.addMutation(m1);
+      bw2.addMutation(m1);
+
+      tops.rename(table1, newTable1);
+      tops.rename(table2, newTable2);
+
+      Mutation m2 = new Mutation("bar");
+      m2.put("col1", "", "val1");
+      m2.put("col2", "", "val2");
+
+      bw1.addMutation(m2);
+      bw2.addMutation(m2);
+
+      mtbw.close();
+
+      Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
+      expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
+      expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
+      expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
+      expectations.put(Maps.immutableEntry("bar", "col2"), "val2");
+
+      for (String table : Arrays.asList(newTable1, newTable2)) {
+        Scanner s = connector.createScanner(table, new Authorizations());
+        s.setRange(new Range());
+        Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
+        for (Entry<Key,Value> entry : s) {
+          actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
+        }
+
+        Assert.assertEquals("Differing results for " + table, expectations, actual);
+      }
+    } finally {
+      if (null != mtbw) {
+        mtbw.close();
+      }
+    }
+  }
+
+  @Test
+  public void testTableRenameNewWriters() throws Exception {
+
+    try {
+      final String table1 = "testTableRenameNewWriters_table1", table2 = "testTableRenameNewWriters_table2";
+      final String newTable1 = "testTableRenameNewWriters_newTable1", newTable2 = "testTableRenameNewWriters_newTable2";
+
+      TableOperations tops = connector.tableOperations();
+      tops.create(table1);
+      tops.create(table2);
+
+      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+      Mutation m1 = new Mutation("foo");
+      m1.put("col1", "", "val1");
+      m1.put("col2", "", "val2");
+
+      bw1.addMutation(m1);
+      bw2.addMutation(m1);
+
+      tops.rename(table1, newTable1);
+
+      // MTBW is still caching this name to the correct table, but we should invalidate its cache
+      // after seeing the rename
+      try {
+        bw1 = mtbw.getBatchWriter(table1);
+        Assert.fail("Should not be able to find this table");
+      } catch (TableNotFoundException e) {
+        // pass
+      }
+
+      tops.rename(table2, newTable2);
+
+      try {
+        bw2 = mtbw.getBatchWriter(table2);
+        Assert.fail("Should not be able to find this table");
+      } catch (TableNotFoundException e) {
+        // pass
+      }
+
+      bw1 = mtbw.getBatchWriter(newTable1);
+      bw2 = mtbw.getBatchWriter(newTable2);
+
+      Mutation m2 = new Mutation("bar");
+      m2.put("col1", "", "val1");
+      m2.put("col2", "", "val2");
+
+      bw1.addMutation(m2);
+      bw2.addMutation(m2);
+
+      mtbw.close();
+
+      Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
+      expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
+      expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
+      expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
+      expectations.put(Maps.immutableEntry("bar", "col2"), "val2");
+
+      for (String table : Arrays.asList(newTable1, newTable2)) {
+        Scanner s = connector.createScanner(table, new Authorizations());
+        s.setRange(new Range());
+        Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
+        for (Entry<Key,Value> entry : s) {
+          actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
+        }
+
+        Assert.assertEquals("Differing results for " + table, expectations, actual);
+      }
+    } finally {
+      if (null != mtbw) {
+        mtbw.close();
+      }
+    }
+  }
+
+  @Test
+  public void testTableRenameNewWritersNoCaching() throws Exception {
+    mtbw = getMultiTableBatchWriter(0);
+
+    try {
+      final String table1 = "testTableRenameNewWritersNoCaching_table1", table2 = "testTableRenameNewWritersNoCaching_table2";
+      final String newTable1 = "testTableRenameNewWritersNoCaching_newTable1", newTable2 = "testTableRenameNewWritersNoCaching_newTable2";
+
+      TableOperations tops = connector.tableOperations();
+      tops.create(table1);
+      tops.create(table2);
+
+      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+      Mutation m1 = new Mutation("foo");
+      m1.put("col1", "", "val1");
+      m1.put("col2", "", "val2");
+
+      bw1.addMutation(m1);
+      bw2.addMutation(m1);
+
+      tops.rename(table1, newTable1);
+      tops.rename(table2, newTable2);
+
+      try {
+        bw1 = mtbw.getBatchWriter(table1);
+        Assert.fail("Should not have gotten batchwriter for " + table1);
+      } catch (TableNotFoundException e) {
+        // Pass
+      }
+
+      try {
+        bw2 = mtbw.getBatchWriter(table2);
+        Assert.fail("Should not have gotten batchwriter for " + table2);
+      } catch (TableNotFoundException e) {
+        // Pass
+      }
+    } finally {
+      if (null != mtbw) {
+        mtbw.close();
+      }
+    }
+  }
+
+  @Test
+  public void testTableDelete() throws Exception {
+    boolean mutationsRejected = false;
+
+    try {
+      final String table1 = "testTableDelete_table1", table2 = "testTableDelete_table2";
+
+      TableOperations tops = connector.tableOperations();
+      tops.create(table1);
+      tops.create(table2);
+
+      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+      Mutation m1 = new Mutation("foo");
+      m1.put("col1", "", "val1");
+      m1.put("col2", "", "val2");
+
+      bw1.addMutation(m1);
+      bw2.addMutation(m1);
+
+      tops.delete(table1);
+      tops.delete(table2);
+
+      Mutation m2 = new Mutation("bar");
+      m2.put("col1", "", "val1");
+      m2.put("col2", "", "val2");
+
+      try {
+        bw1.addMutation(m2);
+        bw2.addMutation(m2);
+      } catch (MutationsRejectedException e) {
+        // Pass - Mutations might flush immediately
+        mutationsRejected = true;
+      }
+
+    } finally {
+      if (null != mtbw) {
+        try {
+          // Mutations might have flushed before the table delete occurred
+          mtbw.close();
+        } catch (MutationsRejectedException e) {
+          // Pass
+          mutationsRejected = true;
+        }
+      }
+    }
+
+    Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
+  }
+
+  @Test
+  public void testOfflineTable() throws Exception {
+    boolean mutationsRejected = false;
+
+    try {
+      final String table1 = "testOfflineTable_table1", table2 = "testOfflineTable_table2";
+
+      TableOperations tops = connector.tableOperations();
+      tops.create(table1);
+      tops.create(table2);
+
+      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+      Mutation m1 = new Mutation("foo");
+      m1.put("col1", "", "val1");
+      m1.put("col2", "", "val2");
+
+      bw1.addMutation(m1);
+      bw2.addMutation(m1);
+
+      tops.offline(table1, true);
+      tops.offline(table2, true);
+
+      Mutation m2 = new Mutation("bar");
+      m2.put("col1", "", "val1");
+      m2.put("col2", "", "val2");
+
+      try {
+        bw1.addMutation(m2);
+        bw2.addMutation(m2);
+      } catch (MutationsRejectedException e) {
+        // Pass -- Mutations might flush immediately and fail because of offline table
+        mutationsRejected = true;
+      }
+    } finally {
+      if (null != mtbw) {
+        try {
+          mtbw.close();
+        } catch (MutationsRejectedException e) {
+          // Pass
+          mutationsRejected = true;
+        }
+      }
+    }
+
+    Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
+  }
+
+  @Test
+  public void testOfflineTableWithCache() throws Exception {
+    boolean mutationsRejected = false;
+
+    try {
+      final String table1 = "testOfflineTableWithCache_table1", table2 = "testOfflineTableWithCache_table2";
+
+      TableOperations tops = connector.tableOperations();
+      tops.create(table1);
+      tops.create(table2);
+
+      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+      Mutation m1 = new Mutation("foo");
+      m1.put("col1", "", "val1");
+      m1.put("col2", "", "val2");
+
+      bw1.addMutation(m1);
+      bw2.addMutation(m1);
+
+      tops.offline(table1);
+
+      try {
+        bw1 = mtbw.getBatchWriter(table1);
+      } catch (TableOfflineException e) {
+        // pass
+        mutationsRejected = true;
+      }
+
+      tops.offline(table2);
+
+      try {
+        bw2 = mtbw.getBatchWriter(table2);
+      } catch (TableOfflineException e) {
+        // pass
+        mutationsRejected = true;
+      }
+    } finally {
+      if (null != mtbw) {
+        try {
+          // Mutations might have flushed before the table offline occurred
+          mtbw.close();
+        } catch (MutationsRejectedException e) {
+          // Pass
+          mutationsRejected = true;
+        }
+      }
+    }
+
+    Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
+  }
+
+  @Test
+  public void testOfflineTableWithoutCache() throws Exception {
+    mtbw = getMultiTableBatchWriter(0);
+    boolean mutationsRejected = false;
+
+    try {
+      final String table1 = "testOfflineTableWithoutCache_table1", table2 = "testOfflineTableWithoutCache_table2";
+
+      TableOperations tops = connector.tableOperations();
+      tops.create(table1);
+      tops.create(table2);
+
+      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+      Mutation m1 = new Mutation("foo");
+      m1.put("col1", "", "val1");
+      m1.put("col2", "", "val2");
+
+      bw1.addMutation(m1);
+      bw2.addMutation(m1);
+
+      // Mutations might or might not flush before the tables go offline
+      tops.offline(table1);
+      tops.offline(table2);
+
+      try {
+        bw1 = mtbw.getBatchWriter(table1);
+        Assert.fail(table1 + " should be offline");
+      } catch (TableOfflineException e) {
+        // pass
+        mutationsRejected = true;
+      }
+
+      try {
+        bw2 = mtbw.getBatchWriter(table2);
+        Assert.fail(table2 + " should be offline");
+      } catch (TableOfflineException e) {
+        // pass
+        mutationsRejected = true;
+      }
+    } finally {
+      if (null != mtbw) {
+        try {
+          // Mutations might have flushed before the table offline occurred
+          mtbw.close();
+        } catch (MutationsRejectedException e) {
+          // Pass
+          mutationsRejected = true;
+        }
+      }
+    }
+
+    Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
+  }
+}
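
For context, the write path these tests exercise boils down to fetching one BatchWriter per table from a shared MultiTableBatchWriter and closing the shared writer once. A minimal sketch, assuming an existing Connector named connector and two pre-created tables (the table names are placeholders), written inside a method declared to throw Exception:

  // Shared writer; getBatchWriter() resolves and caches the table name -> table id mapping
  MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
  try {
    BatchWriter bw1 = mtbw.getBatchWriter("tableA"); // hypothetical table names
    BatchWriter bw2 = mtbw.getBatchWriter("tableB");

    Mutation m = new Mutation("row1");
    m.put("col1", "", "val1");

    bw1.addMutation(m);
    bw2.addMutation(m);
  } finally {
    // close() flushes; a MutationsRejectedException here means some mutations failed,
    // e.g. because a target table was deleted or taken offline in the meantime
    mtbw.close();
  }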