Posted to commits@accumulo.apache.org by ct...@apache.org on 2015/07/30 23:51:40 UTC

[06/14] accumulo git commit: ACCUMULO-3920 Convert more tests from mock
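
ACCUMULO-3920 tracks moving tests off the deprecated MockInstance. The MapReduce-driven coverage deleted below presumably continues as integration tests against a real instance; a minimal sketch of such a setup, assuming MiniAccumuloCluster from the minicluster module and an invented root password, looks like this:

    import java.io.File;
    import java.nio.file.Files;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.minicluster.MiniAccumuloCluster;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Single-process Accumulo backed by an empty temporary directory.
        File dir = Files.createTempDirectory("mini-accumulo").toFile();
        MiniAccumuloCluster cluster = new MiniAccumuloCluster(dir, "secret");
        cluster.start();
        try {
          // The same Connector entry point the deleted tests reached via MockInstance.
          Connector conn = new ZooKeeperInstance(cluster.getInstanceName(), cluster.getZooKeepers())
              .getConnector("root", new PasswordToken("secret"));
          conn.tableOperations().create("example");
        } finally {
          cluster.stop();
        }
      }
    }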

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormatTest.java
index c02ca66..94ef555 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormatTest.java
@@ -17,120 +17,17 @@
 package org.apache.accumulo.core.client.mapreduce;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 
-import java.io.File;
 import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
-/**
- *
- */
 public class AccumuloOutputFormatTest {
-  private static AssertionError e1 = null;
-  private static final String PREFIX = AccumuloOutputFormatTest.class.getSimpleName();
-  private static final String INSTANCE_NAME = PREFIX + "_mapreduce_instance";
-  private static final String TEST_TABLE_1 = PREFIX + "_mapreduce_table_1";
-  private static final String TEST_TABLE_2 = PREFIX + "_mapreduce_table_2";
-
-  private static class MRTester extends Configured implements Tool {
-    private static class TestMapper extends Mapper<Key,Value,Text,Mutation> {
-      Key key = null;
-      int count = 0;
-
-      @Override
-      protected void map(Key k, Value v, Context context) throws IOException, InterruptedException {
-        try {
-          if (key != null)
-            assertEquals(key.getRow().toString(), new String(v.get()));
-          assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
-          assertEquals(new String(v.get()), String.format("%09x", count));
-        } catch (AssertionError e) {
-          e1 = e;
-        }
-        key = new Key(k);
-        count++;
-      }
-
-      @Override
-      protected void cleanup(Context context) throws IOException, InterruptedException {
-        Mutation m = new Mutation("total");
-        m.put("", "", Integer.toString(count));
-        context.write(new Text(), m);
-      }
-    }
-
-    @Override
-    public int run(String[] args) throws Exception {
-
-      if (args.length != 4) {
-        throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <user> <pass> <inputtable> <outputtable>");
-      }
-
-      String user = args[0];
-      String pass = args[1];
-      String table1 = args[2];
-      String table2 = args[3];
-
-      Job job = Job.getInstance(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
-      job.setJarByClass(this.getClass());
-
-      job.setInputFormatClass(AccumuloInputFormat.class);
-
-      AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
-      AccumuloInputFormat.setInputTableName(job, table1);
-      AccumuloInputFormat.setMockInstance(job, INSTANCE_NAME);
-
-      job.setMapperClass(TestMapper.class);
-      job.setMapOutputKeyClass(Key.class);
-      job.setMapOutputValueClass(Value.class);
-      job.setOutputFormatClass(AccumuloOutputFormat.class);
-      job.setOutputKeyClass(Text.class);
-      job.setOutputValueClass(Mutation.class);
-
-      AccumuloOutputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
-      AccumuloOutputFormat.setCreateTables(job, false);
-      AccumuloOutputFormat.setDefaultTableName(job, table2);
-      AccumuloOutputFormat.setMockInstance(job, INSTANCE_NAME);
-
-      job.setNumReduceTasks(0);
-
-      job.waitForCompletion(true);
-
-      return job.isSuccessful() ? 0 : 1;
-    }
-
-    public static void main(String[] args) throws Exception {
-      Configuration conf = new Configuration();
-      conf.set("mapreduce.cluster.local.dir", new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
-      assertEquals(0, ToolRunner.run(conf, new MRTester(), args));
-    }
-  }
 
   @Test
   public void testBWSettings() throws IOException {
@@ -172,28 +69,4 @@ public class AccumuloOutputFormatTest {
     myAOF.checkOutputSpecs(job);
   }
 
-  @Test
-  public void testMR() throws Exception {
-    Instance mockInstance = new org.apache.accumulo.core.client.mock.MockInstance(INSTANCE_NAME);
-    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
-    c.tableOperations().create(TEST_TABLE_1);
-    c.tableOperations().create(TEST_TABLE_2);
-    BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
-    for (int i = 0; i < 100; i++) {
-      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
-      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    MRTester.main(new String[] {"root", "", TEST_TABLE_1, TEST_TABLE_2});
-    assertNull(e1);
-
-    Scanner scanner = c.createScanner(TEST_TABLE_2, new Authorizations());
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-    assertTrue(iter.hasNext());
-    Entry<Key,Value> entry = iter.next();
-    assertEquals(Integer.parseInt(new String(entry.getValue().get())), 100);
-    assertFalse(iter.hasNext());
-  }
 }
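
What remains of AccumuloOutputFormatTest is the configuration-only test kept in the second hunk (testBWSettings), which needs no instance at all: it round-trips a BatchWriterConfig through the Hadoop Job and calls checkOutputSpecs. A minimal sketch of that pattern, with an invented latency value, using only the imports the trimmed file keeps:

    @Test
    public void batchWriterConfigRoundTrip() throws IOException {
      Job job = Job.getInstance();
      BatchWriterConfig bwConfig = new BatchWriterConfig();
      bwConfig.setMaxLatency(7, TimeUnit.SECONDS);
      AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);
      // The setting survives serialization into the job configuration.
      assertEquals(7000, AccumuloOutputFormat.getBatchWriterOptions(job).getMaxLatency(TimeUnit.MILLISECONDS));
    }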

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormatTest.java
deleted file mode 100644
index 8df9d0f..0000000
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloRowInputFormatTest.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.client.mapreduce;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyValue;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.PeekingIterator;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
-
-public class AccumuloRowInputFormatTest {
-  private static final String PREFIX = AccumuloRowInputFormatTest.class.getSimpleName();
-  private static final String INSTANCE_NAME = PREFIX + "_mapreduce_instance";
-  private static final String TEST_TABLE_1 = PREFIX + "_mapreduce_table_1";
-
-  private static final String ROW1 = "row1";
-  private static final String ROW2 = "row2";
-  private static final String ROW3 = "row3";
-  private static final String COLF1 = "colf1";
-  private static final List<Entry<Key,Value>> row1;
-  private static final List<Entry<Key,Value>> row2;
-  private static final List<Entry<Key,Value>> row3;
-  private static AssertionError e1 = null;
-  private static AssertionError e2 = null;
-
-  static {
-    row1 = new ArrayList<Entry<Key,Value>>();
-    row1.add(new KeyValue(new Key(ROW1, COLF1, "colq1"), "v1".getBytes()));
-    row1.add(new KeyValue(new Key(ROW1, COLF1, "colq2"), "v2".getBytes()));
-    row1.add(new KeyValue(new Key(ROW1, "colf2", "colq3"), "v3".getBytes()));
-    row2 = new ArrayList<Entry<Key,Value>>();
-    row2.add(new KeyValue(new Key(ROW2, COLF1, "colq4"), "v4".getBytes()));
-    row3 = new ArrayList<Entry<Key,Value>>();
-    row3.add(new KeyValue(new Key(ROW3, COLF1, "colq5"), "v5".getBytes()));
-  }
-
-  public static void checkLists(final List<Entry<Key,Value>> first, final List<Entry<Key,Value>> second) {
-    assertEquals("Sizes should be the same.", first.size(), second.size());
-    for (int i = 0; i < first.size(); i++) {
-      assertEquals("Keys should be equal.", first.get(i).getKey(), second.get(i).getKey());
-      assertEquals("Values should be equal.", first.get(i).getValue(), second.get(i).getValue());
-    }
-  }
-
-  public static void checkLists(final List<Entry<Key,Value>> first, final Iterator<Entry<Key,Value>> second) {
-    int entryIndex = 0;
-    while (second.hasNext()) {
-      final Entry<Key,Value> entry = second.next();
-      assertEquals("Keys should be equal", first.get(entryIndex).getKey(), entry.getKey());
-      assertEquals("Values should be equal", first.get(entryIndex).getValue(), entry.getValue());
-      entryIndex++;
-    }
-  }
-
-  public static void insertList(final BatchWriter writer, final List<Entry<Key,Value>> list) throws MutationsRejectedException {
-    for (Entry<Key,Value> e : list) {
-      final Key key = e.getKey();
-      final Mutation mutation = new Mutation(key.getRow());
-      ColumnVisibility colVisibility = new ColumnVisibility(key.getColumnVisibility());
-      mutation.put(key.getColumnFamily(), key.getColumnQualifier(), colVisibility, key.getTimestamp(), e.getValue());
-      writer.addMutation(mutation);
-    }
-  }
-
-  private static class MRTester extends Configured implements Tool {
-    private static class TestMapper extends Mapper<Text,PeekingIterator<Entry<Key,Value>>,Key,Value> {
-      int count = 0;
-
-      @Override
-      protected void map(Text k, PeekingIterator<Entry<Key,Value>> v, Context context) throws IOException, InterruptedException {
-        try {
-          switch (count) {
-            case 0:
-              assertEquals("Current key should be " + ROW1, new Text(ROW1), k);
-              checkLists(row1, v);
-              break;
-            case 1:
-              assertEquals("Current key should be " + ROW2, new Text(ROW2), k);
-              checkLists(row2, v);
-              break;
-            case 2:
-              assertEquals("Current key should be " + ROW3, new Text(ROW3), k);
-              checkLists(row3, v);
-              break;
-            default:
-              assertTrue(false);
-          }
-        } catch (AssertionError e) {
-          e1 = e;
-        }
-        count++;
-      }
-
-      @Override
-      protected void cleanup(Context context) throws IOException, InterruptedException {
-        try {
-          assertEquals(3, count);
-        } catch (AssertionError e) {
-          e2 = e;
-        }
-      }
-    }
-
-    @Override
-    public int run(String[] args) throws Exception {
-
-      if (args.length != 3) {
-        throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <user> <pass> <table>");
-      }
-
-      String user = args[0];
-      String pass = args[1];
-      String table = args[2];
-
-      Job job = Job.getInstance(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
-      job.setJarByClass(this.getClass());
-
-      job.setInputFormatClass(AccumuloRowInputFormat.class);
-
-      AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
-      AccumuloInputFormat.setInputTableName(job, table);
-      AccumuloRowInputFormat.setMockInstance(job, INSTANCE_NAME);
-
-      job.setMapperClass(TestMapper.class);
-      job.setMapOutputKeyClass(Key.class);
-      job.setMapOutputValueClass(Value.class);
-      job.setOutputFormatClass(NullOutputFormat.class);
-
-      job.setNumReduceTasks(0);
-
-      job.waitForCompletion(true);
-
-      return job.isSuccessful() ? 0 : 1;
-    }
-
-    public static void main(String[] args) throws Exception {
-      Configuration conf = new Configuration();
-      conf.set("mapreduce.cluster.local.dir", new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
-      assertEquals(0, ToolRunner.run(conf, new MRTester(), args));
-    }
-  }
-
-  @Test
-  public void test() throws Exception {
-    final Instance instance = new org.apache.accumulo.core.client.mock.MockInstance(INSTANCE_NAME);
-    final Connector conn = instance.getConnector("root", new PasswordToken(""));
-    conn.tableOperations().create(TEST_TABLE_1);
-    BatchWriter writer = null;
-    try {
-      writer = conn.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
-      insertList(writer, row1);
-      insertList(writer, row2);
-      insertList(writer, row3);
-    } finally {
-      if (writer != null) {
-        writer.close();
-      }
-    }
-    MRTester.main(new String[] {"root", "", TEST_TABLE_1});
-    assertNull(e1);
-    assertNull(e2);
-  }
-}
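
AccumuloRowInputFormat hands the mapper one call per row, with all of that row's cells behind a PeekingIterator, which is exactly what the deleted TestMapper asserted. A skeleton of that contract, with invented class and variable names:

    public static class RowMapper extends Mapper<Text,PeekingIterator<Entry<Key,Value>>,Text,Text> {
      @Override
      protected void map(Text row, PeekingIterator<Entry<Key,Value>> cells, Context context)
          throws IOException, InterruptedException {
        while (cells.hasNext()) {
          Entry<Key,Value> cell = cells.next();
          // each cell belongs to the single row named by the key
        }
      }
    }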

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/core/src/test/java/org/apache/accumulo/core/client/mapreduce/BadPasswordSplitsAccumuloInputFormat.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/BadPasswordSplitsAccumuloInputFormat.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/BadPasswordSplitsAccumuloInputFormat.java
deleted file mode 100644
index 9028d94..0000000
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/BadPasswordSplitsAccumuloInputFormat.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.client.mapreduce;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
-
-/**
- * AccumuloInputFormat which replaces each RangeInputSplit's password token with an invalid one
- */
-public class BadPasswordSplitsAccumuloInputFormat extends AccumuloInputFormat {
-
-  @Override
-  public List<InputSplit> getSplits(JobContext context) throws IOException {
-    List<InputSplit> splits = super.getSplits(context);
-
-    for (InputSplit split : splits) {
-      org.apache.accumulo.core.client.mapreduce.RangeInputSplit rangeSplit = (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
-      rangeSplit.setToken(new PasswordToken("anythingelse"));
-    }
-
-    return splits;
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/core/src/test/java/org/apache/accumulo/core/client/mapreduce/EmptySplitsAccumuloInputFormat.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/EmptySplitsAccumuloInputFormat.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/EmptySplitsAccumuloInputFormat.java
deleted file mode 100644
index dd531c0..0000000
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/EmptySplitsAccumuloInputFormat.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.client.mapreduce;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
-
-/**
- * AccumuloInputFormat which returns an "empty" RangeInputSplit
- */
-public class EmptySplitsAccumuloInputFormat extends AccumuloInputFormat {
-
-  @Override
-  public List<InputSplit> getSplits(JobContext context) throws IOException {
-    List<InputSplit> oldSplits = super.getSplits(context);
-    List<InputSplit> newSplits = new ArrayList<InputSplit>(oldSplits.size());
-
-    // Copy only the necessary information
-    for (InputSplit oldSplit : oldSplits) {
-      org.apache.accumulo.core.client.mapreduce.RangeInputSplit newSplit = new org.apache.accumulo.core.client.mapreduce.RangeInputSplit(
-          (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) oldSplit);
-      newSplits.add(newSplit);
-    }
-
-    return newSplits;
-  }
-}
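
Both deleted helpers existed to feed doctored splits back into test jobs: BadPasswordSplitsAccumuloInputFormat stamps every split with a wrong token, while EmptySplitsAccumuloInputFormat copies only each split's basic fields, presumably forcing readers to fall back on the job configuration. Either is wired in with one line of job setup, for example:

    // Swap the input format to exercise split-level credential handling.
    job.setInputFormatClass(BadPasswordSplitsAccumuloInputFormat.class);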

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/core/src/test/java/org/apache/accumulo/core/client/mapreduce/TokenFileTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/TokenFileTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/TokenFileTest.java
deleted file mode 100644
index 825d905..0000000
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/TokenFileTest.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.client.mapreduce;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-/**
- *
- */
-public class TokenFileTest {
-  private static AssertionError e1 = null;
-  private static final String PREFIX = TokenFileTest.class.getSimpleName();
-  private static final String INSTANCE_NAME = PREFIX + "_mapreduce_instance";
-  private static final String TEST_TABLE_1 = PREFIX + "_mapreduce_table_1";
-  private static final String TEST_TABLE_2 = PREFIX + "_mapreduce_table_2";
-
-  private static class MRTokenFileTester extends Configured implements Tool {
-    private static class TestMapper extends Mapper<Key,Value,Text,Mutation> {
-      Key key = null;
-      int count = 0;
-
-      @Override
-      protected void map(Key k, Value v, Context context) throws IOException, InterruptedException {
-        try {
-          if (key != null)
-            assertEquals(key.getRow().toString(), new String(v.get()));
-          assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
-          assertEquals(new String(v.get()), String.format("%09x", count));
-        } catch (AssertionError e) {
-          e1 = e;
-        }
-        key = new Key(k);
-        count++;
-      }
-
-      @Override
-      protected void cleanup(Context context) throws IOException, InterruptedException {
-        Mutation m = new Mutation("total");
-        m.put("", "", Integer.toString(count));
-        context.write(new Text(), m);
-      }
-    }
-
-    @Override
-    public int run(String[] args) throws Exception {
-
-      if (args.length != 4) {
-        throw new IllegalArgumentException("Usage : " + MRTokenFileTester.class.getName() + " <user> <token file> <inputtable> <outputtable>");
-      }
-
-      String user = args[0];
-      String tokenFile = args[1];
-      String table1 = args[2];
-      String table2 = args[3];
-
-      Job job = Job.getInstance(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
-      job.setJarByClass(this.getClass());
-
-      job.setInputFormatClass(AccumuloInputFormat.class);
-
-      AccumuloInputFormat.setConnectorInfo(job, user, tokenFile);
-      AccumuloInputFormat.setInputTableName(job, table1);
-      AccumuloInputFormat.setMockInstance(job, INSTANCE_NAME);
-
-      job.setMapperClass(TestMapper.class);
-      job.setMapOutputKeyClass(Key.class);
-      job.setMapOutputValueClass(Value.class);
-      job.setOutputFormatClass(AccumuloOutputFormat.class);
-      job.setOutputKeyClass(Text.class);
-      job.setOutputValueClass(Mutation.class);
-
-      AccumuloOutputFormat.setConnectorInfo(job, user, tokenFile);
-      AccumuloOutputFormat.setCreateTables(job, false);
-      AccumuloOutputFormat.setDefaultTableName(job, table2);
-      AccumuloOutputFormat.setMockInstance(job, INSTANCE_NAME);
-
-      job.setNumReduceTasks(0);
-
-      job.waitForCompletion(true);
-
-      return job.isSuccessful() ? 0 : 1;
-    }
-
-    public static void main(String[] args) throws Exception {
-      Configuration conf = CachedConfiguration.getInstance();
-      conf.set("hadoop.tmp.dir", new File(args[1]).getParent());
-      conf.set("mapreduce.cluster.local.dir", new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
-      assertEquals(0, ToolRunner.run(conf, new MRTokenFileTester(), args));
-    }
-  }
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
-
-  @Test
-  public void testMR() throws Exception {
-    Instance mockInstance = new org.apache.accumulo.core.client.mock.MockInstance(INSTANCE_NAME);
-    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
-    c.tableOperations().create(TEST_TABLE_1);
-    c.tableOperations().create(TEST_TABLE_2);
-    BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
-    for (int i = 0; i < 100; i++) {
-      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
-      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    File tf = folder.newFile("root_test.pw");
-    PrintStream out = new PrintStream(tf);
-    String outString = new Credentials("root", new PasswordToken("")).serialize();
-    out.println(outString);
-    out.close();
-
-    MRTokenFileTester.main(new String[] {"root", tf.getAbsolutePath(), TEST_TABLE_1, TEST_TABLE_2});
-    assertNull(e1);
-
-    Scanner scanner = c.createScanner(TEST_TABLE_2, new Authorizations());
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-    assertTrue(iter.hasNext());
-    Entry<Key,Value> entry = iter.next();
-    assertEquals(Integer.parseInt(new String(entry.getValue().get())), 100);
-    assertFalse(iter.hasNext());
-  }
-}
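
TokenFileTest exercised the token-file overloads of setConnectorInfo: instead of a live AuthenticationToken, the job is pointed at a file holding serialized credentials. Condensed from the deleted test body:

    // Write serialized credentials to a file, then hand the job the path.
    File tf = folder.newFile("root_test.pw");
    try (PrintStream out = new PrintStream(tf)) {
      out.println(new Credentials("root", new PasswordToken("")).serialize());
    }
    AccumuloInputFormat.setConnectorInfo(job, "root", tf.getAbsolutePath());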

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java b/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java
index 286b343..700f93b 100644
--- a/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java
@@ -16,30 +16,18 @@
  */
 package org.apache.accumulo.core.iterators.user;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map.Entry;
 import java.util.Random;
 import java.util.TreeMap;
 
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.DefaultIteratorEnvironment;
@@ -47,7 +35,6 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.SortedMapIterator;
 import org.apache.accumulo.core.iterators.system.MultiIterator;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 import org.junit.Rule;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
index dabb4c1..111fae0 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
@@ -233,7 +233,7 @@ public class FileCount {
     }
   }
 
-  FileCount(Opts opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
+  public FileCount(Opts opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
     this.opts = opts;
     this.scanOpts = scanOpts;
     this.bwOpts = bwOpts;
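
The constructor is widened from package-private to public, presumably so the dirlist coverage (CountTest is deleted below) can keep constructing FileCount from outside the package. Usage stays as the deleted test showed:

    // Construct with parsed options, then run the count.
    FileCount fc = new FileCount(opts, scanOpts, bwOpts);
    fc.run();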

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
deleted file mode 100644
index 492f3e5..0000000
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.dirlist;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.util.ArrayList;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts.Password;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.examples.simple.dirlist.FileCount.Opts;
-import org.apache.hadoop.io.Text;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-
-public class CountTest {
-
-  @Rule
-  public TestName test = new TestName();
-
-  private Connector conn;
-  private String tableName;
-
-  @Before
-  public void setupInstance() throws Exception {
-    tableName = test.getMethodName();
-    Instance inst = new org.apache.accumulo.core.client.mock.MockInstance(test.getMethodName());
-    conn = inst.getConnector("root", new PasswordToken(""));
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-    ColumnVisibility cv = new ColumnVisibility();
-    // / has 1 dir
-    // /local has 2 dirs 1 file
-    // /local/user1 has 2 files
-    bw.addMutation(Ingest.buildMutation(cv, "/local", true, false, true, 272, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user1", true, false, true, 272, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user2", true, false, true, 272, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 23456, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
-    bw.close();
-  }
-
-  @Test
-  public void test() throws Exception {
-    Scanner scanner = conn.createScanner(tableName, new Authorizations());
-    scanner.fetchColumn(new Text("dir"), new Text("counts"));
-    assertFalse(scanner.iterator().hasNext());
-
-    Opts opts = new Opts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.instance = "counttest";
-    opts.setTableName(tableName);
-    opts.setPassword(new Password("secret"));
-    opts.mock = true;
-    opts.setPassword(new Opts.Password(""));
-    FileCount fc = new FileCount(opts, scanOpts, bwOpts);
-    fc.run();
-
-    ArrayList<Pair<String,String>> expected = new ArrayList<Pair<String,String>>();
-    expected.add(new Pair<String,String>(QueryUtil.getRow("").toString(), "1,0,3,3"));
-    expected.add(new Pair<String,String>(QueryUtil.getRow("/local").toString(), "2,1,2,3"));
-    expected.add(new Pair<String,String>(QueryUtil.getRow("/local/user1").toString(), "0,2,0,2"));
-    expected.add(new Pair<String,String>(QueryUtil.getRow("/local/user2").toString(), "0,0,0,0"));
-
-    int i = 0;
-    for (Entry<Key,Value> e : scanner) {
-      assertEquals(e.getKey().getRow().toString(), expected.get(i).getFirst());
-      assertEquals(e.getValue().toString(), expected.get(i).getSecond());
-      i++;
-    }
-    assertEquals(i, expected.size());
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
deleted file mode 100644
index 997612f..0000000
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormatTest.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-
-public class ChunkInputFormatTest {
-
-  private static AssertionError e0 = null;
-  private static AssertionError e1 = null;
-  private static AssertionError e2 = null;
-  private static IOException e3 = null;
-
-  private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
-
-  private static List<Entry<Key,Value>> data;
-  private static List<Entry<Key,Value>> baddata;
-
-  private Connector conn;
-
-  @Rule
-  public TestName test = new TestName();
-
-  @Before
-  public void setupInstance() throws Exception {
-    Instance instance = new org.apache.accumulo.core.client.mock.MockInstance(test.getMethodName());
-    conn = instance.getConnector("root", new PasswordToken(""));
-  }
-
-  @BeforeClass
-  public static void setupClass() {
-    System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
-
-    data = new ArrayList<Entry<Key,Value>>();
-    ChunkInputStreamTest.addData(data, "a", "refs", "ida\0ext", "A&B", "ext");
-    ChunkInputStreamTest.addData(data, "a", "refs", "ida\0name", "A&B", "name");
-    ChunkInputStreamTest.addData(data, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    ChunkInputStreamTest.addData(data, "a", "~chunk", 100, 1, "A&B", "");
-    ChunkInputStreamTest.addData(data, "b", "refs", "ida\0ext", "A&B", "ext");
-    ChunkInputStreamTest.addData(data, "b", "refs", "ida\0name", "A&B", "name");
-    ChunkInputStreamTest.addData(data, "b", "~chunk", 100, 0, "A&B", "qwertyuiop");
-    ChunkInputStreamTest.addData(data, "b", "~chunk", 100, 0, "B&C", "qwertyuiop");
-    ChunkInputStreamTest.addData(data, "b", "~chunk", 100, 1, "A&B", "");
-    ChunkInputStreamTest.addData(data, "b", "~chunk", 100, 1, "B&C", "");
-    ChunkInputStreamTest.addData(data, "b", "~chunk", 100, 1, "D", "");
-    baddata = new ArrayList<Entry<Key,Value>>();
-    ChunkInputStreamTest.addData(baddata, "c", "refs", "ida\0ext", "A&B", "ext");
-    ChunkInputStreamTest.addData(baddata, "c", "refs", "ida\0name", "A&B", "name");
-  }
-
-  public static void entryEquals(Entry<Key,Value> e1, Entry<Key,Value> e2) {
-    assertEquals(e1.getKey(), e2.getKey());
-    assertEquals(e1.getValue(), e2.getValue());
-  }
-
-  public static class CIFTester extends Configured implements Tool {
-    public static class TestMapper extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
-      int count = 0;
-
-      @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
-        byte[] b = new byte[20];
-        int read;
-        try {
-          switch (count) {
-            case 0:
-              assertEquals(key.size(), 2);
-              entryEquals(key.get(0), data.get(0));
-              entryEquals(key.get(1), data.get(1));
-              assertEquals(read = value.read(b), 8);
-              assertEquals(new String(b, 0, read), "asdfjkl;");
-              assertEquals(read = value.read(b), -1);
-              break;
-            case 1:
-              assertEquals(key.size(), 2);
-              entryEquals(key.get(0), data.get(4));
-              entryEquals(key.get(1), data.get(5));
-              assertEquals(read = value.read(b), 10);
-              assertEquals(new String(b, 0, read), "qwertyuiop");
-              assertEquals(read = value.read(b), -1);
-              break;
-            default:
-              assertTrue(false);
-          }
-        } catch (AssertionError e) {
-          e1 = e;
-        } finally {
-          value.close();
-        }
-        count++;
-      }
-
-      @Override
-      protected void cleanup(Context context) throws IOException, InterruptedException {
-        try {
-          assertEquals(2, count);
-        } catch (AssertionError e) {
-          e2 = e;
-        }
-      }
-    }
-
-    public static class TestNoClose extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
-      int count = 0;
-
-      @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
-        byte[] b = new byte[5];
-        int read;
-        try {
-          switch (count) {
-            case 0:
-              assertEquals(read = value.read(b), 5);
-              assertEquals(new String(b, 0, read), "asdfj");
-              break;
-            default:
-              assertTrue(false);
-          }
-        } catch (AssertionError e) {
-          e1 = e;
-        }
-        count++;
-        try {
-          context.nextKeyValue();
-          assertTrue(false);
-        } catch (IOException ioe) {
-          e3 = ioe;
-        }
-      }
-    }
-
-    public static class TestBadData extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
-      @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
-        byte[] b = new byte[20];
-        try {
-          assertEquals(key.size(), 2);
-          entryEquals(key.get(0), baddata.get(0));
-          entryEquals(key.get(1), baddata.get(1));
-        } catch (AssertionError e) {
-          e0 = e;
-        }
-        try {
-          assertFalse(value.read(b) > 0);
-          try {
-            fail();
-          } catch (AssertionError e) {
-            e1 = e;
-          }
-        } catch (Exception e) {
-          // expected, ignore
-        }
-        try {
-          value.close();
-          try {
-            fail();
-          } catch (AssertionError e) {
-            e2 = e;
-          }
-        } catch (Exception e) {
-          // expected, ignore
-        }
-      }
-    }
-
-    @Override
-    public int run(String[] args) throws Exception {
-      if (args.length != 5) {
-        throw new IllegalArgumentException("Usage : " + CIFTester.class.getName() + " <instance name> <user> <pass> <table> <mapperClass>");
-      }
-
-      String instance = args[0];
-      String user = args[1];
-      String pass = args[2];
-      String table = args[3];
-
-      Job job = Job.getInstance(getConf());
-      job.setJobName(this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
-      job.setJarByClass(this.getClass());
-
-      job.setInputFormatClass(ChunkInputFormat.class);
-
-      ChunkInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
-      ChunkInputFormat.setInputTableName(job, table);
-      ChunkInputFormat.setScanAuthorizations(job, AUTHS);
-      ChunkInputFormat.setMockInstance(job, instance);
-
-      @SuppressWarnings("unchecked")
-      Class<? extends Mapper<?,?,?,?>> forName = (Class<? extends Mapper<?,?,?,?>>) Class.forName(args[4]);
-      job.setMapperClass(forName);
-      job.setMapOutputKeyClass(Key.class);
-      job.setMapOutputValueClass(Value.class);
-      job.setOutputFormatClass(NullOutputFormat.class);
-
-      job.setNumReduceTasks(0);
-
-      job.waitForCompletion(true);
-
-      return job.isSuccessful() ? 0 : 1;
-    }
-
-    public static int main(String... args) throws Exception {
-      Configuration conf = new Configuration();
-      conf.set("mapreduce.cluster.local.dir", new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
-      return ToolRunner.run(conf, new CIFTester(), args);
-    }
-  }
-
-  @Test
-  public void test() throws Exception {
-    conn.tableOperations().create("test");
-    BatchWriter bw = conn.createBatchWriter("test", new BatchWriterConfig());
-
-    for (Entry<Key,Value> e : data) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    assertEquals(0, CIFTester.main(conn.getInstance().getInstanceName(), "root", "", "test", CIFTester.TestMapper.class.getName()));
-    assertNull(e1);
-    assertNull(e2);
-  }
-
-  @Test
-  public void testErrorOnNextWithoutClose() throws Exception {
-    conn.tableOperations().create("test");
-    BatchWriter bw = conn.createBatchWriter("test", new BatchWriterConfig());
-
-    for (Entry<Key,Value> e : data) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    assertEquals(1, CIFTester.main(conn.getInstance().getInstanceName(), "root", "", "test", CIFTester.TestNoClose.class.getName()));
-    assertNull(e1);
-    assertNull(e2);
-    assertNotNull(e3);
-  }
-
-  @Test
-  public void testInfoWithoutChunks() throws Exception {
-    conn.tableOperations().create("test");
-    BatchWriter bw = conn.createBatchWriter("test", new BatchWriterConfig());
-    for (Entry<Key,Value> e : baddata) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    assertEquals(0, CIFTester.main(conn.getInstance().getInstanceName(), "root", "", "test", CIFTester.TestBadData.class.getName()));
-    assertNull(e0);
-    assertNull(e1);
-    assertNull(e2);
-  }
-}
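
ChunkInputFormat presents each stored file to the mapper as a key holding its "refs" entries and a value streaming the reassembled chunk data; the deleted TestNoClose mapper also showed that advancing to the next key while the stream is still open makes nextKeyValue() throw IOException. A skeleton of a well-behaved consumer, with invented names:

    public static class FileMapper extends Mapper<List<Entry<Key,Value>>,InputStream,Text,Text> {
      @Override
      protected void map(List<Entry<Key,Value>> refs, InputStream chunkData, Context context)
          throws IOException, InterruptedException {
        byte[] buf = new byte[4096];
        while (chunkData.read(buf) != -1) {
          // consume the file's reassembled bytes
        }
        chunkData.close(); // close before the framework advances to the next file
      }
    }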

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
index 614a480..17fbb76 100644
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
+++ b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
@@ -27,36 +27,21 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyValue;
-import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.PeekingIterator;
 import org.apache.hadoop.io.Text;
 import org.junit.Before;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class ChunkInputStreamTest {
   private static final Logger log = LoggerFactory.getLogger(ChunkInputStream.class);
-  List<Entry<Key,Value>> data;
-  List<Entry<Key,Value>> baddata;
-  List<Entry<Key,Value>> multidata;
+  private List<Entry<Key,Value>> data;
+  private List<Entry<Key,Value>> baddata;
+  private List<Entry<Key,Value>> multidata;
 
   @Before
   public void setupData() {
@@ -103,11 +88,11 @@ public class ChunkInputStreamTest {
     addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
   }
 
-  public static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis, String value) {
+  private static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis, String value) {
     data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)), value.getBytes()));
   }
 
-  public static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize, int chunkCount, String vis, String value) {
+  private static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize, int chunkCount, String vis, String value) {
     Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
     chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
     data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)), value.getBytes()));
@@ -235,63 +220,6 @@ public class ChunkInputStreamTest {
     assertFalse(pi.hasNext());
   }
 
-  @Rule
-  public TestName test = new TestName();
-
-  @Test
-  public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
-    Instance inst = new org.apache.accumulo.core.client.mock.MockInstance(test.getMethodName());
-    Connector conn = inst.getConnector("root", new PasswordToken(""));
-    conn.tableOperations().create("test");
-    BatchWriter bw = conn.createBatchWriter("test", new BatchWriterConfig());
-
-    for (Entry<Key,Value> e : data) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    Scanner scan = conn.createScanner("test", new Authorizations("A", "B", "C", "D"));
-
-    ChunkInputStream cis = new ChunkInputStream();
-    byte[] b = new byte[20];
-    int read;
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(scan.iterator());
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 10);
-    assertEquals(new String(b, 0, read), "qwertyuiop");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 16);
-    assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    assertFalse(pi.hasNext());
-  }
-
   private static void assumeExceptionOnRead(ChunkInputStream cis, byte[] b) {
     try {
       assertEquals(0, cis.read(b));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
deleted file mode 100644
index 19bf13a..0000000
--- a/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.util;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.util.TabletIterator.TabletDeletedException;
-import org.apache.hadoop.io.Text;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TestName;
-
-public class TabletIteratorTest {
-
-  @Rule
-  public TestName test = new TestName();
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  class TestTabletIterator extends TabletIterator {
-
-    private Connector conn;
-
-    public TestTabletIterator(Connector conn) throws Exception {
-      super(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY), MetadataSchema.TabletsSection.getRange(), true, true);
-      this.conn = conn;
-    }
-
-    @Override
-    protected void resetScanner() {
-      try {
-        Scanner ds = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-        Text tablet = new KeyExtent(new Text("0"), new Text("m"), null).getMetadataEntry();
-        ds.setRange(new Range(tablet, true, tablet, true));
-
-        Mutation m = new Mutation(tablet);
-
-        BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-        for (Entry<Key,Value> entry : ds) {
-          Key k = entry.getKey();
-          m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
-        }
-
-        bw.addMutation(m);
-
-        bw.close();
-
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
-
-      super.resetScanner();
-    }
-
-  }
-
-  // simulate a merge happening while iterating over tablets
-  @Test
-  public void testMerge() throws Exception {
-    Instance mi = new org.apache.accumulo.core.client.mock.MockInstance(test.getMethodName());
-    Connector conn = mi.getConnector("", new PasswordToken(""));
-
-    KeyExtent ke1 = new KeyExtent(new Text("0"), new Text("m"), null);
-    Mutation mut1 = ke1.getPrevRowUpdateMutation();
-    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut1, new Value("/d1".getBytes()));
-
-    KeyExtent ke2 = new KeyExtent(new Text("0"), null, null);
-    Mutation mut2 = ke2.getPrevRowUpdateMutation();
-    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut2, new Value("/d2".getBytes()));
-
-    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    bw1.addMutation(mut1);
-    bw1.addMutation(mut2);
-    bw1.close();
-
-    TestTabletIterator tabIter = new TestTabletIterator(conn);
-
-    exception.expect(TabletDeletedException.class);
-    while (tabIter.hasNext()) {
-      tabIter.next();
-    }
-  }
-}
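The deleted test above pinned down one contract: TabletIterator reports a concurrent merge by throwing TabletDeletedException rather than silently skipping tablets. A minimal sketch of a caller honoring that contract, assuming only the constructor and exception type shown above (the restart policy and the process(...) helper are illustrative, not part of the API):

    // Restart the metadata walk whenever a merge deletes tablets mid-scan.
    while (true) {
      try {
        TabletIterator iter = new TabletIterator(
            conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY),
            MetadataSchema.TabletsSection.getRange(), true, true);
        while (iter.hasNext()) {
          process(iter.next()); // placeholder for per-tablet work
        }
        break; // completed a full pass
      } catch (TabletDeletedException tde) {
        // tablets were deleted underneath us; start the walk over
      }
    }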

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/server/gc/src/test/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferencesTest.java
----------------------------------------------------------------------
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferencesTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferencesTest.java
deleted file mode 100644
index 92a72fb..0000000
--- a/server/gc/src/test/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferencesTest.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.gc.replication;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.ConfigurationCopy;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.conf.SiteConfiguration;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
-import org.apache.accumulo.server.replication.StatusUtil;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.apache.hadoop.io.Text;
-import org.easymock.EasyMock;
-import org.easymock.IAnswer;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-
-import com.google.common.collect.Iterables;
-
-public class CloseWriteAheadLogReferencesTest {
-
-  private CloseWriteAheadLogReferences refs;
-  private Connector conn;
-
-  @Rule
-  public TestName testName = new TestName();
-
-  @Before
-  public void setupInstance() throws Exception {
-    Instance inst = new org.apache.accumulo.core.client.mock.MockInstance(testName.getMethodName());
-    conn = inst.getConnector("root", new PasswordToken(""));
-  }
-
-  @Before
-  public void setup() {
-    Instance mockInst = createMock(Instance.class);
-    SiteConfiguration siteConfig = EasyMock.createMock(SiteConfiguration.class);
-    expect(mockInst.getInstanceID()).andReturn(testName.getMethodName()).anyTimes();
-    expect(mockInst.getZooKeepers()).andReturn("localhost").anyTimes();
-    expect(mockInst.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();
-    final AccumuloConfiguration systemConf = new ConfigurationCopy(new HashMap<String,String>());
-    ServerConfigurationFactory factory = createMock(ServerConfigurationFactory.class);
-    expect(factory.getConfiguration()).andReturn(systemConf).anyTimes();
-    expect(factory.getInstance()).andReturn(mockInst).anyTimes();
-    expect(factory.getSiteConfiguration()).andReturn(siteConfig).anyTimes();
-
-    // Just make the SiteConfiguration delegate to our AccumuloConfiguration
-    // Presently, we only need get(Property), getBoolean(Property), and iterator().
-    EasyMock.expect(siteConfig.get(EasyMock.anyObject(Property.class))).andAnswer(new IAnswer<String>() {
-      @Override
-      public String answer() {
-        Object[] args = EasyMock.getCurrentArguments();
-        return systemConf.get((Property) args[0]);
-      }
-    }).anyTimes();
-    EasyMock.expect(siteConfig.getBoolean(EasyMock.anyObject(Property.class))).andAnswer(new IAnswer<Boolean>() {
-      @Override
-      public Boolean answer() {
-        Object[] args = EasyMock.getCurrentArguments();
-        return systemConf.getBoolean((Property) args[0]);
-      }
-    }).anyTimes();
-
-    EasyMock.expect(siteConfig.iterator()).andAnswer(new IAnswer<Iterator<Entry<String,String>>>() {
-      @Override
-      public Iterator<Entry<String,String>> answer() {
-        return systemConf.iterator();
-      }
-    }).anyTimes();
-
-    replay(mockInst, factory, siteConfig);
-    refs = new CloseWriteAheadLogReferences(new AccumuloServerContext(factory));
-  }
-
-  @Test
-  public void unclosedWalsLeaveStatusOpen() throws Exception {
-    Set<String> wals = Collections.emptySet();
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    Mutation m = new Mutation(ReplicationSection.getRowPrefix() + "file:/accumulo/wal/tserver+port/12345");
-    m.put(ReplicationSection.COLF, new Text("1"), StatusUtil.fileCreatedValue(System.currentTimeMillis()));
-    bw.addMutation(m);
-    bw.close();
-
-    refs.updateReplicationEntries(conn, wals);
-
-    Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    Entry<Key,Value> entry = Iterables.getOnlyElement(s);
-    Status status = Status.parseFrom(entry.getValue().get());
-    Assert.assertFalse(status.getClosed());
-  }
-
-  @Test
-  public void closedWalsUpdateStatus() throws Exception {
-    String file = "file:/accumulo/wal/tserver+port/12345";
-    Set<String> wals = Collections.singleton(file);
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    Mutation m = new Mutation(ReplicationSection.getRowPrefix() + file);
-    m.put(ReplicationSection.COLF, new Text("1"), StatusUtil.fileCreatedValue(System.currentTimeMillis()));
-    bw.addMutation(m);
-    bw.close();
-
-    refs.updateReplicationEntries(conn, wals);
-
-    Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    Entry<Key,Value> entry = Iterables.getOnlyElement(s);
-    Status status = Status.parseFrom(entry.getValue().get());
-    Assert.assertTrue(status.getClosed());
-  }
-
-  @Test
-  public void partiallyReplicatedReferencedWalsAreNotClosed() throws Exception {
-    String file = "file:/accumulo/wal/tserver+port/12345";
-    Set<String> wals = Collections.singleton(file);
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
-    Mutation m = new Mutation(file);
-    StatusSection.add(m, new Text("1"), ProtobufUtil.toValue(StatusUtil.ingestedUntil(1000)));
-    bw.addMutation(m);
-    bw.close();
-
-    refs.updateReplicationEntries(conn, wals);
-
-    Scanner s = ReplicationTable.getScanner(conn);
-    Entry<Key,Value> entry = Iterables.getOnlyElement(s);
-    Status status = Status.parseFrom(entry.getValue().get());
-    Assert.assertFalse(status.getClosed());
-  }
-
-}
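The three deleted cases above document updateReplicationEntries: a WAL named in the given set has its metadata replication status flipped to closed, a WAL absent from the set stays open, and a status entry living only in the replication table is left untouched. A minimal sketch of the call-and-verify pattern the tests used (table contents assumed set up as in the deleted fixtures):

    // Mark the metadata replication entry for this WAL as closed.
    Set<String> wals = Collections.singleton("file:/accumulo/wal/tserver+port/12345");
    refs.updateReplicationEntries(conn, wals);

    Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    Status status = Status.parseFrom(Iterables.getOnlyElement(s).getValue().get());
    // status.getClosed() should now be true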

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
index 7f23ad5..7d8f406 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
@@ -16,6 +16,8 @@
  */
 package org.apache.accumulo.master;
 
+import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
+
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -96,15 +98,14 @@ import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.slf4j.Logger;
 
-import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 import com.google.protobuf.InvalidProtocolBufferException;
 
-class MasterClientServiceHandler extends FateServiceHandler implements MasterClientService.Iface {
+public class MasterClientServiceHandler extends FateServiceHandler implements MasterClientService.Iface {
 
   private static final Logger log = Master.log;
   private Instance instance;
 
-  MasterClientServiceHandler(Master master) {
+  protected MasterClientServiceHandler(Master master) {
     super(master);
     this.instance = master.getInstance();
   }
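The widening in this hunk (class from package-private to public, constructor to protected) lets test code in other packages subclass the handler; the ReplicationOperationsImplTest removed below does exactly that, overriding the protected getTableId hook. A minimal sketch of the pattern, with the lookup body elided:

    // Hypothetical subclass; lookupTableId stands in for whatever resolution the caller needs.
    MasterClientServiceHandler handler = new MasterClientServiceHandler(master) {
      @Override
      protected String getTableId(Instance inst, String tableName) throws ThriftTableOperationException {
        return lookupTableId(inst, tableName);
      }
    };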

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cc3c0111/server/master/src/test/java/org/apache/accumulo/master/ReplicationOperationsImplTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/ReplicationOperationsImplTest.java b/server/master/src/test/java/org/apache/accumulo/master/ReplicationOperationsImplTest.java
deleted file mode 100644
index a18e5e9..0000000
--- a/server/master/src/test/java/org/apache/accumulo/master/ReplicationOperationsImplTest.java
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.master;
-
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.ReplicationOperationsImpl;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.tabletserver.log.LogEntry;
-import org.apache.accumulo.core.trace.thrift.TInfo;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.apache.hadoop.io.Text;
-import org.apache.thrift.TException;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ReplicationOperationsImplTest {
-  private static final Logger log = LoggerFactory.getLogger(ReplicationOperationsImplTest.class);
-
-  private Instance inst;
-
-  @Rule
-  public TestName test = new TestName();
-
-  @Before
-  public void setup() {
-    inst = new org.apache.accumulo.core.client.mock.MockInstance(test.getMethodName());
-  }
-
-  /**
-   * Spoof out the Master so we can call the implementation without starting a full instance.
-   */
-  private ReplicationOperationsImpl getReplicationOperations(ClientContext context) throws Exception {
-    Master master = EasyMock.createMock(Master.class);
-    EasyMock.expect(master.getConnector()).andReturn(inst.getConnector("root", new PasswordToken(""))).anyTimes();
-    EasyMock.expect(master.getInstance()).andReturn(inst).anyTimes();
-    EasyMock.replay(master);
-
-    final MasterClientServiceHandler mcsh = new MasterClientServiceHandler(master) {
-      @Override
-      protected String getTableId(Instance inst, String tableName) throws ThriftTableOperationException {
-        try {
-          return inst.getConnector("root", new PasswordToken("")).tableOperations().tableIdMap().get(tableName);
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    };
-
-    return new ReplicationOperationsImpl(context) {
-      @Override
-      protected boolean getMasterDrain(final TInfo tinfo, final TCredentials rpcCreds, final String tableName, final Set<String> wals)
-          throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-        try {
-          return mcsh.drainReplicationTable(tinfo, rpcCreds, tableName, wals);
-        } catch (TException e) {
-          throw new RuntimeException(e);
-        }
-      }
-    };
-  }
-
-  @Test
-  public void waitsUntilEntriesAreReplicated() throws Exception {
-    Connector conn = inst.getConnector("root", new PasswordToken(""));
-    conn.tableOperations().create("foo");
-    Text tableId = new Text(conn.tableOperations().tableIdMap().get("foo"));
-
-    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
-    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
-
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
-
-    Mutation m = new Mutation(file1);
-    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-
-    m = new Mutation(file2);
-    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-
-    bw.close();
-
-    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
-    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));
-
-    bw.addMutation(m);
-
-    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
-    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));
-
-    bw.addMutation(m);
-
-    bw.close();
-
-    final AtomicBoolean done = new AtomicBoolean(false);
-    final AtomicBoolean exception = new AtomicBoolean(false);
-    ClientContext context = new ClientContext(inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
-    final ReplicationOperationsImpl roi = getReplicationOperations(context);
-    Thread t = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          roi.drain("foo");
-        } catch (Exception e) {
-          log.error("Got error", e);
-          exception.set(true);
-        }
-        done.set(true);
-      }
-    });
-
-    t.start();
-
-    // With the records still present, the drain should not have completed
-    Assert.assertFalse(done.get());
-
-    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
-    m.putDelete(ReplicationSection.COLF, tableId);
-    bw.addMutation(m);
-    bw.flush();
-
-    Assert.assertFalse(done.get());
-
-    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
-    m.putDelete(ReplicationSection.COLF, tableId);
-    bw.addMutation(m);
-    bw.flush();
-    bw.close();
-
-    // Removing metadata entries doesn't change anything
-    Assert.assertFalse(done.get());
-
-    // Remove the replication entries too
-    bw = ReplicationTable.getBatchWriter(conn);
-    m = new Mutation(file1);
-    m.putDelete(StatusSection.NAME, tableId);
-    bw.addMutation(m);
-    bw.flush();
-
-    Assert.assertFalse(done.get());
-
-    m = new Mutation(file2);
-    m.putDelete(StatusSection.NAME, tableId);
-    bw.addMutation(m);
-    bw.flush();
-
-    try {
-      t.join(5000);
-    } catch (InterruptedException e) {
-      Assert.fail("ReplicationOperations.drain did not complete");
-    }
-
-    // After both metadata and replication
-    Assert.assertTrue("Drain never finished", done.get());
-    Assert.assertFalse("Saw unexpectetd exception", exception.get());
-  }
-
-  @Test
-  public void unrelatedReplicationRecordsDontBlockDrain() throws Exception {
-    Connector conn = inst.getConnector("root", new PasswordToken(""));
-    conn.tableOperations().create("foo");
-    conn.tableOperations().create("bar");
-
-    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));
-    Text tableId2 = new Text(conn.tableOperations().tableIdMap().get("bar"));
-
-    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
-    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
-
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
-
-    Mutation m = new Mutation(file1);
-    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-
-    m = new Mutation(file2);
-    StatusSection.add(m, tableId2, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-
-    bw.close();
-
-    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
-    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
-
-    bw.addMutation(m);
-
-    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
-    m.put(ReplicationSection.COLF, tableId2, ProtobufUtil.toValue(stat));
-
-    bw.addMutation(m);
-
-    bw.close();
-
-    final AtomicBoolean done = new AtomicBoolean(false);
-    final AtomicBoolean exception = new AtomicBoolean(false);
-    ClientContext context = new ClientContext(inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
-
-    final ReplicationOperationsImpl roi = getReplicationOperations(context);
-
-    Thread t = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          roi.drain("foo");
-        } catch (Exception e) {
-          log.error("Got error", e);
-          exception.set(true);
-        }
-        done.set(true);
-      }
-    });
-
-    t.start();
-
-    // With the records still present, the drain should not have completed
-    Assert.assertFalse(done.get());
-
-    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
-    m.putDelete(ReplicationSection.COLF, tableId1);
-    bw.addMutation(m);
-    bw.flush();
-
-    // Removing metadata entries doesn't change anything
-    Assert.assertFalse(done.get());
-
-    // Remove the replication entries too
-    bw = ReplicationTable.getBatchWriter(conn);
-    m = new Mutation(file1);
-    m.putDelete(StatusSection.NAME, tableId1);
-    bw.addMutation(m);
-    bw.flush();
-
-    try {
-      t.join(5000);
-    } catch (InterruptedException e) {
-      Assert.fail("ReplicationOperations.drain did not complete");
-    }
-
-    // After both metadata and replication
-    Assert.assertTrue("Drain never completed", done.get());
-    Assert.assertFalse("Saw unexpected exception", exception.get());
-  }
-
-  @Test
-  public void inprogressReplicationRecordsBlockExecution() throws Exception {
-    Connector conn = inst.getConnector("root", new PasswordToken(""));
-    conn.tableOperations().create("foo");
-
-    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));
-
-    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
-    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
-
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
-
-    Mutation m = new Mutation(file1);
-    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-    bw.close();
-
-    LogEntry logEntry = new LogEntry(new KeyExtent(new Text(tableId1), null, null), System.currentTimeMillis(), "tserver", file1);
-
-    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
-    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-
-    m = new Mutation(logEntry.getRow());
-    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
-    bw.addMutation(m);
-
-    bw.close();
-
-    final AtomicBoolean done = new AtomicBoolean(false);
-    final AtomicBoolean exception = new AtomicBoolean(false);
-    ClientContext context = new ClientContext(inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
-    final ReplicationOperationsImpl roi = getReplicationOperations(context);
-    Thread t = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          roi.drain("foo");
-        } catch (Exception e) {
-          log.error("Got error", e);
-          exception.set(true);
-        }
-        done.set(true);
-      }
-    });
-
-    t.start();
-
-    // With the records still present, the drain should not have completed
-    Assert.assertFalse(done.get());
-
-    Status newStatus = Status.newBuilder().setBegin(1000).setEnd(2000).setInfiniteEnd(false).setClosed(true).build();
-    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
-    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(newStatus));
-    bw.addMutation(m);
-    bw.flush();
-
-    // Updating the metadata entry to a closed-but-unreplicated status doesn't finish the drain
-    Assert.assertFalse(done.get());
-
-    // Update the replication entry the same way
-    bw = ReplicationTable.getBatchWriter(conn);
-    m = new Mutation(file1);
-    m.put(StatusSection.NAME, tableId1, ProtobufUtil.toValue(newStatus));
-    bw.addMutation(m);
-    bw.flush();
-
-    try {
-      t.join(5000);
-    } catch (InterruptedException e) {
-      Assert.fail("ReplicationOperations.drain did not complete");
-    }
-
-    // New records, but not fully replicated ones don't cause it to complete
-    Assert.assertFalse("Drain somehow finished", done.get());
-    Assert.assertFalse("Saw unexpected exception", exception.get());
-  }
-
-  @Test
-  public void laterCreatedLogsDontBlockExecution() throws Exception {
-    Connector conn = inst.getConnector("root", new PasswordToken(""));
-    conn.tableOperations().create("foo");
-
-    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));
-
-    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
-    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
-
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
-    Mutation m = new Mutation(file1);
-    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-    bw.close();
-
-    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
-    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-
-    bw.close();
-
-    System.out.println("Reading metadata first time");
-    for (Entry<Key,Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
-      System.out.println(e.getKey());
-    }
-
-    final AtomicBoolean done = new AtomicBoolean(false);
-    final AtomicBoolean exception = new AtomicBoolean(false);
-    ClientContext context = new ClientContext(inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
-    final ReplicationOperationsImpl roi = getReplicationOperations(context);
-    Thread t = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          roi.drain("foo");
-        } catch (Exception e) {
-          log.error("Got error", e);
-          exception.set(true);
-        }
-        done.set(true);
-      }
-    });
-
-    t.start();
-
-    // Wait long enough for the drain thread to scan the metadata table once
-    Thread.sleep(2000);
-
-    // Write another file, but also delete the old files
-    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    m = new Mutation(ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
-    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
-    bw.addMutation(m);
-    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
-    m.putDelete(ReplicationSection.COLF, tableId1);
-    bw.addMutation(m);
-    bw.close();
-
-    System.out.println("Reading metadata second time");
-    for (Entry<Key,Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
-      System.out.println(e.getKey());
-    }
-
-    bw = ReplicationTable.getBatchWriter(conn);
-    m = new Mutation(file1);
-    m.putDelete(StatusSection.NAME, tableId1);
-    bw.addMutation(m);
-    bw.close();
-
-    try {
-      t.join(5000);
-    } catch (InterruptedException e) {
-      Assert.fail("ReplicationOperations.drain did not complete");
-    }
-
-    // Drain should finish promptly: it only waits on the files captured when it started, and we deleted that one
-    Assert.assertTrue("Drain didn't finish", done.get());
-  }
-
-}
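Taken together, the deleted tests document the blocking contract of ReplicationOperationsImpl.drain: it returns only after both the metadata replication entries and the replication-table status entries for the WALs captured at call time are gone, it keeps blocking while a closed-but-unreplicated status remains, and WALs created after the call began do not hold it up. A minimal sketch of the client-side call, using only the types the deleted test used:

    // Block until every WAL referenced by table "foo" at call time is fully replicated.
    ClientContext context = new ClientContext(inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    ReplicationOperationsImpl repl = new ReplicationOperationsImpl(context);
    repl.drain("foo"); // returns once the captured metadata and replication entries are removed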