Posted to commits@accumulo.apache.org by ec...@apache.org on 2015/06/04 20:52:50 UTC

[09/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
deleted file mode 100644
index 0c22196..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.junit.Test;
-
-public class RenameIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void renameTest() throws Exception {
-    String[] tableNames = getUniqueNames(2);
-    String name1 = tableNames[0];
-    String name2 = tableNames[1];
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.createTable = true;
-    opts.setTableName(name1);
-
-    final ClientConfiguration clientConfig = cluster.getClientConfig();
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      opts.updateKerberosCredentials(clientConfig);
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-    }
-
-    Connector c = getConnector();
-    TestIngest.ingest(c, opts, bwOpts);
-    c.tableOperations().rename(name1, name2);
-    TestIngest.ingest(c, opts, bwOpts);
-    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      vopts.updateKerberosCredentials(clientConfig);
-    } else {
-      vopts.setPrincipal(getAdminPrincipal());
-    }
-
-    vopts.setTableName(name2);
-    VerifyIngest.verifyIngest(c, vopts, scanOpts);
-    c.tableOperations().delete(name1);
-    c.tableOperations().rename(name2, name1);
-    vopts.setTableName(name1);
-    VerifyIngest.verifyIngest(c, vopts, scanOpts);
-  }
-
-}
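
For reference, the behavior this deleted test exercised reduces to the rename round trip below. A minimal sketch, assuming an existing Connector "conn" and a populated table; the table names are illustrative:

    import org.apache.accumulo.core.client.Connector;

    public class RenameSketch {
      // Rename is a metadata-only operation: ingested data follows the table to
      // its new name, and the old name immediately stops resolving.
      static void renameRoundTrip(Connector conn) throws Exception {
        conn.tableOperations().rename("t1", "t2"); // scans of "t2" now see t1's data
        conn.tableOperations().rename("t2", "t1"); // and back again
      }
    }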

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
deleted file mode 100644
index 39e9bed..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-
-public class RestartIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(RestartIT.class);
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 10 * 60;
-  }
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-    cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
-    cfg.setProperty(Property.GC_CYCLE_START, "1s");
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-  }
-
-  private static final ScannerOpts SOPTS = new ScannerOpts();
-  private static final VerifyIngest.Opts VOPTS = new VerifyIngest.Opts();
-  private static final TestIngest.Opts OPTS = new TestIngest.Opts();
-  private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
-  static {
-    OPTS.rows = VOPTS.rows = 10 * 1000;
-  }
-
-  private ExecutorService svc;
-
-  @Before
-  public void setup() throws Exception {
-    svc = Executors.newFixedThreadPool(1);
-  }
-
-  @After
-  public void teardown() throws Exception {
-    if (null == svc) {
-      return;
-    }
-
-    if (!svc.isShutdown()) {
-      svc.shutdown();
-    }
-
-    while (!svc.awaitTermination(10, TimeUnit.SECONDS)) {
-      log.info("Waiting for threadpool to terminate");
-    }
-  }
-
-  @Test
-  public void restartMaster() throws Exception {
-    Connector c = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-    OPTS.setTableName(tableName);
-    VOPTS.setTableName(tableName);
-    c.tableOperations().create(tableName);
-    final AuthenticationToken token = getAdminToken();
-    final ClusterControl control = getCluster().getClusterControl();
-
-    final String[] args;
-    if (token instanceof PasswordToken) {
-      byte[] password = ((PasswordToken) token).getPassword();
-      args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
-          cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", tableName};
-      OPTS.setPrincipal(getAdminPrincipal());
-      VOPTS.setPrincipal(getAdminPrincipal());
-    } else if (token instanceof KerberosToken) {
-      ClusterUser rootUser = getAdminUser();
-      args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
-          cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", tableName};
-      ClientConfiguration clientConfig = cluster.getClientConfig();
-      OPTS.updateKerberosCredentials(clientConfig);
-      VOPTS.updateKerberosCredentials(clientConfig);
-    } else {
-      throw new RuntimeException("Unknown token");
-    }
-
-    Future<Integer> ret = svc.submit(new Callable<Integer>() {
-      @Override
-      public Integer call() {
-        try {
-          return control.exec(TestIngest.class, args);
-        } catch (IOException e) {
-          log.error("Error running TestIngest", e);
-          return -1;
-        }
-      }
-    });
-
-    control.stopAllServers(ServerType.MASTER);
-    control.startAllServers(ServerType.MASTER);
-    assertEquals(0, ret.get().intValue());
-    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
-  }
-
-  @Test
-  public void restartMasterRecovery() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    OPTS.setTableName(tableName);
-    VOPTS.setTableName(tableName);
-    ClientConfiguration clientConfig = cluster.getClientConfig();
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      OPTS.updateKerberosCredentials(clientConfig);
-      VOPTS.updateKerberosCredentials(clientConfig);
-    } else {
-      OPTS.setPrincipal(getAdminPrincipal());
-      VOPTS.setPrincipal(getAdminPrincipal());
-    }
-    TestIngest.ingest(c, OPTS, BWOPTS);
-    ClusterControl control = getCluster().getClusterControl();
-
-    // TODO implement a kill all too?
-    // cluster.stop() would also stop ZooKeeper
-    control.stopAllServers(ServerType.MASTER);
-    control.stopAllServers(ServerType.TRACER);
-    control.stopAllServers(ServerType.TABLET_SERVER);
-    control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
-    control.stopAllServers(ServerType.MONITOR);
-
-    ZooReader zreader = new ZooReader(c.getInstance().getZooKeepers(), c.getInstance().getZooKeepersSessionTimeOut());
-    ZooCache zcache = new ZooCache(zreader, null);
-    byte[] masterLockData;
-    do {
-      masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
-      if (null != masterLockData) {
-        log.info("Master lock is still held");
-        Thread.sleep(1000);
-      }
-    } while (null != masterLockData);
-
-    cluster.start();
-    UtilWaitThread.sleep(5);
-    control.stopAllServers(ServerType.MASTER);
-
-    masterLockData = new byte[0];
-    do {
-      masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
-      if (null != masterLockData) {
-        log.info("Master lock is still held");
-        Thread.sleep(1000);
-      }
-    } while (null != masterLockData);
-    cluster.start();
-    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
-  }
-
-  @Test
-  public void restartMasterSplit() throws Exception {
-    Connector c = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-    final AuthenticationToken token = getAdminToken();
-    final ClusterControl control = getCluster().getClusterControl();
-    VOPTS.setTableName(tableName);
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
-
-    final String[] args;
-    if (token instanceof PasswordToken) {
-      byte[] password = ((PasswordToken) token).getPassword();
-      args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
-          cluster.getZooKeepers(), "--rows", Integer.toString(VOPTS.rows), "--table", tableName};
-      OPTS.setPrincipal(getAdminPrincipal());
-      VOPTS.setPrincipal(getAdminPrincipal());
-    } else if (token instanceof KerberosToken) {
-      ClusterUser rootUser = getAdminUser();
-      args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
-          cluster.getZooKeepers(), "--rows", Integer.toString(VOPTS.rows), "--table", tableName};
-      ClientConfiguration clientConfig = cluster.getClientConfig();
-      OPTS.updateKerberosCredentials(clientConfig);
-      VOPTS.updateKerberosCredentials(clientConfig);
-    } else {
-      throw new RuntimeException("Unknown token");
-    }
-
-    Future<Integer> ret = svc.submit(new Callable<Integer>() {
-      @Override
-      public Integer call() {
-        try {
-          return control.exec(TestIngest.class, args);
-        } catch (Exception e) {
-          log.error("Error running TestIngest", e);
-          return -1;
-        }
-      }
-    });
-
-    control.stopAllServers(ServerType.MASTER);
-
-    ZooReader zreader = new ZooReader(c.getInstance().getZooKeepers(), c.getInstance().getZooKeepersSessionTimeOut());
-    ZooCache zcache = new ZooCache(zreader, null);
-    byte[] masterLockData;
-    do {
-      masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
-      if (null != masterLockData) {
-        log.info("Master lock is still held");
-        Thread.sleep(1000);
-      }
-    } while (null != masterLockData);
-
-    cluster.start();
-    assertEquals(0, ret.get().intValue());
-    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
-  }
-
-  @Test
-  public void killedTabletServer() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    OPTS.setTableName(tableName);
-    VOPTS.setTableName(tableName);
-    ClientConfiguration clientConfig = cluster.getClientConfig();
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      OPTS.updateKerberosCredentials(clientConfig);
-      VOPTS.updateKerberosCredentials(clientConfig);
-    } else {
-      OPTS.setPrincipal(getAdminPrincipal());
-      VOPTS.setPrincipal(getAdminPrincipal());
-    }
-    TestIngest.ingest(c, OPTS, BWOPTS);
-    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
-    cluster.getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
-    cluster.start();
-    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
-  }
-
-  @Test
-  public void killedTabletServer2() throws Exception {
-    final Connector c = getConnector();
-    final String[] names = getUniqueNames(2);
-    final String tableName = names[0];
-    final ClusterControl control = getCluster().getClusterControl();
-    c.tableOperations().create(tableName);
-    // Original test started and then stopped a GC. Not sure why it did this. The GC was
-    // already running by default, and it would have had nothing to do after only creating a table.
-    control.stopAllServers(ServerType.TABLET_SERVER);
-
-    cluster.start();
-    c.tableOperations().create(names[1]);
-  }
-
-  @Test
-  public void killedTabletServerDuringShutdown() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    OPTS.setTableName(tableName);
-    ClientConfiguration clientConfig = cluster.getClientConfig();
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      OPTS.updateKerberosCredentials(clientConfig);
-    } else {
-      OPTS.setPrincipal(getAdminPrincipal());
-    }
-    TestIngest.ingest(c, OPTS, BWOPTS);
-    try {
-      getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
-      getCluster().getClusterControl().adminStopAll();
-    } finally {
-      getCluster().start();
-    }
-  }
-
-  @Test
-  public void shutdownDuringCompactingSplitting() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    VOPTS.setTableName(tableName);
-    ClientConfiguration clientConfig = cluster.getClientConfig();
-    if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      OPTS.updateKerberosCredentials(clientConfig);
-      VOPTS.updateKerberosCredentials(clientConfig);
-    } else {
-      OPTS.setPrincipal(getAdminPrincipal());
-      VOPTS.setPrincipal(getAdminPrincipal());
-    }
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
-    String splitThreshold = null;
-    for (Entry<String,String> entry : c.tableOperations().getProperties(tableName)) {
-      if (entry.getKey().equals(Property.TABLE_SPLIT_THRESHOLD.getKey())) {
-        splitThreshold = entry.getValue();
-        break;
-      }
-    }
-    Assert.assertNotNull(splitThreshold);
-    try {
-      c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "20K");
-      TestIngest.Opts opts = new TestIngest.Opts();
-      opts.setTableName(tableName);
-      if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-        opts.updateKerberosCredentials(clientConfig);
-      } else {
-        opts.setPrincipal(getAdminPrincipal());
-      }
-      TestIngest.ingest(c, opts, BWOPTS);
-      c.tableOperations().flush(tableName, null, null, false);
-      VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
-      getCluster().stop();
-    } finally {
-      if (getClusterType() == ClusterType.STANDALONE) {
-        getCluster().start();
-        c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), splitThreshold);
-      }
-    }
-  }
-}
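
The master-lock polling loop above appears three times in this test; extracted into a helper it is just the sketch below, using the same ZooCache/ZooLock calls as the diff (the helper name is invented):

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.core.zookeeper.ZooUtil;
    import org.apache.accumulo.fate.zookeeper.ZooCache;
    import org.apache.accumulo.fate.zookeeper.ZooLock;

    final class MasterLockWait {
      // Poll ZooKeeper until nothing holds the master lock, meaning the master
      // process is really gone and a restart will not race a live instance.
      static void awaitMasterLockRelease(Instance inst, ZooCache zcache) throws InterruptedException {
        String lockPath = ZooUtil.getRoot(inst) + Constants.ZMASTER_LOCK;
        while (ZooLock.getLockData(zcache, lockPath, null) != null) {
          Thread.sleep(1000); // lock still held; check again in a second
        }
      }
    }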

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
deleted file mode 100644
index abfd5d8..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-
-public class RestartStressIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(RestartStressIT.class);
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> opts = cfg.getSiteConfig();
-    opts.put(Property.TSERV_MAXMEM.getKey(), "100K");
-    opts.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
-    opts.put(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1M");
-    opts.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s");
-    opts.put(Property.MASTER_RECOVERY_DELAY.getKey(), "1s");
-    cfg.setSiteConfig(opts);
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 10 * 60;
-  }
-
-  private ExecutorService svc;
-
-  @Before
-  public void setup() throws Exception {
-    svc = Executors.newFixedThreadPool(1);
-  }
-
-  @After
-  public void teardown() throws Exception {
-    if (null == svc) {
-      return;
-    }
-
-    if (!svc.isShutdown()) {
-      svc.shutdown();
-    }
-
-    while (!svc.awaitTermination(10, TimeUnit.SECONDS)) {
-      log.info("Waiting for threadpool to terminate");
-    }
-  }
-
-  private static final VerifyIngest.Opts VOPTS;
-  static {
-    VOPTS = new VerifyIngest.Opts();
-    VOPTS.rows = 10 * 1000;
-  }
-  private static final ScannerOpts SOPTS = new ScannerOpts();
-
-  @Test
-  public void test() throws Exception {
-    final Connector c = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-    final AuthenticationToken token = getAdminToken();
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500K");
-    final ClusterControl control = getCluster().getClusterControl();
-    final String[] args;
-    if (token instanceof PasswordToken) {
-      byte[] password = ((PasswordToken) token).getPassword();
-      args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
-          cluster.getZooKeepers(), "--rows", "" + VOPTS.rows, "--table", tableName};
-    } else if (token instanceof KerberosToken) {
-      ClusterUser rootUser = getAdminUser();
-      args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
-          cluster.getZooKeepers(), "--rows", "" + VOPTS.rows, "--table", tableName};
-    } else {
-      throw new RuntimeException("Unrecognized token");
-    }
-
-    Future<Integer> retCode = svc.submit(new Callable<Integer>() {
-      @Override
-      public Integer call() {
-        try {
-          return control.exec(TestIngest.class, args);
-        } catch (Exception e) {
-          log.error("Error running TestIngest", e);
-          return -1;
-        }
-      }
-    });
-
-    for (int i = 0; i < 2; i++) {
-      UtilWaitThread.sleep(10 * 1000);
-      control.stopAllServers(ServerType.TABLET_SERVER);
-      control.startAllServers(ServerType.TABLET_SERVER);
-    }
-    assertEquals(0, retCode.get().intValue());
-    VOPTS.setTableName(tableName);
-
-    if (token instanceof PasswordToken) {
-      VOPTS.setPrincipal(getAdminPrincipal());
-    } else if (token instanceof KerberosToken) {
-      VOPTS.updateKerberosCredentials(cluster.getClientConfig());
-    } else {
-      throw new RuntimeException("Unrecognized token");
-    }
-
-    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
-  }
-
-}
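
The stress pattern itself is small: bounce the tablet servers while the ingest future runs, then join on it. A sketch of just the bounce loop, with the cycle count and pause mirroring the values in the diff (the helper name is invented):

    import org.apache.accumulo.cluster.ClusterControl;
    import org.apache.accumulo.minicluster.ServerType;

    final class TserverBounce {
      // Kill and restart all tablet servers a few times while ingest is running;
      // each restart forces write-ahead log recovery on the replacement servers.
      static void bounce(ClusterControl control) throws Exception {
        for (int i = 0; i < 2; i++) {
          Thread.sleep(10 * 1000); // let some ingest land before the next kill
          control.stopAllServers(ServerType.TABLET_SERVER);
          control.startAllServers(ServerType.TABLET_SERVER);
        }
      }
    }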

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
deleted file mode 100644
index 75c66bd..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.apache.accumulo.test.functional.FunctionalTestUtils.checkRFiles;
-import static org.apache.accumulo.test.functional.FunctionalTestUtils.nm;
-import static org.junit.Assert.assertEquals;
-
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.user.RowDeletingIterator;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class RowDeleteIT extends AccumuloClusterHarness {
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "50ms");
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  @Test
-  public void run() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
-    groups.put("lg1", Collections.singleton(new Text("foo")));
-    groups.put("dg", Collections.<Text> emptySet());
-    c.tableOperations().setLocalityGroups(tableName, groups);
-    IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
-    c.tableOperations().attachIterator(tableName, setting, EnumSet.of(IteratorScope.majc));
-    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
-
-    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
-    bw.addMutation(nm("r1", "foo", "cf1", "v1"));
-    bw.addMutation(nm("r1", "bar", "cf1", "v2"));
-
-    bw.flush();
-    c.tableOperations().flush(tableName, null, null, true);
-
-    checkRFiles(c, tableName, 1, 1, 1, 1);
-
-    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
-    int count = Iterators.size(scanner.iterator());
-    assertEquals("count == " + count, 2, count);
-
-    bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));
-
-    bw.flush();
-    c.tableOperations().flush(tableName, null, null, true);
-
-    checkRFiles(c, tableName, 1, 1, 2, 2);
-
-    scanner = c.createScanner(tableName, Authorizations.EMPTY);
-    count = Iterators.size(scanner.iterator());
-    assertEquals("count == " + count, 3, count);
-
-    c.tableOperations().compact(tableName, null, null, false, true);
-
-    checkRFiles(c, tableName, 1, 1, 0, 0);
-
-    scanner = c.createScanner(tableName, Authorizations.EMPTY);
-    count = Iterators.size(scanner.iterator());
-    assertEquals("count == " + count, 0, count);
-    bw.close();
-
-  }
-
-}
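
Outside the harness, the row-delete mechanism this test verifies takes only the iterator plus one marker mutation. A minimal sketch, assuming the caller supplies the Connector and names (this variant attaches at all scopes rather than majc only):

    import java.util.EnumSet;

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
    import org.apache.accumulo.core.iterators.user.RowDeletingIterator;

    final class RowDeleteSketch {
      static void deleteRow(Connector conn, String table, String row) throws Exception {
        // Attaching at all scopes hides the row from scans immediately and lets
        // compactions purge it later; the test uses majc only so it can count files.
        IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
        conn.tableOperations().attachIterator(table, setting, EnumSet.allOf(IteratorScope.class));

        // An empty column family/qualifier with DELETE_ROW_VALUE marks the whole row.
        BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
        Mutation m = new Mutation(row);
        m.put("", "", RowDeletingIterator.DELETE_ROW_VALUE);
        bw.addMutation(m);
        bw.close();
      }
    }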

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
deleted file mode 100644
index 863ac78..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static com.google.common.base.Charsets.UTF_8;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.ActiveScan;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Integration test for ACCUMULO-2641, which adds a scan id to the thrift protocol so that
- * {@code org.apache.accumulo.core.client.admin.ActiveScan.getScanid()} returns a unique scan id.
- * <p>
- * The test uses the minicluster and the {@code org.apache.accumulo.test.functional.SlowIterator} to create multiple scan sessions. The test exercises multiple
- * tablet servers with splits and multiple ranges to force the scans to occur across multiple tablet servers for completeness.
- * <p>
- * That patch modified thrift; TraceRepoDeserializationTest seems to fail unless the following is added:
- * <p>
- * private static final long serialVersionUID = -4659975753252858243l;
- * <p>
- * back into org.apache.accumulo.trace.thrift.TInfo until that test signature is regenerated.
- */
-public class ScanIdIT extends AccumuloClusterHarness {
-
-  private static final Logger log = LoggerFactory.getLogger(ScanIdIT.class);
-
-  private static final int NUM_SCANNERS = 8;
-
-  private static final int NUM_DATA_ROWS = 100;
-
-  private static final Random random = new Random();
-
-  private static final ExecutorService pool = Executors.newFixedThreadPool(NUM_SCANNERS);
-
-  private static final AtomicBoolean testInProgress = new AtomicBoolean(true);
-
-  private static final Map<Integer,Value> resultsByWorker = new ConcurrentHashMap<Integer,Value>();
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  /**
-   * @throws Exception
-   *           any exception is a test failure.
-   */
-  @Test
-  public void testScanId() throws Exception {
-
-    final String tableName = getUniqueNames(1)[0];
-    Connector conn = getConnector();
-    conn.tableOperations().create(tableName);
-
-    addSplits(conn, tableName);
-
-    log.info("Splits added");
-
-    generateSampleData(conn, tableName);
-
-    log.info("Generated data for {}", tableName);
-
-    attachSlowIterator(conn, tableName);
-
-    CountDownLatch latch = new CountDownLatch(NUM_SCANNERS);
-
-    for (int scannerIndex = 0; scannerIndex < NUM_SCANNERS; scannerIndex++) {
-      ScannerThread st = new ScannerThread(conn, scannerIndex, tableName, latch);
-      pool.submit(st);
-    }
-
-    // wait for scanners to report a result.
-    while (testInProgress.get()) {
-
-      if (resultsByWorker.size() < NUM_SCANNERS) {
-        log.trace("Results reported {}", resultsByWorker.size());
-        UtilWaitThread.sleep(750);
-      } else {
-        // each worker has reported at least one result.
-        testInProgress.set(false);
-
-        log.debug("Final result count {}", resultsByWorker.size());
-
-        // delay to allow scanners to react to end of test and cleanly close.
-        UtilWaitThread.sleep(1000);
-      }
-
-    }
-
-    // all scanners have reported at least 1 result, so check for unique scan ids.
-    Set<Long> scanIds = new HashSet<Long>();
-
-    List<String> tservers = conn.instanceOperations().getTabletServers();
-
-    log.debug("tablet servers {}", tservers.toString());
-
-    for (String tserver : tservers) {
-
-      List<ActiveScan> activeScans = null;
-      for (int i = 0; i < 10; i++) {
-        try {
-          activeScans = conn.instanceOperations().getActiveScans(tserver);
-          break;
-        } catch (AccumuloException e) {
-          if (e.getCause() instanceof TableNotFoundException) {
-            log.debug("Got TableNotFoundException, will retry");
-            Thread.sleep(200);
-            continue;
-          }
-          throw e;
-        }
-      }
-
-      assertNotNull("Repeatedly got exception trying to get active scans", activeScans);
-
-      log.debug("TServer {} has {} active scans", tserver, activeScans.size());
-
-      for (ActiveScan scan : activeScans) {
-        log.debug("Tserver {} scan id {}", tserver, scan.getScanid());
-        scanIds.add(scan.getScanid());
-      }
-    }
-
-    assertTrue("Expected at least " + NUM_SCANNERS + " scanIds, but saw " + scanIds.size(), NUM_SCANNERS <= scanIds.size());
-
-  }
-
-  /**
-   * Runs a scanner in a separate thread so that multiple scanners can execute in parallel.
-   * <p>
-   * The thread's run method terminates when the testInProgress flag is set to false.
-   */
-  private static class ScannerThread implements Runnable {
-
-    private final Connector connector;
-    private Scanner scanner = null;
-    private final int workerIndex;
-    private final String tablename;
-    private final CountDownLatch latch;
-
-    public ScannerThread(final Connector connector, final int workerIndex, final String tablename, final CountDownLatch latch) {
-      this.connector = connector;
-      this.workerIndex = workerIndex;
-      this.tablename = tablename;
-      this.latch = latch;
-    }
-
-    /**
-     * Execute the scan across the sample data and put each result into the result map until the testInProgress flag is set to false.
-     */
-    @Override
-    public void run() {
-
-      latch.countDown();
-      try {
-        latch.await();
-      } catch (InterruptedException e) {
-        log.error("Thread interrupted with id {}", workerIndex);
-        Thread.currentThread().interrupt();
-        return;
-      }
-
-      log.debug("Creating scanner in worker thread {}", workerIndex);
-
-      try {
-
-        scanner = connector.createScanner(tablename, new Authorizations());
-
-        // Never start readahead
-        scanner.setReadaheadThreshold(Long.MAX_VALUE);
-        scanner.setBatchSize(1);
-
-        // create different ranges to try to hit more than one tablet.
-        scanner.setRange(new Range(new Text(Integer.toString(workerIndex)), new Text("9")));
-
-      } catch (TableNotFoundException e) {
-        throw new IllegalStateException("Initialization failure. Could not create scanner", e);
-      }
-
-      scanner.fetchColumnFamily(new Text("fam1"));
-
-      for (Map.Entry<Key,Value> entry : scanner) {
-
-        // exit when success condition is met.
-        if (!testInProgress.get()) {
-          scanner.clearScanIterators();
-          scanner.close();
-
-          return;
-        }
-
-        Text row = entry.getKey().getRow();
-
-        log.debug("worker {}, row {}", workerIndex, row.toString());
-
-        if (entry.getValue() != null) {
-
-          Value prevValue = resultsByWorker.put(workerIndex, entry.getValue());
-
-          // value should always be increasing
-          if (prevValue != null) {
-
-            log.trace("worker {} values {}", workerIndex, String.format("%1$s < %2$s", prevValue, entry.getValue()));
-
-            assertTrue(prevValue.compareTo(entry.getValue()) > 0);
-          }
-        } else {
-          log.info("Scanner returned null");
-          fail("Scanner returned unexpected null value");
-        }
-
-      }
-
-      log.debug("Scanner ran out of data. (info only, not an error) ");
-
-    }
-  }
-
-  /**
-   * Create splits on the table and force migration by taking the table offline and then bringing it back online for the test.
-   *
-   * @param conn
-   *          Accumulo connector to the test cluster or MAC instance.
-   */
-  private void addSplits(final Connector conn, final String tableName) {
-
-    SortedSet<Text> splits = createSplits();
-
-    try {
-
-      conn.tableOperations().addSplits(tableName, splits);
-
-      conn.tableOperations().offline(tableName, true);
-
-      UtilWaitThread.sleep(2000);
-      conn.tableOperations().online(tableName, true);
-
-      for (Text split : conn.tableOperations().listSplits(tableName)) {
-        log.trace("Split {}", split);
-      }
-
-    } catch (AccumuloSecurityException e) {
-      throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
-    } catch (TableNotFoundException e) {
-      throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
-    } catch (AccumuloException e) {
-      throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
-    }
-
-  }
-
-  /**
-   * Create splits to distribute data across multiple tservers.
-   *
-   * @return splits in a sorted set for addSplits.
-   */
-  private SortedSet<Text> createSplits() {
-
-    SortedSet<Text> splits = new TreeSet<Text>();
-
-    for (int split = 0; split < 10; split++) {
-      splits.add(new Text(Integer.toString(split)));
-    }
-
-    return splits;
-  }
-
-  /**
-   * Generate some sample data using a random row id to distribute across splits.
-   * <p>
-   * The primary goal is to determine that each scanner is assigned a unique scan id. The test also checks that the count value for fam1 increases if a scanner
-   * reads multiple values, but that is a secondary consideration, included for completeness.
-   *
-   * @param connector
-   *          Accumulo connector to the test cluster or MAC instance.
-   */
-  private void generateSampleData(Connector connector, final String tablename) {
-
-    try {
-
-      BatchWriter bw = connector.createBatchWriter(tablename, new BatchWriterConfig());
-
-      ColumnVisibility vis = new ColumnVisibility("public");
-
-      for (int i = 0; i < NUM_DATA_ROWS; i++) {
-
-        Text rowId = new Text(String.format("%d", ((random.nextInt(10) * 100) + i)));
-
-        Mutation m = new Mutation(rowId);
-        m.put(new Text("fam1"), new Text("count"), new Value(Integer.toString(i).getBytes(UTF_8)));
-        m.put(new Text("fam1"), new Text("positive"), vis, new Value(Integer.toString(NUM_DATA_ROWS - i).getBytes(UTF_8)));
-        m.put(new Text("fam1"), new Text("negative"), vis, new Value(Integer.toString(i - NUM_DATA_ROWS).getBytes(UTF_8)));
-
-        log.trace("Added row {}", rowId);
-
-        bw.addMutation(m);
-      }
-
-      bw.close();
-    } catch (TableNotFoundException ex) {
-      throw new IllegalStateException("Initialization failed. Could not create test data", ex);
-    } catch (MutationsRejectedException ex) {
-      throw new IllegalStateException("Initialization failed. Could not create test data", ex);
-    }
-  }
-
-  /**
-   * Attach the test slow iterator so that we have time to read the scan id without creating a large dataset. Uses fairly large sleep and delay times because
-   * we are not concerned with how much data is read and we do not read all of the data - the test stops once each scanner reports a scan id.
-   *
-   * @param connector
-   *          Accumulo connector to the test cluster or MAC instance.
-   */
-  private void attachSlowIterator(Connector connector, final String tablename) {
-    try {
-
-      IteratorSetting slowIter = new IteratorSetting(50, "slowIter", "org.apache.accumulo.test.functional.SlowIterator");
-      slowIter.addOption("sleepTime", "200");
-      slowIter.addOption("seekSleepTime", "200");
-
-      connector.tableOperations().attachIterator(tablename, slowIter, EnumSet.of(IteratorUtil.IteratorScope.scan));
-
-    } catch (AccumuloException ex) {
-      throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
-    } catch (TableNotFoundException ex) {
-      throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
-    } catch (AccumuloSecurityException ex) {
-      throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
-    }
-  }
-
-}
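
The heart of the assertion is the instance-operations query below; a sketch of collecting scan ids across all tablet servers, assuming a live Connector "conn":

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.admin.ActiveScan;

    final class ScanIdSketch {
      static Set<Long> collectScanIds(Connector conn) throws Exception {
        Set<Long> ids = new HashSet<Long>();
        for (String tserver : conn.instanceOperations().getTabletServers()) {
          for (ActiveScan scan : conn.instanceOperations().getActiveScans(tserver)) {
            ids.add(scan.getScanid()); // unique per scan session after ACCUMULO-2641
          }
        }
        return ids;
      }
    }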

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
deleted file mode 100644
index 3453303..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ScannerBase;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class ScanIteratorIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 30;
-  }
-
-  @Test
-  public void run() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    Connector c = getConnector();
-    c.tableOperations().create(tableName);
-
-    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
-    for (int i = 0; i < 1000; i++) {
-      Mutation m = new Mutation(new Text(String.format("%06d", i)));
-      m.put(new Text("cf1"), new Text("cq1"), new Value(Integer.toString(1000 - i).getBytes(UTF_8)));
-      m.put(new Text("cf1"), new Text("cq2"), new Value(Integer.toString(i - 1000).getBytes(UTF_8)));
-
-      bw.addMutation(m);
-    }
-
-    bw.close();
-
-    Scanner scanner = c.createScanner(tableName, new Authorizations());
-
-    setupIter(scanner);
-    verify(scanner, 1, 999);
-
-    BatchScanner bscanner = c.createBatchScanner(tableName, new Authorizations(), 3);
-    bscanner.setRanges(Collections.singleton(new Range((Key) null, null)));
-
-    setupIter(bscanner);
-    verify(bscanner, 1, 999);
-
-    ArrayList<Range> ranges = new ArrayList<Range>();
-    ranges.add(new Range(new Text(String.format("%06d", 1))));
-    ranges.add(new Range(new Text(String.format("%06d", 6)), new Text(String.format("%06d", 16))));
-    ranges.add(new Range(new Text(String.format("%06d", 20))));
-    ranges.add(new Range(new Text(String.format("%06d", 23))));
-    ranges.add(new Range(new Text(String.format("%06d", 56)), new Text(String.format("%06d", 61))));
-    ranges.add(new Range(new Text(String.format("%06d", 501)), new Text(String.format("%06d", 504))));
-    ranges.add(new Range(new Text(String.format("%06d", 998)), new Text(String.format("%06d", 1000))));
-
-    HashSet<Integer> got = new HashSet<Integer>();
-    HashSet<Integer> expected = new HashSet<Integer>();
-    for (int i : new int[] {1, 7, 9, 11, 13, 15, 23, 57, 59, 61, 501, 503, 999}) {
-      expected.add(i);
-    }
-
-    bscanner.setRanges(ranges);
-
-    for (Entry<Key,Value> entry : bscanner) {
-      got.add(Integer.parseInt(entry.getKey().getRow().toString()));
-    }
-
-    System.out.println("got : " + got);
-
-    if (!got.equals(expected)) {
-      throw new Exception(got + " != " + expected);
-    }
-
-    bscanner.close();
-
-  }
-
-  private void verify(Iterable<Entry<Key,Value>> scanner, int start, int finish) throws Exception {
-
-    int expected = start;
-    for (Entry<Key,Value> entry : scanner) {
-      if (Integer.parseInt(entry.getKey().getRow().toString()) != expected) {
-        throw new Exception("Saw unexpexted " + entry.getKey().getRow() + " " + expected);
-      }
-
-      if (entry.getKey().getColumnQualifier().toString().equals("cq2")) {
-        expected += 2;
-      }
-    }
-
-    if (expected != finish + 2) {
-      throw new Exception("Ended at " + expected + " not " + (finish + 2));
-    }
-  }
-
-  private void setupIter(ScannerBase scanner) throws Exception {
-    IteratorSetting dropMod = new IteratorSetting(50, "dropMod", "org.apache.accumulo.test.functional.DropModIter");
-    dropMod.addOption("mod", "2");
-    dropMod.addOption("drop", "0");
-    scanner.addScanIterator(dropMod);
-  }
-
-}
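
The same three-line pattern configures any scan-time iterator. A sketch using the test's options; DropModIter is test-only code, so the option semantics here are inferred from the expected results above:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.ScannerBase;

    final class ScanIterSketch {
      static void addDropMod(ScannerBase scanner) {
        IteratorSetting dropMod = new IteratorSetting(50, "dropMod",
            "org.apache.accumulo.test.functional.DropModIter");
        dropMod.addOption("mod", "2");  // drop keys whose numeric row ...
        dropMod.addOption("drop", "0"); // ... is congruent to 0 mod 2, keeping odd rows
        scanner.addScanIterator(dropMod); // works for Scanner and BatchScanner alike
      }
    }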

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
deleted file mode 100644
index bd7555e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Map.Entry;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class ScanRangeIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  private static final int TS_LIMIT = 1;
-  private static final int CQ_LIMIT = 5;
-  private static final int CF_LIMIT = 5;
-  private static final int ROW_LIMIT = 100;
-
-  @Test
-  public void run() throws Exception {
-    Connector c = getConnector();
-    String[] tableNames = getUniqueNames(2);
-    String table1 = tableNames[0];
-    c.tableOperations().create(table1);
-    String table2 = tableNames[1];
-    c.tableOperations().create(table2);
-    TreeSet<Text> splitRows = new TreeSet<Text>();
-    int splits = 3;
-    for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
-      splitRows.add(createRow(i));
-    c.tableOperations().addSplits(table2, splitRows);
-
-    insertData(c, table1);
-    scanTable(c, table1);
-
-    insertData(c, table2);
-    scanTable(c, table2);
-  }
-
-  private void scanTable(Connector c, String table) throws Exception {
-    scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(1, 0, 0, 0));
-
-    scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-
-    scanRange(c, table, null, null);
-
-    for (int i = 0; i < ROW_LIMIT; i += (ROW_LIMIT / 3)) {
-      for (int j = 0; j < CF_LIMIT; j += (CF_LIMIT / 2)) {
-        for (int k = 1; k < CQ_LIMIT; k += (CQ_LIMIT / 2)) {
-          scanRange(c, table, null, new IntKey(i, j, k, 0));
-          scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(i, j, k, 0));
-
-          scanRange(c, table, new IntKey(i, j, k, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-
-          scanRange(c, table, new IntKey(i, j, k, 0), null);
-
-        }
-      }
-    }
-
-    for (int i = 0; i < ROW_LIMIT; i++) {
-      scanRange(c, table, new IntKey(i, 0, 0, 0), new IntKey(i, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-
-      if (i > 0 && i < ROW_LIMIT - 1) {
-        scanRange(c, table, new IntKey(i - 1, 0, 0, 0), new IntKey(i + 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-      }
-    }
-
-  }
-
-  private static class IntKey {
-    private int row;
-    private int cf;
-    private int cq;
-    private long ts;
-
-    IntKey(IntKey ik) {
-      this.row = ik.row;
-      this.cf = ik.cf;
-      this.cq = ik.cq;
-      this.ts = ik.ts;
-    }
-
-    IntKey(int row, int cf, int cq, long ts) {
-      this.row = row;
-      this.cf = cf;
-      this.cq = cq;
-      this.ts = ts;
-    }
-
-    Key createKey() {
-      Text trow = createRow(row);
-      Text tcf = createCF(cf);
-      Text tcq = createCQ(cq);
-
-      return new Key(trow, tcf, tcq, ts);
-    }
-
-    IntKey increment() {
-
-      IntKey ik = new IntKey(this);
-
-      ik.ts++;
-      if (ik.ts >= TS_LIMIT) {
-        ik.ts = 0;
-        ik.cq++;
-        if (ik.cq >= CQ_LIMIT) {
-          ik.cq = 0;
-          ik.cf++;
-          if (ik.cf >= CF_LIMIT) {
-            ik.cf = 0;
-            ik.row++;
-          }
-        }
-      }
-
-      return ik;
-    }
-
-  }
-
-  private void scanRange(Connector c, String table, IntKey ik1, IntKey ik2) throws Exception {
-    scanRange(c, table, ik1, false, ik2, false);
-    scanRange(c, table, ik1, false, ik2, true);
-    scanRange(c, table, ik1, true, ik2, false);
-    scanRange(c, table, ik1, true, ik2, true);
-  }
-
-  private void scanRange(Connector c, String table, IntKey ik1, boolean inclusive1, IntKey ik2, boolean inclusive2) throws Exception {
-    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
-
-    Key key1 = null;
-    Key key2 = null;
-
-    IntKey expectedIntKey;
-    IntKey expectedEndIntKey;
-
-    if (ik1 != null) {
-      key1 = ik1.createKey();
-      expectedIntKey = ik1;
-
-      if (!inclusive1) {
-        expectedIntKey = expectedIntKey.increment();
-      }
-    } else {
-      expectedIntKey = new IntKey(0, 0, 0, 0);
-    }
-
-    if (ik2 != null) {
-      key2 = ik2.createKey();
-      expectedEndIntKey = ik2;
-
-      if (inclusive2) {
-        expectedEndIntKey = expectedEndIntKey.increment();
-      }
-    } else {
-      expectedEndIntKey = new IntKey(ROW_LIMIT, 0, 0, 0);
-    }
-
-    Range range = new Range(key1, inclusive1, key2, inclusive2);
-
-    scanner.setRange(range);
-
-    for (Entry<Key,Value> entry : scanner) {
-
-      Key expectedKey = expectedIntKey.createKey();
-      if (!expectedKey.equals(entry.getKey())) {
-        throw new Exception(" " + expectedKey + " != " + entry.getKey());
-      }
-
-      expectedIntKey = expectedIntKey.increment();
-    }
-
-    if (!expectedIntKey.createKey().equals(expectedEndIntKey.createKey())) {
-      throw new Exception("expected scan to end at " + expectedEndIntKey.createKey() + " but it ended at " + expectedIntKey.createKey());
-    }
-  }
-
-  private static Text createCF(int cf) {
-    Text tcf = new Text(String.format("cf_%03d", cf));
-    return tcf;
-  }
-
-  private static Text createCQ(int cq) {
-    Text tcq = new Text(String.format("cq_%03d", cq));
-    return tcq;
-  }
-
-  private static Text createRow(int row) {
-    Text trow = new Text(String.format("r_%06d", row));
-    return trow;
-  }
-
-  private void insertData(Connector c, String table) throws Exception {
-
-    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
-
-    for (int i = 0; i < ROW_LIMIT; i++) {
-      Mutation m = new Mutation(createRow(i));
-
-      for (int j = 0; j < CF_LIMIT; j++) {
-        for (int k = 0; k < CQ_LIMIT; k++) {
-          for (int t = 0; t < TS_LIMIT; t++) {
-            m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes(UTF_8)));
-          }
-        }
-      }
-
-      bw.addMutation(m);
-    }
-
-    bw.close();
-  }
-}
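
The scanRange helper above exercises all four inclusive/exclusive combinations
of a Range's endpoints. As a minimal standalone sketch of the same Range API
(the table name and row values are hypothetical, not taken from the deleted
test):

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.io.Text;

    class RangeSketch {
      // Scan the half-open interval [start, end): the start key is returned,
      // a key equal to the end key is not.
      static void scanHalfOpen(Connector c, String table) throws Exception {
        Key start = new Key(new Text("r_000001"));
        Key end = new Key(new Text("r_000005"));
        Scanner s = c.createScanner(table, Authorizations.EMPTY);
        s.setRange(new Range(start, true, end, false));
        for (Entry<Key,Value> entry : s) {
          System.out.println(entry.getKey());
        }
      }
    }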

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
deleted file mode 100644
index 0636056..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ScanSessionTimeOutIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(ScanSessionTimeOutIT.class);
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    siteConfig.put(Property.TSERV_SESSION_MAXIDLE.getKey(), "3");
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  private String sessionIdle = null;
-
-  @Before
-  public void reduceSessionIdle() throws Exception {
-    InstanceOperations ops = getConnector().instanceOperations();
-    sessionIdle = ops.getSystemConfiguration().get(Property.TSERV_SESSION_MAXIDLE.getKey());
-    ops.setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), "3");
-    log.info("Waiting for existing session idle time to expire");
-    Thread.sleep(AccumuloConfiguration.getTimeInMillis(sessionIdle));
-    log.info("Finished waiting");
-  }
-
-  @After
-  public void resetSessionIdle() throws Exception {
-    if (null != sessionIdle) {
-      getConnector().instanceOperations().setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), sessionIdle);
-    }
-  }
-
-  @Test
-  public void run() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-
-    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
-    for (int i = 0; i < 100000; i++) {
-      Mutation m = new Mutation(new Text(String.format("%08d", i)));
-      for (int j = 0; j < 3; j++)
-        m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));
-
-      bw.addMutation(m);
-    }
-
-    bw.close();
-
-    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
-    scanner.setBatchSize(1000);
-
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-
-    verify(iter, 0, 200);
-
-    // sleep three times the session timeout
-    UtilWaitThread.sleep(9000);
-
-    verify(iter, 200, 100000);
-
-  }
-
-  private void verify(Iterator<Entry<Key,Value>> iter, int start, int stop) throws Exception {
-    for (int i = start; i < stop; i++) {
-
-      Text er = new Text(String.format("%08d", i));
-
-      for (int j = 0; j < 3; j++) {
-        Entry<Key,Value> entry = iter.next();
-
-        if (!entry.getKey().getRow().equals(er)) {
-          throw new Exception("row " + entry.getKey().getRow() + " != " + er);
-        }
-
-        if (!entry.getKey().getColumnFamily().equals(new Text("cf1"))) {
-          throw new Exception("cf " + entry.getKey().getColumnFamily() + " != cf1");
-        }
-
-        if (!entry.getKey().getColumnQualifier().equals(new Text("cq" + j))) {
-          throw new Exception("cq " + entry.getKey().getColumnQualifier() + " != cq" + j);
-        }
-
-        if (!entry.getValue().toString().equals("" + i + "_" + j)) {
-          throw new Exception("value " + entry.getValue() + " != " + i + "_" + j);
-        }
-
-      }
-    }
-
-  }
-
-}
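
The test above depends on the scan client transparently re-establishing a
server-side session that expired while the iterator sat idle. A minimal sketch
of that pattern, assuming an already-populated table (the names are
hypothetical):

    import java.util.Iterator;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;

    class SessionResumeSketch {
      static void resumeAfterIdle(Connector c, String table) throws Exception {
        // let server-side scan sessions expire after roughly three seconds idle
        c.instanceOperations().setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), "3");
        Scanner s = c.createScanner(table, Authorizations.EMPTY);
        Iterator<Entry<Key,Value>> iter = s.iterator();
        if (iter.hasNext())
          iter.next(); // starts a session on the tablet server
        Thread.sleep(9000); // idle well past the timeout; the server may reap the session
        while (iter.hasNext())
          iter.next(); // the client re-seeks and continues where it left off
      }
    }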

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScannerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScannerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScannerIT.java
deleted file mode 100644
index 340a58e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScannerIT.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.base.Stopwatch;
-
-public class ScannerIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  @Test
-  public void testScannerReadaheadConfiguration() throws Exception {
-    final String table = getUniqueNames(1)[0];
-    Connector c = getConnector();
-    c.tableOperations().create(table);
-
-    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
-
-    Mutation m = new Mutation("a");
-    for (int i = 0; i < 10; i++) {
-      m.put(Integer.toString(i), "", "");
-    }
-
-    bw.addMutation(m);
-    bw.close();
-
-    Scanner s = c.createScanner(table, Authorizations.EMPTY);
-
-    IteratorSetting cfg = new IteratorSetting(100, SlowIterator.class);
-    // A batch size of one will end up calling seek() for each element with no calls to next()
-    SlowIterator.setSeekSleepTime(cfg, 100L);
-
-    s.addScanIterator(cfg);
-    // Never start readahead
-    s.setReadaheadThreshold(Long.MAX_VALUE);
-    s.setBatchSize(1);
-    s.setRange(new Range());
-
-    Stopwatch sw = new Stopwatch();
-    Iterator<Entry<Key,Value>> iterator = s.iterator();
-
-    sw.start();
-    while (iterator.hasNext()) {
-      sw.stop();
-
-      // While we "do work" in the client, we should be fetching the next result
-      UtilWaitThread.sleep(100L);
-      iterator.next();
-      sw.start();
-    }
-    sw.stop();
-
-    long millisWithWait = sw.elapsed(TimeUnit.MILLISECONDS);
-
-    s = c.createScanner(table, Authorizations.EMPTY);
-    s.addScanIterator(cfg);
-    s.setRange(new Range());
-    s.setBatchSize(1);
-    s.setReadaheadThreshold(0L);
-
-    sw = new Stopwatch();
-    iterator = s.iterator();
-
-    sw.start();
-    while (iterator.hasNext()) {
-      sw.stop();
-
-      // While we "do work" in the client, we should be fetching the next result
-      UtilWaitThread.sleep(100L);
-      iterator.next();
-      sw.start();
-    }
-    sw.stop();
-
-    long millisWithNoWait = sw.elapsed(TimeUnit.MILLISECONDS);
-
-    // The "no-wait" time should be much less than the "wait-time"
-    Assert.assertTrue("Expected less time to be taken with immediate readahead (" + millisWithNoWait + ") than without immediate readahead (" + millisWithWait
-        + ")", millisWithNoWait < millisWithWait);
-  }
-
-}
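
The two timed loops in the test differ only in the readahead threshold. A
minimal sketch of the two Scanner configurations being compared (the names are
hypothetical):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.security.Authorizations;

    class ReadaheadSketch {
      static void configure(Connector c, String table) throws Exception {
        // prefetch the next batch in the background as soon as iteration begins
        Scanner eager = c.createScanner(table, Authorizations.EMPTY);
        eager.setBatchSize(1);
        eager.setReadaheadThreshold(0L);

        // never prefetch; each batch is fetched only when the client asks for it
        Scanner lazy = c.createScanner(table, Authorizations.EMPTY);
        lazy.setBatchSize(1);
        lazy.setReadaheadThreshold(Long.MAX_VALUE);
      }
    }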

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
deleted file mode 100644
index 02b65f4..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Combiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class ServerSideErrorIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void run() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
-    Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
-    c.tableOperations().attachIterator(tableName, is);
-
-    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
-    Mutation m = new Mutation(new Text("r1"));
-    m.put(new Text("acf"), new Text("foo"), new Value(new byte[] {'1'}));
-
-    bw.addMutation(m);
-
-    bw.close();
-
-    // try to scan table
-    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
-
-    boolean caught = false;
-    try {
-      for (Entry<Key,Value> entry : scanner) {
-        entry.getKey();
-      }
-    } catch (Exception e) {
-      caught = true;
-    }
-
-    if (!caught)
-      throw new Exception("Scan did not fail");
-
-    // try to batch scan the table
-    BatchScanner bs = c.createBatchScanner(tableName, Authorizations.EMPTY, 2);
-    bs.setRanges(Collections.singleton(new Range()));
-
-    caught = false;
-    try {
-      for (Entry<Key,Value> entry : bs) {
-        entry.getKey();
-      }
-    } catch (Exception e) {
-      caught = true;
-    } finally {
-      bs.close();
-    }
-
-    if (!caught)
-      throw new Exception("batch scan did not fail");
-
-    // remove the bad aggregator so Accumulo can shut down cleanly
-    TableOperations to = c.tableOperations();
-    for (Entry<String,String> e : to.getProperties(tableName)) {
-      to.removeProperty(tableName, e.getKey());
-    }
-
-    UtilWaitThread.sleep(500);
-
-    // should be able to scan now
-    scanner = c.createScanner(tableName, Authorizations.EMPTY);
-    for (Entry<Key,Value> entry : scanner) {
-      entry.getKey();
-    }
-
-    // set a nonexistent iterator class, which should cause the scan to fail on the server side
-    scanner.addScanIterator(new IteratorSetting(100, "bogus", "com.bogus.iterator"));
-
-    caught = false;
-    try {
-      for (Entry<Key,Value> entry : scanner) {
-        // should error
-        entry.getKey();
-      }
-    } catch (Exception e) {
-      caught = true;
-    }
-
-    if (!caught)
-      throw new Exception("Scan did not fail");
-  }
-}
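
The test clears every table property to detach the failing combiner;
TableOperations.removeIterator is a narrower alternative when the iterator's
name and scopes are known. A minimal sketch, assuming BadCombiner (from the
test package above) is on the classpath and using a hypothetical table name:

    import java.util.Collections;
    import java.util.EnumSet;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.Combiner;
    import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
    import org.apache.accumulo.test.functional.BadCombiner;

    class DetachIteratorSketch {
      static void attachThenDetach(Connector c, String table) throws Exception {
        IteratorSetting is = new IteratorSetting(5, "badCombiner", BadCombiner.class);
        Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
        c.tableOperations().attachIterator(table, is);
        // scans now fail on the server side; detach the iterator to recover
        c.tableOperations().removeIterator(table, "badCombiner", EnumSet.allOf(IteratorScope.class));
      }
    }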

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
deleted file mode 100644
index 36bdd7a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ConditionalWriter;
-import org.apache.accumulo.core.client.ConditionalWriter.Status;
-import org.apache.accumulo.core.client.ConditionalWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Durability;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Condition;
-import org.apache.accumulo.core.data.ConditionalMutation;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class SessionDurabilityIT extends ConfigurableMacBase {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(1);
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-  }
-
-  @Test(timeout = 3 * 60 * 1000)
-  public void nondurableTableHasDurableWrites() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    // table default has no durability
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
-    // send durable writes
-    BatchWriterConfig cfg = new BatchWriterConfig();
-    cfg.setDurability(Durability.SYNC);
-    writeSome(tableName, 10, cfg);
-    assertEquals(10, count(tableName));
-    // verify writes survive restart
-    restartTServer();
-    assertEquals(10, count(tableName));
-  }
-
-  @Test(timeout = 3 * 60 * 1000)
-  public void durableTableLosesNonDurableWrites() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    // table default is durable writes
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
-    // write with no durability
-    BatchWriterConfig cfg = new BatchWriterConfig();
-    cfg.setDurability(Durability.NONE);
-    writeSome(tableName, 10, cfg);
-    // verify writes are lost on restart
-    restartTServer();
-    assertTrue(10 > count(tableName));
-  }
-
-  private int count(String tableName) throws Exception {
-    return Iterators.size(getConnector().createScanner(tableName, Authorizations.EMPTY).iterator());
-  }
-
-  private void writeSome(String tableName, int n, BatchWriterConfig cfg) throws Exception {
-    Connector c = getConnector();
-    BatchWriter bw = c.createBatchWriter(tableName, cfg);
-    for (int i = 0; i < n; i++) {
-      Mutation m = new Mutation(i + "");
-      m.put("", "", "");
-      bw.addMutation(m);
-    }
-    bw.close();
-  }
-
-  @Test(timeout = 3 * 60 * 1000)
-  public void testConditionDurability() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    // table default is durable writes
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
-    // write without durability
-    ConditionalWriterConfig cfg = new ConditionalWriterConfig();
-    cfg.setDurability(Durability.NONE);
-    conditionWriteSome(tableName, 10, cfg);
-    // everything in there?
-    assertEquals(10, count(tableName));
-    // restart the server and verify the updates are lost
-    restartTServer();
-    assertEquals(0, count(tableName));
-  }
-
-  @Test(timeout = 3 * 60 * 1000)
-  public void testConditionDurability2() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    // table default has no durability
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
-    // write with durability
-    ConditionalWriterConfig cfg = new ConditionalWriterConfig();
-    cfg.setDurability(Durability.SYNC);
-    conditionWriteSome(tableName, 10, cfg);
-    // everything in there?
-    assertEquals(10, count(tableName));
-    // restart the server and verify the updates are still there
-    restartTServer();
-    assertEquals(10, count(tableName));
-  }
-
-  private void conditionWriteSome(String tableName, int n, ConditionalWriterConfig cfg) throws Exception {
-    Connector c = getConnector();
-    ConditionalWriter cw = c.createConditionalWriter(tableName, cfg);
-    for (int i = 0; i < n; i++) {
-      ConditionalMutation m = new ConditionalMutation((CharSequence) (i + ""), new Condition("", ""));
-      m.put("", "", "X");
-      assertEquals(Status.ACCEPTED, cw.write(m).getStatus());
-    }
-  }
-
-  private void restartTServer() throws Exception {
-    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
-      cluster.killProcess(ServerType.TABLET_SERVER, proc);
-    }
-    cluster.start();
-  }
-
-}
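
Durability is set per table through Property.TABLE_DURABILITY and can be
overridden per writer, which is the interplay the tests above exercise. A
minimal sketch of the per-writer override (the names are hypothetical):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Durability;
    import org.apache.accumulo.core.data.Mutation;

    class DurabilitySketch {
      static void writeDurably(Connector c, String table) throws Exception {
        BatchWriterConfig cfg = new BatchWriterConfig();
        cfg.setDurability(Durability.SYNC); // sync the write-ahead log before acknowledging
        BatchWriter bw = c.createBatchWriter(table, cfg);
        Mutation m = new Mutation("row");
        m.put("cf", "cq", "value");
        bw.addMutation(m);
        bw.close();
      }
    }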

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
deleted file mode 100644
index f27ee02..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.TestRandomDeletes;
-import org.apache.accumulo.test.VerifyIngest;
-import org.junit.Test;
-
-public class ShutdownIT extends ConfigurableMacBase {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void shutdownDuringIngest() throws Exception {
-    Process ingest = cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD,
-        "--createTable");
-    UtilWaitThread.sleep(100);
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-    ingest.destroy();
-  }
-
-  @Test
-  public void shutdownDuringQuery() throws Exception {
-    assertEquals(0,
-        cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
-            .waitFor());
-    Process verify = cluster.exec(VerifyIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
-    UtilWaitThread.sleep(100);
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-    verify.destroy();
-  }
-
-  @Test
-  public void shutdownDuringDelete() throws Exception {
-    assertEquals(0,
-        cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
-            .waitFor());
-    Process deleter = cluster.exec(TestRandomDeletes.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
-    UtilWaitThread.sleep(100);
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-    deleter.destroy();
-  }
-
-  @Test
-  public void shutdownDuringDeleteTable() throws Exception {
-    final Connector c = getConnector();
-    for (int i = 0; i < 10; i++) {
-      c.tableOperations().create("table" + i);
-    }
-    final AtomicReference<Exception> ref = new AtomicReference<Exception>();
-    Thread async = new Thread() {
-      @Override
-      public void run() {
-        try {
-          for (int i = 0; i < 10; i++)
-            c.tableOperations().delete("table" + i);
-        } catch (Exception ex) {
-          ref.set(ex);
-        }
-      }
-    };
-    async.start();
-    UtilWaitThread.sleep(100);
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-    if (ref.get() != null)
-      throw ref.get();
-  }
-
-  @Test
-  public void stopDuringStart() throws Exception {
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-  }
-
-  @Test
-  public void adminStop() throws Exception {
-    runAdminStopTest(getConnector(), cluster);
-  }
-
-  static void runAdminStopTest(Connector c, MiniAccumuloClusterImpl cluster) throws InterruptedException, IOException {
-    assertEquals(0,
-        cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
-            .waitFor());
-    List<String> tabletServers = c.instanceOperations().getTabletServers();
-    assertEquals(2, tabletServers.size());
-    String doomed = tabletServers.get(0);
-    assertEquals(0, cluster.exec(Admin.class, "stop", doomed).waitFor());
-    tabletServers = c.instanceOperations().getTabletServers();
-    assertEquals(1, tabletServers.size());
-    assertFalse(tabletServers.get(0).equals(doomed));
-  }
-
-}