Posted to commits@accumulo.apache.org by ec...@apache.org on 2015/06/04 20:52:52 UTC

[11/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
deleted file mode 100644
index 31d1329..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetAddress;
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.rpc.UGIAssumingTransport;
-import org.apache.accumulo.harness.AccumuloITBase;
-import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
-import org.apache.accumulo.harness.MiniClusterHarness;
-import org.apache.accumulo.harness.TestingKdc;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.accumulo.proxy.ProxyServer;
-import org.apache.accumulo.proxy.thrift.AccumuloProxy;
-import org.apache.accumulo.proxy.thrift.AccumuloProxy.Client;
-import org.apache.accumulo.proxy.thrift.AccumuloSecurityException;
-import org.apache.accumulo.proxy.thrift.ColumnUpdate;
-import org.apache.accumulo.proxy.thrift.Key;
-import org.apache.accumulo.proxy.thrift.KeyValue;
-import org.apache.accumulo.proxy.thrift.ScanOptions;
-import org.apache.accumulo.proxy.thrift.ScanResult;
-import org.apache.accumulo.proxy.thrift.TimeType;
-import org.apache.accumulo.proxy.thrift.WriterOptions;
-import org.apache.accumulo.server.util.PortUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.transport.TSaslClientTransport;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransportException;
-import org.hamcrest.Description;
-import org.hamcrest.TypeSafeMatcher;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Tests impersonation of clients by the proxy over SASL
- */
-public class KerberosProxyIT extends AccumuloITBase {
-  private static final Logger log = LoggerFactory.getLogger(KerberosProxyIT.class);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private static TestingKdc kdc;
-  private static String krbEnabledForITs = null;
-  private static File proxyKeytab;
-  private static String hostname, proxyPrimary, proxyPrincipal;
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60 * 5;
-  }
-
-  @BeforeClass
-  public static void startKdc() throws Exception {
-    kdc = new TestingKdc();
-    kdc.start();
-    krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
-    if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
-      System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
-    }
-
-    // Create a principal+keytab for the proxy
-    proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
-    hostname = InetAddress.getLocalHost().getCanonicalHostName();
-    // Set the primary because the client needs to know it
-    proxyPrimary = "proxy";
-    // Qualify with an instance
-    proxyPrincipal = proxyPrimary + "/" + hostname;
-    kdc.createPrincipal(proxyKeytab, proxyPrincipal);
-    // Tack on the realm too
-    proxyPrincipal = kdc.qualifyUser(proxyPrincipal);
-  }
-
-  @AfterClass
-  public static void stopKdc() throws Exception {
-    if (null != kdc) {
-      kdc.stop();
-    }
-    if (null != krbEnabledForITs) {
-      System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
-    }
-  }
-
-  private MiniAccumuloClusterImpl mac;
-  private Process proxyProcess;
-  private int proxyPort;
-
-  @Before
-  public void startMac() throws Exception {
-    MiniClusterHarness harness = new MiniClusterHarness();
-    mac = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"), new MiniClusterConfigurationCallback() {
-
-      @Override
-      public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
-        cfg.setNumTservers(1);
-        Map<String,String> siteCfg = cfg.getSiteConfig();
-        // Allow the proxy to impersonate the client user, but no one else
-        siteCfg.put(Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey() + proxyPrincipal + ".users", kdc.getRootUser().getPrincipal());
-        siteCfg.put(Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey() + proxyPrincipal + ".hosts", "*");
-        cfg.setSiteConfig(siteCfg);
-      }
-
-    }, kdc);
-
-    mac.start();
-    MiniAccumuloConfigImpl cfg = mac.getConfig();
-
-    // Proxy configuration
-    proxyPort = PortUtils.getRandomFreePort();
-    File proxyPropertiesFile = new File(cfg.getConfDir(), "proxy.properties");
-    Properties proxyProperties = new Properties();
-    proxyProperties.setProperty("useMockInstance", "false");
-    proxyProperties.setProperty("useMiniAccumulo", "false");
-    proxyProperties.setProperty("protocolFactory", TCompactProtocol.Factory.class.getName());
-    proxyProperties.setProperty("tokenClass", KerberosToken.class.getName());
-    proxyProperties.setProperty("port", Integer.toString(proxyPort));
-    proxyProperties.setProperty("maxFrameSize", "16M");
-    proxyProperties.setProperty("instance", mac.getInstanceName());
-    proxyProperties.setProperty("zookeepers", mac.getZooKeepers());
-    proxyProperties.setProperty("thriftServerType", "sasl");
-    proxyProperties.setProperty("kerberosPrincipal", proxyPrincipal);
-    proxyProperties.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
-
-    // Write out the proxy.properties file
-    FileWriter writer = new FileWriter(proxyPropertiesFile);
-    proxyProperties.store(writer, "Configuration for Accumulo proxy");
-    writer.close();
-
-    proxyProcess = mac.exec(Proxy.class, "-p", proxyPropertiesFile.getCanonicalPath());
-
-    // Enable kerberos auth
-    Configuration conf = new Configuration(false);
-    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    UserGroupInformation.setConfiguration(conf);
-
-    boolean success = false;
-    ClusterUser rootUser = kdc.getRootUser();
-    for (int i = 0; i < 10 && !success; i++) {
-
-      UserGroupInformation ugi;
-      try {
-        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-        ugi = UserGroupInformation.getCurrentUser();
-      } catch (IOException ex) {
-        log.info("Login as root is failing", ex);
-        Thread.sleep(1000);
-        continue;
-      }
-
-      TSocket socket = new TSocket(hostname, proxyPort);
-      log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
-      TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
-          "auth"), null, socket);
-
-      final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
-
-      try {
-        // UGI transport will perform the doAs for us
-        ugiTransport.open();
-        success = true;
-      } catch (TTransportException e) {
-        Throwable cause = e.getCause();
-        if (null != cause && cause instanceof ConnectException) {
-          log.info("Proxy not yet up, waiting");
-          Thread.sleep(1000);
-          continue;
-        }
-      } finally {
-        if (null != ugiTransport) {
-          ugiTransport.close();
-        }
-      }
-    }
-
-    assertTrue("Failed to connect to the proxy repeatedly", success);
-  }
-
-  @After
-  public void stopMac() throws Exception {
-    if (null != proxyProcess) {
-      log.info("Destroying proxy process");
-      proxyProcess.destroy();
-      log.info("Waiting for proxy termination");
-      proxyProcess.waitFor();
-      log.info("Proxy terminated");
-    }
-    if (null != mac) {
-      mac.stop();
-    }
-  }
-
-  @Test
-  public void testProxyClient() throws Exception {
-    ClusterUser rootUser = kdc.getRootUser();
-    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-
-    TSocket socket = new TSocket(hostname, proxyPort);
-    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
-    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
-        "auth"), null, socket);
-
-    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
-
-    // UGI transport will perform the doAs for us
-    ugiTransport.open();
-
-    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
-    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
-
-    // Will fail if the proxy can't impersonate the client
-    ByteBuffer login = client.login(rootUser.getPrincipal(), Collections.<String,String> emptyMap());
-
-    // For all of the below actions, the proxy user doesn't have permission to do any of them, but the client user does.
-    // The fact that any of them actually run tells us that impersonation is working.
-
-    // Create a table
-    String table = "table";
-    if (!client.tableExists(login, table)) {
-      client.createTable(login, table, true, TimeType.MILLIS);
-    }
-
-    // Write two records to the table
-    String writer = client.createWriter(login, table, new WriterOptions());
-    Map<ByteBuffer,List<ColumnUpdate>> updates = new HashMap<>();
-    ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap("cf1".getBytes(UTF_8)), ByteBuffer.wrap("cq1".getBytes(UTF_8)));
-    update.setValue(ByteBuffer.wrap("value1".getBytes(UTF_8)));
-    updates.put(ByteBuffer.wrap("row1".getBytes(UTF_8)), Collections.<ColumnUpdate> singletonList(update));
-    update = new ColumnUpdate(ByteBuffer.wrap("cf2".getBytes(UTF_8)), ByteBuffer.wrap("cq2".getBytes(UTF_8)));
-    update.setValue(ByteBuffer.wrap("value2".getBytes(UTF_8)));
-    updates.put(ByteBuffer.wrap("row2".getBytes(UTF_8)), Collections.<ColumnUpdate> singletonList(update));
-    client.update(writer, updates);
-
-    // Flush and close the writer
-    client.flush(writer);
-    client.closeWriter(writer);
-
-    // Open a scanner to the table
-    String scanner = client.createScanner(login, table, new ScanOptions());
-    ScanResult results = client.nextK(scanner, 10);
-    assertEquals(2, results.getResults().size());
-
-    // Check the first key-value
-    KeyValue kv = results.getResults().get(0);
-    Key k = kv.key;
-    ByteBuffer v = kv.value;
-    assertEquals(ByteBuffer.wrap("row1".getBytes(UTF_8)), k.row);
-    assertEquals(ByteBuffer.wrap("cf1".getBytes(UTF_8)), k.colFamily);
-    assertEquals(ByteBuffer.wrap("cq1".getBytes(UTF_8)), k.colQualifier);
-    assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
-    assertEquals(ByteBuffer.wrap("value1".getBytes(UTF_8)), v);
-
-    // And then the second
-    kv = results.getResults().get(1);
-    k = kv.key;
-    v = kv.value;
-    assertEquals(ByteBuffer.wrap("row2".getBytes(UTF_8)), k.row);
-    assertEquals(ByteBuffer.wrap("cf2".getBytes(UTF_8)), k.colFamily);
-    assertEquals(ByteBuffer.wrap("cq2".getBytes(UTF_8)), k.colQualifier);
-    assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
-    assertEquals(ByteBuffer.wrap("value2".getBytes(UTF_8)), v);
-
-    // Close the scanner
-    client.closeScanner(scanner);
-
-    ugiTransport.close();
-  }
-
-  @Test
-  public void testDisallowedClientForImpersonation() throws Exception {
-    String user = testName.getMethodName();
-    File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
-    kdc.createPrincipal(keytab, user);
-
-    // Login as the new user
-    UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-
-    log.info("Logged in as " + ugi);
-
-    // Expect an AccumuloSecurityException
-    thrown.expect(AccumuloSecurityException.class);
-    // Error msg would look like:
-    //
-    // org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_CREDENTIALS for user Principal in credentials object should match kerberos
-    // principal.
-    // Expected 'proxy/hw10447.local@EXAMPLE.COM' but was 'testDisallowedClientForImpersonation@EXAMPLE.COM' - Username or Password is Invalid)
-    thrown.expect(new ThriftExceptionMatchesPattern(".*Error BAD_CREDENTIALS.*"));
-    thrown.expect(new ThriftExceptionMatchesPattern(".*Expected '" + proxyPrincipal + "' but was '" + kdc.qualifyUser(user) + "'.*"));
-
-    TSocket socket = new TSocket(hostname, proxyPort);
-    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
-
-    // Should fail to open the transport
-    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
-        "auth"), null, socket);
-
-    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
-
-    // UGI transport will perform the doAs for us
-    ugiTransport.open();
-
-    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
-    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
-
-    // Will fail because the proxy can't impersonate this user (per the site configuration)
-    try {
-      client.login(kdc.qualifyUser(user), Collections.<String,String> emptyMap());
-    } finally {
-      if (null != ugiTransport) {
-        ugiTransport.close();
-      }
-    }
-  }
-
-  @Test
-  public void testMismatchPrincipals() throws Exception {
-    ClusterUser rootUser = kdc.getRootUser();
-    // Should get an AccumuloSecurityException and the given message
-    thrown.expect(AccumuloSecurityException.class);
-    thrown.expect(new ThriftExceptionMatchesPattern(ProxyServer.RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG));
-
-    // Make a new user
-    String user = testName.getMethodName();
-    File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
-    kdc.createPrincipal(keytab, user);
-
-    // Login as the new user
-    UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-
-    log.info("Logged in as " + ugi);
-
-    TSocket socket = new TSocket(hostname, proxyPort);
-    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
-
-    // Should fail to open the transport
-    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
-        "auth"), null, socket);
-
-    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
-
-    // UGI transport will perform the doAs for us
-    ugiTransport.open();
-
-    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
-    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
-
-    // The proxy needs to recognize that the requested principal isn't the same as the SASL principal and fail
-    // Accumulo should let this through -- we need to rely on the proxy to reject the caller before talking to accumulo
-    try {
-      client.login(rootUser.getPrincipal(), Collections.<String,String> emptyMap());
-    } finally {
-      if (null != ugiTransport) {
-        ugiTransport.close();
-      }
-    }
-  }
-
-  private static class ThriftExceptionMatchesPattern extends TypeSafeMatcher<AccumuloSecurityException> {
-    private String pattern;
-
-    public ThriftExceptionMatchesPattern(String pattern) {
-      this.pattern = pattern;
-    }
-
-    @Override
-    protected boolean matchesSafely(AccumuloSecurityException item) {
-      return item.isSetMsg() && item.msg.matches(pattern);
-    }
-
-    @Override
-    public void describeTo(Description description) {
-      description.appendText("matches pattern ").appendValue(pattern);
-    }
-
-    @Override
-    protected void describeMismatchSafely(AccumuloSecurityException item, Description mismatchDescription) {
-      mismatchDescription.appendText("does not match");
-    }
-  }
-}
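
Both the setup and the tests above lean on UGIAssumingTransport so that the GSSAPI handshake runs as the logged-in Kerberos user. Below is a minimal sketch of that doAs-wrapping idea; the class and field names are illustrative only, not the real UGIAssumingTransport internals:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.thrift.transport.TTransport;
    import org.apache.thrift.transport.TTransportException;

    // Illustrative sketch only: open the underlying SASL transport inside a
    // doAs block so the GSSAPI negotiation sees the given user's credentials.
    class DoAsTransportSketch {
      private final TTransport wrapped;
      private final UserGroupInformation ugi;

      DoAsTransportSketch(TTransport wrapped, UserGroupInformation ugi) {
        this.wrapped = wrapped;
        this.ugi = ugi;
      }

      void open() throws TTransportException {
        try {
          ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
            wrapped.open(); // GSSAPI handshake happens here, as 'ugi'
            return null;
          });
        } catch (Exception e) {
          // doAs rethrows checked failures (possibly wrapped); collapse them
          // to the Thrift exception type for this sketch
          throw new TTransportException(e);
        }
      }
    }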

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
deleted file mode 100644
index 72b51eb..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LargeRowIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(LargeRowIT.class);
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 2, MemoryUnit.BYTE);
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  private static final int SEED = 42;
-  private static final int NUM_ROWS = 100;
-  private static final int ROW_SIZE = 1 << 17;
-  private static final int NUM_PRE_SPLITS = 9;
-  private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
-
-  private String REG_TABLE_NAME;
-  private String PRE_SPLIT_TABLE_NAME;
-  private int timeoutFactor = 1;
-  private String tservMajcDelay;
-
-  @Before
-  public void getTimeoutFactor() throws Exception {
-    try {
-      timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
-    } catch (NumberFormatException e) {
-      log.warn("Could not parse property value for 'timeout.factor' as integer: " + System.getProperty("timeout.factor"));
-    }
-
-    Assert.assertTrue("Timeout factor must be greater than or equal to 1", timeoutFactor >= 1);
-
-    String[] names = getUniqueNames(2);
-    REG_TABLE_NAME = names[0];
-    PRE_SPLIT_TABLE_NAME = names[1];
-
-    Connector c = getConnector();
-    tservMajcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
-    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
-  }
-
-  @After
-  public void resetMajcDelay() throws Exception {
-    if (null != tservMajcDelay) {
-      Connector conn = getConnector();
-      conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
-    }
-  }
-
-  @Test
-  public void run() throws Exception {
-    Random r = new Random();
-    byte rowData[] = new byte[ROW_SIZE];
-    r.setSeed(SEED + 1);
-    TreeSet<Text> splitPoints = new TreeSet<Text>();
-    for (int i = 0; i < NUM_PRE_SPLITS; i++) {
-      r.nextBytes(rowData);
-      TestIngest.toPrintableChars(rowData);
-      splitPoints.add(new Text(rowData));
-    }
-    Connector c = getConnector();
-    c.tableOperations().create(REG_TABLE_NAME);
-    c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
-    c.tableOperations().setProperty(PRE_SPLIT_TABLE_NAME, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "256K");
-    UtilWaitThread.sleep(3 * 1000);
-    c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
-    test1(c);
-    test2(c);
-  }
-
-  private void test1(Connector c) throws Exception {
-
-    basicTest(c, REG_TABLE_NAME, 0);
-
-    c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
-
-    UtilWaitThread.sleep(timeoutFactor * 12000);
-    log.info("checking splits");
-    FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
-
-    verify(c, REG_TABLE_NAME);
-  }
-
-  private void test2(Connector c) throws Exception {
-    basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
-  }
-
-  private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
-    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
-
-    Random r = new Random();
-    byte rowData[] = new byte[ROW_SIZE];
-
-    r.setSeed(SEED);
-
-    for (int i = 0; i < NUM_ROWS; i++) {
-
-      r.nextBytes(rowData);
-      TestIngest.toPrintableChars(rowData);
-
-      Mutation mut = new Mutation(new Text(rowData));
-      mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(UTF_8)));
-      bw.addMutation(mut);
-    }
-
-    bw.close();
-
-    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
-
-    verify(c, table);
-
-    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
-
-    c.tableOperations().flush(table, null, null, false);
-
-    // verify while table flush is running
-    verify(c, table);
-
-    // give split time to complete
-    c.tableOperations().flush(table, null, null, true);
-
-    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
-
-    verify(c, table);
-
-    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
-  }
-
-  private void verify(Connector c, String table) throws Exception {
-    Random r = new Random();
-    byte rowData[] = new byte[ROW_SIZE];
-
-    r.setSeed(SEED);
-
-    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
-
-    for (int i = 0; i < NUM_ROWS; i++) {
-
-      r.nextBytes(rowData);
-      TestIngest.toPrintableChars(rowData);
-
-      scanner.setRange(new Range(new Text(rowData)));
-
-      int count = 0;
-
-      for (Entry<Key,Value> entry : scanner) {
-        if (!entry.getKey().getRow().equals(new Text(rowData))) {
-          throw new Exception("verification failed, unexpected row i =" + i);
-        }
-        if (!entry.getValue().equals(Integer.toString(i).getBytes(UTF_8))) {
-          throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
-        }
-        count++;
-      }
-
-      if (count != 1) {
-        throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
-      }
-
-    }
-
-  }
-
-}
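
For reference, the constants in LargeRowIT above pencil out as follows; a quick sketch, with all values copied from the test:

    // Back-of-the-envelope for LargeRowIT's split threshold.
    public class LargeRowMath {
      public static void main(String[] args) {
        int rowSize = 1 << 17;                              // 131072 bytes per row
        int numRows = 100;                                  // ~12.5 MB of data in total
        int numPreSplits = 9;
        int splitThresh = rowSize * numRows / numPreSplits; // 1456355 bytes, ~1.4 MB
        System.out.println(splitThresh);
        // After lowering TABLE_SPLIT_THRESHOLD to this value, checkSplits accepts
        // anywhere from numPreSplits / 2 = 4 to numPreSplits * 4 = 36 splits.
      }
    }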

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
deleted file mode 100644
index 9c310f0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Collections;
-
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-/**
- * Fake the "tablet server stops talking but holds its lock" problem we see when hard drives and NFS fail. Start a ZombieTServer, and see that the master stops it.
- */
-public class LateLastContactIT extends ConfigurableMacBase {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setSiteConfig(Collections.singletonMap(Property.GENERAL_RPC_TIMEOUT.getKey(), "2s"));
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Process zombie = cluster.exec(ZombieTServer.class);
-    assertEquals(0, zombie.waitFor());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
deleted file mode 100644
index 1e7fef0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LogicalTimeIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(LogicalTimeIT.class);
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  @Test
-  public void run() throws Exception {
-    int tc = 0;
-    String tableName = getUniqueNames(1)[0];
-    Connector c = getConnector();
-    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
-    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
-
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
-
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
-    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
-
-  }
-
-  private void runMergeTest(Connector conn, String table, String[] splits, String[] inserts, String start, String end, String last, long expected)
-      throws Exception {
-    log.info("table " + table);
-    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
-    TreeSet<Text> splitSet = new TreeSet<Text>();
-    for (String split : splits) {
-      splitSet.add(new Text(split));
-    }
-    conn.tableOperations().addSplits(table, splitSet);
-
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
-    for (String row : inserts) {
-      Mutation m = new Mutation(row);
-      m.put("cf", "cq", "v");
-      bw.addMutation(m);
-    }
-
-    bw.flush();
-
-    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
-
-    Mutation m = new Mutation(last);
-    m.put("cf", "cq", "v");
-    bw.addMutation(m);
-    bw.flush();
-
-    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
-    scanner.setRange(new Range(last));
-
-    bw.close();
-
-    long time = scanner.iterator().next().getKey().getTimestamp();
-    if (time != expected)
-      throw new RuntimeException("unexpected time " + time + " " + expected);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
deleted file mode 100644
index 8c4666c..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.Collections;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.Base64;
-import org.apache.accumulo.examples.simple.mapreduce.RowHash;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MapReduceIT extends ConfigurableMacBase {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir") + "/target/hadoop-tmp";
-
-  static final String tablename = "mapredf";
-  static final String input_cf = "cf-HASHTYPE";
-  static final String input_cq = "cq-NOTHASHED";
-  static final String input_cfcq = input_cf + ":" + input_cq;
-  static final String output_cq = "cq-MD4BASE64";
-  static final String output_cfcq = input_cf + ":" + output_cq;
-
-  @Test
-  public void test() throws Exception {
-    runTest(getConnector(), getCluster());
-  }
-
-  static void runTest(Connector c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException,
-      TableNotFoundException, MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException {
-    c.tableOperations().create(tablename);
-    BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
-    for (int i = 0; i < 10; i++) {
-      Mutation m = new Mutation("" + i);
-      m.put(input_cf, input_cq, "row" + i);
-      bw.addMutation(m);
-    }
-    bw.close();
-    Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i", c.getInstance().getInstanceName(), "-z", c.getInstance()
-        .getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
-    assertEquals(0, hash.waitFor());
-
-    Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
-    s.fetchColumn(new Text(input_cf), new Text(output_cq));
-    int i = 0;
-    for (Entry<Key,Value> entry : s) {
-      MessageDigest md = MessageDigest.getInstance("MD5");
-      byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
-      assertEquals(entry.getValue().toString(), new String(check));
-      i++;
-    }
-
-  }
-}
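
The verification loop above recomputes each expected cell as the Base64-encoded MD5 digest of the original value (note the output_cq name says MD4, but the digest requested is MD5). The same expected value can be computed with JDK classes alone; a standalone sketch using java.util.Base64 in place of Accumulo's Base64 helper:

    import static java.nio.charset.StandardCharsets.UTF_8;

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Base64;

    // Recomputes the value MapReduceIT expects for row 0: Base64(MD5("row0")).
    public class RowHashCheckSketch {
      public static void main(String[] args) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("MD5");
        String expected = Base64.getEncoder().encodeToString(md.digest("row0".getBytes(UTF_8)));
        System.out.println(expected);
      }
    }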

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
deleted file mode 100644
index 72f8ce7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.io.FileNotFoundException;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.server.master.state.MetaDataTableScanner;
-import org.apache.accumulo.server.master.state.TabletLocationState;
-import org.apache.commons.configuration.ConfigurationException;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MasterAssignmentIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Connector c = getConnector();
-    String tableName = super.getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String tableId = c.tableOperations().tableIdMap().get(tableName);
-    // wait for the table to be online
-    TabletLocationState newTablet;
-    do {
-      UtilWaitThread.sleep(250);
-      newTablet = getTabletLocationState(c, tableId);
-    } while (newTablet.current == null);
-    assertNull(newTablet.last);
-    assertNull(newTablet.future);
-
-    // put something in it
-    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-    Mutation m = new Mutation("a");
-    m.put("b", "c", "d");
-    bw.addMutation(m);
-    bw.close();
-    // give it a last location
-    c.tableOperations().flush(tableName, null, null, true);
-
-    TabletLocationState flushed = getTabletLocationState(c, tableId);
-    assertEquals(newTablet.current, flushed.current);
-    assertEquals(flushed.current, flushed.last);
-    assertNull(newTablet.future);
-
-    // take the tablet offline
-    c.tableOperations().offline(tableName, true);
-    TabletLocationState offline = getTabletLocationState(c, tableId);
-    assertNull(offline.future);
-    assertNull(offline.current);
-    assertEquals(flushed.current, offline.last);
-
-    // put it back online
-    c.tableOperations().online(tableName, true);
-    TabletLocationState online = getTabletLocationState(c, tableId);
-    assertNull(online.future);
-    assertNotNull(online.current);
-    assertEquals(online.current, online.last);
-  }
-
-  private TabletLocationState getTabletLocationState(Connector c, String tableId) throws FileNotFoundException, ConfigurationException {
-    Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
-    ClientContext context = new ClientContext(c.getInstance(), creds, getCluster().getClientConfig());
-    MetaDataTableScanner s = new MetaDataTableScanner(context, new Range(KeyExtent.getMetadataEntry(new Text(tableId), null)));
-    TabletLocationState tlState = s.next();
-    s.close();
-    return tlState;
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
deleted file mode 100644
index 3489c26..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Map;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class MasterFailoverIT extends AccumuloClusterHarness {
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s");
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 90;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Connector c = getConnector();
-    String[] names = getUniqueNames(2);
-    c.tableOperations().create(names[0]);
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.setTableName(names[0]);
-    ClientConfiguration clientConf = cluster.getClientConfig();
-    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      opts.updateKerberosCredentials(clientConf);
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-    }
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
-
-    ClusterControl control = cluster.getClusterControl();
-    control.stopAllServers(ServerType.MASTER);
-    // start up a new one
-    control.startAllServers(ServerType.MASTER);
-    // talk to it
-    c.tableOperations().rename(names[0], names[1]);
-    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-    vopts.setTableName(names[1]);
-    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      vopts.updateKerberosCredentials(clientConf);
-    } else {
-      vopts.setPrincipal(getAdminPrincipal());
-    }
-    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
deleted file mode 100644
index 6f08c1f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * A functional test that exercises hitting the max open file limit on a tablet server. This test assumes that one or two tablet servers are running.
- */
-
-public class MaxOpenIT extends AccumuloClusterHarness {
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> conf = cfg.getSiteConfig();
-    conf.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "4");
-    conf.put(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
-    conf.put(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "2");
-    cfg.setSiteConfig(conf);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 3 * 60;
-  }
-
-  private String scanMaxOpenFiles, majcConcurrent, majcThreadMaxOpen;
-
-  @Before
-  public void alterConfig() throws Exception {
-    InstanceOperations iops = getConnector().instanceOperations();
-    Map<String,String> sysConfig = iops.getSystemConfiguration();
-    scanMaxOpenFiles = sysConfig.get(Property.TSERV_SCAN_MAX_OPENFILES.getKey());
-    majcConcurrent = sysConfig.get(Property.TSERV_MAJC_MAXCONCURRENT.getKey());
-    majcThreadMaxOpen = sysConfig.get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey());
-  }
-
-  @After
-  public void restoreConfig() throws Exception {
-    InstanceOperations iops = getConnector().instanceOperations();
-    if (null != scanMaxOpenFiles) {
-      iops.setProperty(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), scanMaxOpenFiles);
-    }
-    if (null != majcConcurrent) {
-      iops.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), majcConcurrent);
-    }
-    if (null != majcThreadMaxOpen) {
-      iops.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), majcThreadMaxOpen);
-    }
-  }
-
-  private static final int NUM_TABLETS = 16;
-  private static final int NUM_TO_INGEST = 10000;
-
-  @Test
-  public void run() throws Exception {
-    final Connector c = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-    final ClientConfiguration clientConf = cluster.getClientConfig();
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "10");
-    c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
-
-    // the following loop should create three map files in each tablet
-    for (int i = 0; i < 3; i++) {
-      TestIngest.Opts opts = new TestIngest.Opts();
-      opts.timestamp = i;
-      opts.dataSize = 50;
-      opts.rows = NUM_TO_INGEST;
-      opts.cols = 1;
-      opts.random = i;
-      opts.setTableName(tableName);
-      if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-        opts.updateKerberosCredentials(clientConf);
-      } else {
-        opts.setPrincipal(getAdminPrincipal());
-      }
-      TestIngest.ingest(c, opts, new BatchWriterOpts());
-
-      c.tableOperations().flush(tableName, null, null, true);
-      FunctionalTestUtils.checkRFiles(c, tableName, NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
-    }
-
-    List<Range> ranges = new ArrayList<Range>(NUM_TO_INGEST);
-
-    for (int i = 0; i < NUM_TO_INGEST; i++) {
-      ranges.add(new Range(TestIngest.generateRow(i, 0)));
-    }
-
-    long time1 = batchScan(c, tableName, ranges, 1);
-    // run it again, now that stuff is cached on the client and server
-    time1 = batchScan(c, tableName, ranges, 1);
-    long time2 = batchScan(c, tableName, ranges, NUM_TABLETS);
-
-    System.out.printf("Single thread scan time   %6.2f %n", time1 / 1000.0);
-    System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
-
-  }
-
-  private long batchScan(Connector c, String tableName, List<Range> ranges, int threads) throws Exception {
-    BatchScanner bs = c.createBatchScanner(tableName, TestIngest.AUTHS, threads);
-
-    bs.setRanges(ranges);
-
-    int count = 0;
-
-    long t1 = System.currentTimeMillis();
-
-    byte rval[] = new byte[50];
-    Random random = new Random();
-
-    for (Entry<Key,Value> entry : bs) {
-      count++;
-      int row = VerifyIngest.getRow(entry.getKey());
-      int col = VerifyIngest.getCol(entry.getKey());
-
-      if (row < 0 || row >= NUM_TO_INGEST) {
-        throw new Exception("unexcepted row " + row);
-      }
-
-      rval = TestIngest.genRandomValue(random, rval, 2, row, col);
-
-      if (entry.getValue().compareTo(rval) != 0) {
-        throw new Exception("unexcepted value row=" + row + " col=" + col);
-      }
-    }
-
-    long t2 = System.currentTimeMillis();
-
-    bs.close();
-
-    if (count != NUM_TO_INGEST) {
-      throw new Exception("Batch Scan did not return expected number of values " + count);
-    }
-
-    return t2 - t1;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
deleted file mode 100644
index 9e3e8b6..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.Merge;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MergeIT extends AccumuloClusterHarness {
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 8 * 60;
-  }
-
-  SortedSet<Text> splits(String[] points) {
-    SortedSet<Text> result = new TreeSet<Text>();
-    for (String point : points)
-      result.add(new Text(point));
-    return result;
-  }
-
-  @Test
-  public void merge() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
-    BatchWriter bw = c.createBatchWriter(tableName, null);
-    for (String row : "a b c d e f g h i j k".split(" ")) {
-      Mutation m = new Mutation(row);
-      m.put("cf", "cq", "value");
-      bw.addMutation(m);
-    }
-    bw.close();
-    c.tableOperations().flush(tableName, null, null, true);
-    c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
-    assertEquals(8, c.tableOperations().listSplits(tableName).size());
-  }
-
-  @Test
-  public void mergeSize() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
-    BatchWriter bw = c.createBatchWriter(tableName, null);
-    for (String row : "c e f y".split(" ")) {
-      Mutation m = new Mutation(row);
-      m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
-      bw.addMutation(m);
-    }
-    bw.close();
-    c.tableOperations().flush(tableName, null, null, true);
-    Merge merge = new Merge();
-    merge.mergomatic(c, tableName, null, null, 100, false);
-    assertArrayEquals("b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
-    merge.mergomatic(c, tableName, null, null, 100, true);
-    assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
-  }
-
-  private String[] toStrings(Collection<Text> listSplits) {
-    String[] result = new String[listSplits.size()];
-    int i = 0;
-    for (Text t : listSplits) {
-      result[i++] = t.toString();
-    }
-    return result;
-  }
-
-  private String[] ns(String... strings) {
-    return strings;
-  }
-
-  @Test
-  public void mergeTest() throws Exception {
-    int tc = 0;
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    runMergeTest(c, tableName + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
-
-    runMergeTest(c, tableName + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
-    runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
-    runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
-
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
-    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
-
-  }
-
-  private void runMergeTest(Connector c, String table, String[] splits, String[] expectedSplits, String[] inserts, String[] start, String[] end)
-      throws Exception {
-    int count = 0;
-
-    for (String s : start) {
-      for (String e : end) {
-        runMergeTest(c, table + "_" + count++, splits, expectedSplits, inserts, s, e);
-      }
-    }
-  }
-
-  private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end)
-      throws Exception {
-    System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
-
-    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
-    TreeSet<Text> splitSet = new TreeSet<Text>();
-    for (String split : splits) {
-      splitSet.add(new Text(split));
-    }
-    conn.tableOperations().addSplits(table, splitSet);
-
-    BatchWriter bw = conn.createBatchWriter(table, null);
-    HashSet<String> expected = new HashSet<String>();
-    for (String row : inserts) {
-      Mutation m = new Mutation(row);
-      m.put("cf", "cq", row);
-      bw.addMutation(m);
-      expected.add(row);
-    }
-
-    bw.close();
-
-    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
-
-    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
-
-    HashSet<String> observed = new HashSet<String>();
-    for (Entry<Key,Value> entry : scanner) {
-      String row = entry.getKey().getRowData().toString();
-      if (!observed.add(row)) {
-        throw new Exception("Saw data twice " + table + " " + row);
-      }
-    }
-
-    if (!observed.equals(expected)) {
-      throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
-    }
-
-    HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
-    HashSet<Text> ess = new HashSet<Text>();
-    for (String es : expectedSplits) {
-      ess.add(new Text(es));
-    }
-
-    if (!currentSplits.equals(ess)) {
-      throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
-    }
-
-  }
-
-}
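
Similarly, a minimal sketch (not part of the diff) of the tablet-merge
pattern MergeIT covered; the table name and split points are illustrative,
and a live Connector is assumed.

import java.util.TreeSet;

import org.apache.accumulo.core.client.Connector;
import org.apache.hadoop.io.Text;

public class MergeSketch {
  // Illustrative sketch, not from the deleted file.
  static void mergeMiddle(Connector c, String table) throws Exception {
    c.tableOperations().create(table);
    TreeSet<Text> splits = new TreeSet<Text>();
    for (String s : "a b c d e".split(" "))
      splits.add(new Text(s));
    c.tableOperations().addSplits(table, splits);
    // Merge the tablets falling inside (b1, d1): splits "c" and "d" are
    // removed, leaving "a", "b" and "e", matching MergeIT's assertions.
    c.tableOperations().merge(table, new Text("b1"), new Text("d1"));
  }
}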

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MetadataIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataIT.java
deleted file mode 100644
index 9455456..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataIT.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class MetadataIT extends AccumuloClusterHarness {
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(1);
-  }
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void testFlushAndCompact() throws Exception {
-    Connector c = getConnector();
-    String[] tableNames = getUniqueNames(2);
-
-    // create a table to write some data to metadata table
-    c.tableOperations().create(tableNames[0]);
-
-    Scanner rootScanner = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
-    rootScanner.setRange(MetadataSchema.TabletsSection.getRange());
-    rootScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
-    Set<String> files1 = new HashSet<String>();
-    for (Entry<Key,Value> entry : rootScanner)
-      files1.add(entry.getKey().getColumnQualifier().toString());
-
-    c.tableOperations().create(tableNames[1]);
-    c.tableOperations().flush(MetadataTable.NAME, null, null, true);
-
-    Set<String> files2 = new HashSet<String>();
-    for (Entry<Key,Value> entry : rootScanner)
-      files2.add(entry.getKey().getColumnQualifier().toString());
-
-    // flush of metadata table should change file set in root table
-    Assert.assertTrue(files2.size() > 0);
-    Assert.assertNotEquals(files1, files2);
-
-    c.tableOperations().compact(MetadataTable.NAME, null, null, false, true);
-
-    Set<String> files3 = new HashSet<String>();
-    for (Entry<Key,Value> entry : rootScanner)
-      files3.add(entry.getKey().getColumnQualifier().toString());
-
-    // compaction of metadata table should change file set in root table
-    Assert.assertNotEquals(files2, files3);
-  }
-
-  @Test
-  public void mergeMeta() throws Exception {
-    Connector c = getConnector();
-    String[] names = getUniqueNames(5);
-    SortedSet<Text> splits = new TreeSet<Text>();
-    for (String id : "1 2 3 4 5".split(" ")) {
-      splits.add(new Text(id));
-    }
-    c.tableOperations().addSplits(MetadataTable.NAME, splits);
-    for (String tableName : names) {
-      c.tableOperations().create(tableName);
-    }
-    c.tableOperations().merge(MetadataTable.NAME, null, null);
-    Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
-    s.setRange(MetadataSchema.DeletesSection.getRange());
-    while (Iterators.size(s.iterator()) == 0) {
-      UtilWaitThread.sleep(100);
-    }
-    assertEquals(0, c.tableOperations().listSplits(MetadataTable.NAME).size());
-  }
-
-  @Test
-  public void batchScanTest() throws Exception {
-    Connector c = getConnector();
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-
-    // batch scan regular metadata table
-    BatchScanner s = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
-    s.setRanges(Collections.singleton(new Range()));
-    int count = 0;
-    for (Entry<Key,Value> e : s) {
-      if (e != null)
-        count++;
-    }
-    s.close();
-    assertTrue(count > 0);
-
-    // batch scan root metadata table
-    s = c.createBatchScanner(RootTable.NAME, Authorizations.EMPTY, 1);
-    s.setRanges(Collections.singleton(new Range()));
-    count = 0;
-    for (Entry<Key,Value> e : s) {
-      if (e != null)
-        count++;
-    }
-    s.close();
-    assertTrue(count > 0);
-  }
-
-}
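
A companion sketch (not part of the diff) of reading the metadata table's
file entries out of the root table, as testFlushAndCompact did; it assumes
a live Connector, and the scan calls are taken directly from the deleted test.

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.security.Authorizations;

public class RootFilesSketch {
  // Illustrative sketch, not from the deleted file.
  static void printMetadataFiles(Connector c) throws Exception {
    Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.TabletsSection.getRange());
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    for (Entry<Key,Value> e : s) {
      // The column qualifier names an RFile backing a metadata tablet.
      System.out.println(e.getKey().getColumnQualifier());
    }
  }
}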

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
deleted file mode 100644
index 086dd1a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MetadataMaxFilesIT extends ConfigurableMacBase {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> siteConfig = new HashMap<String,String>();
-    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
-    siteConfig.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "10");
-    cfg.setSiteConfig(siteConfig);
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Connector c = getConnector();
-    SortedSet<Text> splits = new TreeSet<Text>();
-    for (int i = 0; i < 1000; i++) {
-      splits.add(new Text(String.format("%03d", i)));
-    }
-    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10000");
-    for (int i = 0; i < 5; i++) {
-      String tableName = "table" + i;
-      log.info("Creating " + tableName);
-      c.tableOperations().create(tableName);
-      log.info("adding splits");
-      c.tableOperations().addSplits(tableName, splits);
-      log.info("flushing");
-      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
-      c.tableOperations().flush(RootTable.NAME, null, null, true);
-    }
-    UtilWaitThread.sleep(20 * 1000);
-    log.info("shutting down");
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-    cluster.stop();
-    log.info("starting up");
-    cluster.start();
-
-    UtilWaitThread.sleep(30 * 1000);
-
-    while (true) {
-      MasterMonitorInfo stats = null;
-      Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
-      Client client = null;
-      try {
-        ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
-        client = MasterClient.getConnectionWithRetry(context);
-        stats = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
-      } finally {
-        if (client != null)
-          MasterClient.close(client);
-      }
-      int tablets = 0;
-      for (TabletServerStatus tserver : stats.tServerInfo) {
-        for (Entry<String,TableInfo> entry : tserver.tableMap.entrySet()) {
-          if (entry.getKey().startsWith("!") || entry.getKey().startsWith("+"))
-            continue;
-          tablets += entry.getValue().onlineTablets;
-        }
-      }
-      if (tablets == 5005)
-        break;
-      UtilWaitThread.sleep(1000);
-    }
-  }
-}
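
A sketch (not part of the diff) of the tablet-count polling in
MetadataMaxFilesIT's wait loop; the "root" credentials, the password
parameter, and the ClientConfiguration.loadDefault() call are assumptions
for illustration, standing in for the test harness plumbing.

import java.util.Map.Entry;

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.impl.ClientContext;
import org.apache.accumulo.core.client.impl.Credentials;
import org.apache.accumulo.core.client.impl.MasterClient;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
import org.apache.accumulo.core.master.thrift.TableInfo;
import org.apache.accumulo.core.master.thrift.TabletServerStatus;
import org.apache.accumulo.core.trace.Tracer;

public class TabletCountSketch {
  // Illustrative sketch, not from the deleted file.
  static int onlineUserTablets(Connector c, String rootPassword) throws Exception {
    Credentials creds = new Credentials("root", new PasswordToken(rootPassword));
    ClientContext context = new ClientContext(c.getInstance(), creds, ClientConfiguration.loadDefault());
    Client client = null;
    MasterMonitorInfo stats;
    try {
      client = MasterClient.getConnectionWithRetry(context);
      stats = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
    } finally {
      if (client != null)
        MasterClient.close(client);
    }
    int tablets = 0;
    for (TabletServerStatus tserver : stats.tServerInfo) {
      for (Entry<String,TableInfo> e : tserver.tableMap.entrySet()) {
        // Skip metadata ("!") and root ("+") table entries.
        if (e.getKey().startsWith("!") || e.getKey().startsWith("+"))
          continue;
        tablets += e.getValue().onlineTablets;
      }
    }
    return tablets;
  }
}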

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
deleted file mode 100644
index ab2c791..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class MetadataSplitIT extends ConfigurableMacBase {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "100ms"));
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Connector c = getConnector();
-    assertEquals(1, c.tableOperations().listSplits(MetadataTable.NAME).size());
-    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
-    for (int i = 0; i < 10; i++) {
-      c.tableOperations().create("table" + i);
-      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
-    }
-    UtilWaitThread.sleep(10 * 1000);
-    assertTrue(c.tableOperations().listSplits(MetadataTable.NAME).size() > 2);
-  }
-}
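
A short sketch (not part of the diff) of the split-threshold adjustment
MetadataSplitIT relied on; a live Connector is assumed, and "500" is the
byte threshold the deleted test used.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.metadata.MetadataTable;

public class SplitThresholdSketch {
  // Illustrative sketch, not from the deleted file.
  static void shrinkMetadataSplitThreshold(Connector c) throws Exception {
    // With a 500-byte threshold, metadata tablets split after only a
    // handful of table creations and flushes.
    c.tableOperations().setProperty(MetadataTable.NAME,
        Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
  }
}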

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
deleted file mode 100644
index c59c52e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.net.URL;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.monitor.Monitor;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.commons.io.FileUtils;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class MonitorLoggingIT extends ConfigurableMacBase {
-  private static final Logger log = LoggerFactory.getLogger(MonitorLoggingIT.class);
-
-  @Override
-  public void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {
-    cfg.setNumTservers(1);
-    File confDir = cfg.getConfDir();
-    try {
-      FileUtils.copyFileToDirectory(new File(MonitorLoggingIT.class.getResource("/conf/generic_logger.xml").toURI()), confDir);
-      FileUtils.copyFileToDirectory(new File(MonitorLoggingIT.class.getResource("/conf/monitor_logger.xml").toURI()), confDir);
-    } catch (Exception e) {
-      log.error("Failed to copy Log4J XML files to conf dir", e);
-    }
-  }
-
-  private static final int NUM_LOCATION_PASSES = 5;
-  private static final int LOCATION_DELAY_SECS = 5;
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 30 + ((NUM_LOCATION_PASSES + 2) * LOCATION_DELAY_SECS);
-  }
-
-  @Test
-  public void logToMonitor() throws Exception {
-    // Start the monitor.
-    log.debug("Starting Monitor");
-    Process monitor = cluster.exec(Monitor.class);
-
-    // Get monitor location to ensure it is running.
-    String monitorLocation = null;
-    for (int i = 0; i < NUM_LOCATION_PASSES; i++) {
-      Thread.sleep(LOCATION_DELAY_SECS * 1000);
-      try {
-        monitorLocation = getMonitor();
-        break;
-      } catch (KeeperException e) {
-        log.debug("Monitor not up yet, trying again in " + LOCATION_DELAY_SECS + " secs");
-      }
-    }
-    assertNotNull("Monitor failed to start within " + (LOCATION_DELAY_SECS * NUM_LOCATION_PASSES) + " secs", monitorLocation);
-    log.debug("Monitor running at " + monitorLocation);
-
-    // The tserver has to observe that the log-forwarding address
-    // changed in ZooKeeper. If we cause the error before the tserver
-    // updates, we'll never see the error on the monitor.
-    Thread.sleep(10000);
-
-    // Attempt a scan with an invalid iterator to force a log message in the monitor.
-    try {
-      Connector c = getConnector();
-      Scanner s = c.createScanner("accumulo.root", new Authorizations());
-      IteratorSetting cfg = new IteratorSetting(100, "incorrect", "java.lang.String");
-      s.addScanIterator(cfg);
-      s.iterator().next();
-    } catch (RuntimeException e) {
-      // expected, the iterator was bad
-    }
-
-    String result = "";
-    while (true) {
-      Thread.sleep(LOCATION_DELAY_SECS * 1000); // extra precaution to ensure monitor has opportunity to log
-
-      // Verify messages were received at the monitor.
-      URL url = new URL("http://" + monitorLocation + "/log");
-      log.debug("Fetching web page " + url);
-      result = FunctionalTestUtils.readAll(url.openStream());
-      if (result.contains("<pre class='logevent'>")) {
-        break;
-      }
-      log.debug("No messages found, waiting a little longer...");
-    }
-
-    assertTrue("No log messages found", result.contains("<pre class='logevent'>"));
-
-    // Shutdown cleanly.
-    log.debug("Stopping mini accumulo cluster");
-    Process shutdown = cluster.exec(Admin.class, "stopAll");
-    shutdown.waitFor();
-    assertTrue(shutdown.exitValue() == 0);
-    log.debug("success!");
-    monitor.destroy();
-  }
-}
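
Finally, a sketch (not part of the diff) of the invalid-iterator trick
MonitorLoggingIT used to force a server-side error; it assumes a live
Connector, and the iterator name and priority mirror the deleted test.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.security.Authorizations;

public class BadIteratorSketch {
  // Illustrative sketch, not from the deleted file.
  static void provokeScanError(Connector c) throws Exception {
    Scanner s = c.createScanner("accumulo.root", new Authorizations());
    // java.lang.String is not an iterator class, so the tserver fails to
    // load it and logs an error that the monitor then displays.
    s.addScanIterator(new IteratorSetting(100, "incorrect", "java.lang.String"));
    try {
      s.iterator().next();
    } catch (RuntimeException expected) {
      // expected: the scan fails because the iterator class is invalid
    }
  }
}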

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MonitorSslIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MonitorSslIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MonitorSslIT.java
deleted file mode 100644
index 7283c4d..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MonitorSslIT.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.net.URL;
-import java.security.KeyManagementException;
-import java.security.NoSuchAlgorithmException;
-import java.security.SecureRandom;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.Map;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.KeyManager;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLSession;
-import javax.net.ssl.TrustManager;
-import javax.net.ssl.X509TrustManager;
-
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.MonitorUtil;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Check SSL for the Monitor.
- *
- */
-public class MonitorSslIT extends ConfigurableMacBase {
-  @BeforeClass
-  public static void initHttps() throws NoSuchAlgorithmException, KeyManagementException {
-    SSLContext ctx = SSLContext.getInstance("SSL");
-    TrustManager[] tm = new TrustManager[] {new TestTrustManager()};
-    ctx.init(new KeyManager[0], tm, new SecureRandom());
-    SSLContext.setDefault(ctx);
-    HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
-    HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
-  }
-
-  private static class TestTrustManager implements X509TrustManager {
-    @Override
-    public void checkClientTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
-
-    @Override
-    public void checkServerTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
-
-    @Override
-    public X509Certificate[] getAcceptedIssuers() {
-      return null;
-    }
-  }
-
-  private static class TestHostnameVerifier implements HostnameVerifier {
-    @Override
-    public boolean verify(String hostname, SSLSession session) {
-      return true;
-    }
-  }
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 6 * 60;
-  }
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    super.configure(cfg, hadoopCoreSite);
-    File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
-    configureForSsl(cfg, getSslDir(baseDir));
-    Map<String,String> siteConfig = cfg.getSiteConfig();
-    siteConfig.put(Property.MONITOR_SSL_KEYSTORE.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey()));
-    siteConfig.put(Property.MONITOR_SSL_KEYSTOREPASS.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey()));
-    if (siteConfig.containsKey(Property.RPC_SSL_KEYSTORE_TYPE.getKey())) {
-      siteConfig.put(Property.MONITOR_SSL_KEYSTORETYPE.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_TYPE.getKey()));
-    } else {
-      siteConfig.put(Property.MONITOR_SSL_KEYSTORETYPE.getKey(), Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue());
-    }
-    siteConfig.put(Property.MONITOR_SSL_TRUSTSTORE.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey()));
-    siteConfig.put(Property.MONITOR_SSL_TRUSTSTOREPASS.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey()));
-    if (siteConfig.containsKey(Property.RPC_SSL_TRUSTSTORE_TYPE.getKey())) {
-      siteConfig.put(Property.MONITOR_SSL_TRUSTSTORETYPE.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_TYPE.getKey()));
-    } else {
-      siteConfig.put(Property.MONITOR_SSL_TRUSTSTORETYPE.getKey(), Property.RPC_SSL_TRUSTSTORE_TYPE.getDefaultValue());
-    }
-    cfg.setSiteConfig(siteConfig);
-  }
-
-  @Test
-  public void test() throws Exception {
-    log.debug("Starting Monitor");
-    cluster.getClusterControl().startAllServers(ServerType.MONITOR);
-    String monitorLocation = null;
-    while (null == monitorLocation) {
-      try {
-        monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
-      } catch (Exception e) {
-        // ignored
-      }
-      if (null == monitorLocation) {
-        log.debug("Could not fetch monitor HTTP address from zookeeper");
-        Thread.sleep(2000);
-      }
-    }
-    URL url = new URL("https://" + monitorLocation);
-    log.debug("Fetching web page " + url);
-    String result = FunctionalTestUtils.readAll(url.openStream());
-    assertTrue(result.length() > 100);
-    assertTrue(result.contains("Accumulo Overview"));
-  }
-
-}