Posted to commits@accumulo.apache.org by ec...@apache.org on 2015/06/04 20:53:13 UTC

[32/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java b/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
new file mode 100644
index 0000000..aa8313e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
@@ -0,0 +1,573 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
+import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
+import org.apache.accumulo.core.client.impl.DelegationTokenImpl;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.harness.AccumuloITBase;
+import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
+import org.apache.accumulo.harness.MiniClusterHarness;
+import org.apache.accumulo.harness.TestingKdc;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+
+/**
+ * MAC test which uses {@link MiniKdc} to simulate a secure environment. Can be used as a sanity check for Kerberos/SASL testing.
+ */
+public class KerberosIT extends AccumuloITBase {
+  private static final Logger log = LoggerFactory.getLogger(KerberosIT.class);
+
+  private static TestingKdc kdc;
+  private static String krbEnabledForITs = null;
+  private static ClusterUser rootUser;
+
+  @BeforeClass
+  public static void startKdc() throws Exception {
+    kdc = new TestingKdc();
+    kdc.start();
+    krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
+    if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
+      System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
+    }
+    rootUser = kdc.getRootUser();
+  }
+
+  @AfterClass
+  public static void stopKdc() throws Exception {
+    if (null != kdc) {
+      kdc.stop();
+    }
+    if (null != krbEnabledForITs) {
+      System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
+    }
+  }
+
+  @Override
+  public int defaultTimeoutSeconds() {
+    return 60 * 5;
+  }
+
+  private MiniAccumuloClusterImpl mac;
+
+  @Before
+  public void startMac() throws Exception {
+    MiniClusterHarness harness = new MiniClusterHarness();
+    mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() {
+
+      @Override
+      public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+        Map<String,String> site = cfg.getSiteConfig();
+        site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "10s");
+        cfg.setSiteConfig(site);
+      }
+
+    });
+
+    mac.getConfig().setNumTservers(1);
+    mac.start();
+    // Enable Kerberos auth
+    Configuration conf = new Configuration(false);
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+  }
+
+  @After
+  public void stopMac() throws Exception {
+    if (null != mac) {
+      mac.stop();
+    }
+  }
+
+  @Test
+  public void testAdminUser() throws Exception {
+    // Login as the client (provided to `accumulo init` as the "root" user)
+    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+
+    final Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+
+    // The "root" user should have all system permissions
+    for (SystemPermission perm : SystemPermission.values()) {
+      assertTrue("Expected user to have permission: " + perm, conn.securityOperations().hasSystemPermission(conn.whoami(), perm));
+    }
+
+    // and the ability to modify the root and metadata tables
+    for (String table : Arrays.asList(RootTable.NAME, MetadataTable.NAME)) {
+      assertTrue(conn.securityOperations().hasTablePermission(conn.whoami(), table, TablePermission.ALTER_TABLE));
+    }
+  }
+
+  @Test
+  public void testNewUser() throws Exception {
+    String newUser = testName.getMethodName();
+    final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab");
+    if (newUserKeytab.exists() && !newUserKeytab.delete()) {
+      log.warn("Unable to delete {}", newUserKeytab);
+    }
+
+    // Create a new user
+    kdc.createPrincipal(newUserKeytab, newUser);
+
+    newUser = kdc.qualifyUser(newUser);
+
+    // Login as the "root" user
+    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+    log.info("Created connector as {}", rootUser.getPrincipal());
+    assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+    // Force some RPC to happen server-side (which creates the system user); it should not show up in listLocalUsers
+    createTableWithDataAndCompact(conn);
+
+    HashSet<String> users = Sets.newHashSet(rootUser.getPrincipal());
+    assertEquals(users, conn.securityOperations().listLocalUsers());
+
+    // Switch to a new user
+    UserGroupInformation.loginUserFromKeytab(newUser, newUserKeytab.getAbsolutePath());
+    log.info("Logged in as {}", newUser);
+
+    conn = mac.getConnector(newUser, new KerberosToken());
+    log.info("Created connector as {}", newUser);
+    assertEquals(newUser, conn.whoami());
+
+    // The new user should have no system permissions
+    for (SystemPermission perm : SystemPermission.values()) {
+      assertFalse(conn.securityOperations().hasSystemPermission(newUser, perm));
+    }
+
+    users.add(newUser);
+
+    // Same users as before, plus the new user we just created
+    assertEquals(users, conn.securityOperations().listLocalUsers());
+  }
+
+  @Test
+  public void testUserPrivilegesThroughGrant() throws Exception {
+    String user1 = testName.getMethodName();
+    final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
+    if (user1Keytab.exists() && !user1Keytab.delete()) {
+      log.warn("Unable to delete {}", user1Keytab);
+    }
+
+    // Create some new users
+    kdc.createPrincipal(user1Keytab, user1);
+
+    user1 = kdc.qualifyUser(user1);
+
+    // Log in as user1
+    UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
+    log.info("Logged in as {}", user1);
+
+    // Indirectly creates this user when we use it
+    Connector conn = mac.getConnector(user1, new KerberosToken());
+    log.info("Created connector as {}", user1);
+
+    // The new user should have no system permissions
+    for (SystemPermission perm : SystemPermission.values()) {
+      assertFalse(conn.securityOperations().hasSystemPermission(user1, perm));
+    }
+
+    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+
+    conn.securityOperations().grantSystemPermission(user1, SystemPermission.CREATE_TABLE);
+
+    // Switch back to the original user
+    UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
+    conn = mac.getConnector(user1, new KerberosToken());
+
+    // Shouldn't throw an exception since we granted the create table permission
+    final String table = testName.getMethodName() + "_user_table";
+    conn.tableOperations().create(table);
+
+    // Make sure we can actually use the table we made
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    Mutation m = new Mutation("a");
+    m.put("b", "c", "d");
+    bw.addMutation(m);
+    bw.close();
+
+    conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
+  }
+
+  @Test
+  public void testUserPrivilegesForTable() throws Exception {
+    String user1 = testName.getMethodName();
+    final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
+    if (user1Keytab.exists() && !user1Keytab.delete()) {
+      log.warn("Unable to delete {}", user1Keytab);
+    }
+
+    // Create some new users -- cannot contain realm
+    kdc.createPrincipal(user1Keytab, user1);
+
+    user1 = kdc.qualifyUser(user1);
+
+    // Log in as user1
+    UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
+    log.info("Logged in as {}", user1);
+
+    // Indirectly creates this user when we use it
+    Connector conn = mac.getConnector(user1, new KerberosToken());
+    log.info("Created connector as {}", user1);
+
+    // The new user should have no system permissions
+    for (SystemPermission perm : SystemPermission.values()) {
+      assertFalse(conn.securityOperations().hasSystemPermission(user1, perm));
+    }
+
+    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+
+    final String table = testName.getMethodName() + "_user_table";
+    conn.tableOperations().create(table);
+
+    final String viz = "viz";
+
+    // Give our unprivileged user permission on the table we made for them
+    conn.securityOperations().grantTablePermission(user1, table, TablePermission.READ);
+    conn.securityOperations().grantTablePermission(user1, table, TablePermission.WRITE);
+    conn.securityOperations().grantTablePermission(user1, table, TablePermission.ALTER_TABLE);
+    conn.securityOperations().grantTablePermission(user1, table, TablePermission.DROP_TABLE);
+    conn.securityOperations().changeUserAuthorizations(user1, new Authorizations(viz));
+
+    // Switch back to the original user
+    UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
+    conn = mac.getConnector(user1, new KerberosToken());
+
+    // Make sure we can actually use the table we made
+
+    // Write data
+    final long ts = 1000l;
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    Mutation m = new Mutation("a");
+    m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d");
+    bw.addMutation(m);
+    bw.close();
+
+    // Compact
+    conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
+
+    // Alter
+    conn.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+
+    // Read (and proper authorizations)
+    Scanner s = conn.createScanner(table, new Authorizations(viz));
+    Iterator<Entry<Key,Value>> iter = s.iterator();
+    assertTrue("No results from iterator", iter.hasNext());
+    Entry<Key,Value> entry = iter.next();
+    assertEquals(new Key("a", "b", "c", viz, ts), entry.getKey());
+    assertEquals(new Value("d".getBytes()), entry.getValue());
+    assertFalse("Had more results from iterator", iter.hasNext());
+  }
+
+  @Test
+  public void testDelegationToken() throws Exception {
+    final String tableName = getUniqueNames(1)[0];
+
+    // Login as the "root" user
+    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    final int numRows = 100, numColumns = 10;
+
+    // As the "root" user, open up the connection and get a delegation token
+    final AuthenticationToken delegationToken = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+      @Override
+      public AuthenticationToken run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created connector as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+        conn.tableOperations().create(tableName);
+        BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+        for (int r = 0; r < numRows; r++) {
+          Mutation m = new Mutation(Integer.toString(r));
+          for (int c = 0; c < numColumns; c++) {
+            String col = Integer.toString(c);
+            m.put(col, col, col);
+          }
+          bw.addMutation(m);
+        }
+        bw.close();
+
+        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+      }
+    });
+
+    // The above login with keytab doesn't have a way to logout, so make a fake user that won't have krb credentials
+    UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
+    int recordsSeen = userWithoutPrivs.doAs(new PrivilegedExceptionAction<Integer>() {
+      @Override
+      public Integer run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken);
+
+        BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2);
+        bs.setRanges(Collections.singleton(new Range()));
+        int recordsSeen = Iterables.size(bs);
+        bs.close();
+        return recordsSeen;
+      }
+    });
+
+    assertEquals(numRows * numColumns, recordsSeen);
+  }
+
+  @Test
+  public void testDelegationTokenAsDifferentUser() throws Exception {
+    // Login as the "root" user
+    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    // As the "root" user, open up the connection and get a delegation token
+    Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+    log.info("Created connector as {}", rootUser.getPrincipal());
+    assertEquals(rootUser.getPrincipal(), conn.whoami());
+    final AuthenticationToken delegationToken = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+
+    // The above login with keytab doesn't have a way to logout, so make a fake user that won't have krb credentials
+    UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
+    try {
+      // Use the delegation token to try to log in as a different user
+      userWithoutPrivs.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          mac.getConnector("some_other_user", delegationToken);
+          return null;
+        }
+      });
+      fail("Using a delegation token as a different user should throw an exception");
+    } catch (UndeclaredThrowableException e) {
+      Throwable cause = e.getCause();
+      assertNotNull(cause);
+      // We should get an AccumuloSecurityException from trying to use a delegation token for the wrong user
+      assertTrue("Expected cause to be AccumuloSecurityException, but was " + cause.getClass(), cause instanceof AccumuloSecurityException);
+    }
+  }
+
+  @Test(expected = AccumuloSecurityException.class)
+  public void testGetDelegationTokenDenied() throws Exception {
+    String newUser = testName.getMethodName();
+    final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab");
+    if (newUserKeytab.exists() && !newUserKeytab.delete()) {
+      log.warn("Unable to delete {}", newUserKeytab);
+    }
+
+    // Create a new user
+    kdc.createPrincipal(newUserKeytab, newUser);
+
+    newUser = kdc.qualifyUser(newUser);
+
+    // Login as a normal user
+    UserGroupInformation.loginUserFromKeytab(newUser, newUserKeytab.getAbsolutePath());
+
+    // As the "root" user, open up the connection and get a delegation token
+    Connector conn = mac.getConnector(newUser, new KerberosToken());
+    log.info("Created connector as {}", newUser);
+    assertEquals(newUser, conn.whoami());
+
+    conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+  }
+
+  @Test
+  public void testRestartedMasterReusesSecretKey() throws Exception {
+    // Login as the "root" user
+    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    // As the "root" user, open up the connection and get a delegation token
+    final AuthenticationToken delegationToken1 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+      @Override
+      public AuthenticationToken run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created connector as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+        AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+
+        assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
+
+        return token;
+      }
+    });
+
+    log.info("Stopping master");
+    mac.getClusterControl().stop(ServerType.MASTER);
+    Thread.sleep(5000);
+    log.info("Restarting master");
+    mac.getClusterControl().start(ServerType.MASTER);
+
+    // Make sure our original token is still good
+    root.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken1);
+
+        assertTrue("Could not get tables with delegation token", conn.tableOperations().list().size() > 0);
+
+        return null;
+      }
+    });
+
+    // Get a new token, so we can compare the keyId on the second to the first
+    final AuthenticationToken delegationToken2 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+      @Override
+      public AuthenticationToken run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created connector as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+        AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+
+        assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
+
+        return token;
+      }
+    });
+
+    // A restarted master should reuse the same secret key if it hasn't expired (1 day by default)
+    DelegationTokenImpl dt1 = (DelegationTokenImpl) delegationToken1;
+    DelegationTokenImpl dt2 = (DelegationTokenImpl) delegationToken2;
+    assertEquals(dt1.getIdentifier().getKeyId(), dt2.getIdentifier().getKeyId());
+  }
+
+  @Test(expected = AccumuloException.class)
+  public void testDelegationTokenWithInvalidLifetime() throws Throwable {
+    // Login as the "root" user
+    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    // As the "root" user, open up the connection and get a delegation token
+    try {
+      root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+        @Override
+        public AuthenticationToken run() throws Exception {
+          Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+          log.info("Created connector as {}", rootUser.getPrincipal());
+          assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+          // Should fail
+          return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(Long.MAX_VALUE, TimeUnit.MILLISECONDS));
+        }
+      });
+    } catch (UndeclaredThrowableException e) {
+      Throwable cause = e.getCause();
+      if (null != cause) {
+        throw cause;
+      } else {
+        throw e;
+      }
+    }
+  }
+
+  @Test
+  public void testDelegationTokenWithReducedLifetime() throws Throwable {
+    // Login as the "root" user
+    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    log.info("Logged in as {}", rootUser.getPrincipal());
+
+    // As the "root" user, open up the connection and get a delegation token
+    final AuthenticationToken dt = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+      @Override
+      public AuthenticationToken run() throws Exception {
+        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created connector as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(5, TimeUnit.MINUTES));
+      }
+    });
+
+    AuthenticationTokenIdentifier identifier = ((DelegationTokenImpl) dt).getIdentifier();
+    assertTrue("Expected identifier to expire in no more than 5 minutes: " + identifier,
+        identifier.getExpirationDate() - identifier.getIssueDate() <= (5 * 60 * 1000));
+  }
+
+  /**
+   * Creates a table, adds a record to it, and then compacts the table. A simple way to make sure that the system user exists (since the master does an RPC to
+   * the tserver which will create the system user if it doesn't already exist).
+   */
+  private void createTableWithDataAndCompact(Connector conn) throws TableNotFoundException, AccumuloSecurityException, AccumuloException, TableExistsException {
+    final String table = testName.getMethodName() + "_table";
+    conn.tableOperations().create(table);
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    Mutation m = new Mutation("a");
+    m.put("b", "c", "d");
+    bw.addMutation(m);
+    bw.close();
+    conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
+  }
+}
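
A minimal sketch of the delegation-token flow that the tests above exercise, assuming an already-constructed, SASL-enabled Instance; the principal and keytab path are placeholders, but the Connector and SecurityOperations calls are the same ones KerberosIT uses:

import java.security.PrivilegedExceptionAction;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.KerberosToken;
import org.apache.hadoop.security.UserGroupInformation;

public class DelegationTokenSketch {
  // Log in from a keytab, then ask Accumulo for a delegation token. The token can later be
  // used by a process with no Kerberos credentials (e.g. a MapReduce task), but only for the
  // same principal it was issued to.
  public static AuthenticationToken fetch(final Instance instance, final String principal, final String keytab) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
    return ugi.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
      @Override
      public AuthenticationToken run() throws Exception {
        // A real Kerberos login is required to request a delegation token
        Connector conn = instance.getConnector(principal, new KerberosToken());
        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
      }
    });
  }
}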

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
new file mode 100644
index 0000000..31d1329
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.InetAddress;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.rpc.UGIAssumingTransport;
+import org.apache.accumulo.harness.AccumuloITBase;
+import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
+import org.apache.accumulo.harness.MiniClusterHarness;
+import org.apache.accumulo.harness.TestingKdc;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.accumulo.proxy.ProxyServer;
+import org.apache.accumulo.proxy.thrift.AccumuloProxy;
+import org.apache.accumulo.proxy.thrift.AccumuloProxy.Client;
+import org.apache.accumulo.proxy.thrift.AccumuloSecurityException;
+import org.apache.accumulo.proxy.thrift.ColumnUpdate;
+import org.apache.accumulo.proxy.thrift.Key;
+import org.apache.accumulo.proxy.thrift.KeyValue;
+import org.apache.accumulo.proxy.thrift.ScanOptions;
+import org.apache.accumulo.proxy.thrift.ScanResult;
+import org.apache.accumulo.proxy.thrift.TimeType;
+import org.apache.accumulo.proxy.thrift.WriterOptions;
+import org.apache.accumulo.server.util.PortUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransportException;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests impersonation of clients by the proxy over SASL
+ */
+public class KerberosProxyIT extends AccumuloITBase {
+  private static final Logger log = LoggerFactory.getLogger(KerberosProxyIT.class);
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private static TestingKdc kdc;
+  private static String krbEnabledForITs = null;
+  private static File proxyKeytab;
+  private static String hostname, proxyPrimary, proxyPrincipal;
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 60 * 5;
+  }
+
+  @BeforeClass
+  public static void startKdc() throws Exception {
+    kdc = new TestingKdc();
+    kdc.start();
+    krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
+    if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
+      System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
+    }
+
+    // Create a principal+keytab for the proxy
+    proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
+    hostname = InetAddress.getLocalHost().getCanonicalHostName();
+    // Set the primary because the client needs to know it
+    proxyPrimary = "proxy";
+    // Qualify with an instance
+    proxyPrincipal = proxyPrimary + "/" + hostname;
+    kdc.createPrincipal(proxyKeytab, proxyPrincipal);
+    // Tack on the realm too
+    proxyPrincipal = kdc.qualifyUser(proxyPrincipal);
+  }
+
+  @AfterClass
+  public static void stopKdc() throws Exception {
+    if (null != kdc) {
+      kdc.stop();
+    }
+    if (null != krbEnabledForITs) {
+      System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
+    }
+  }
+
+  private MiniAccumuloClusterImpl mac;
+  private Process proxyProcess;
+  private int proxyPort;
+
+  @Before
+  public void startMac() throws Exception {
+    MiniClusterHarness harness = new MiniClusterHarness();
+    mac = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"), new MiniClusterConfigurationCallback() {
+
+      @Override
+      public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+        cfg.setNumTservers(1);
+        Map<String,String> siteCfg = cfg.getSiteConfig();
+        // Allow the proxy to impersonate the client user, but no one else
+        siteCfg.put(Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey() + proxyPrincipal + ".users", kdc.getRootUser().getPrincipal());
+        siteCfg.put(Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey() + proxyPrincipal + ".hosts", "*");
+        cfg.setSiteConfig(siteCfg);
+      }
+
+    }, kdc);
+
+    mac.start();
+    MiniAccumuloConfigImpl cfg = mac.getConfig();
+
+    // Proxy configuration
+    proxyPort = PortUtils.getRandomFreePort();
+    File proxyPropertiesFile = new File(cfg.getConfDir(), "proxy.properties");
+    Properties proxyProperties = new Properties();
+    proxyProperties.setProperty("useMockInstance", "false");
+    proxyProperties.setProperty("useMiniAccumulo", "false");
+    proxyProperties.setProperty("protocolFactory", TCompactProtocol.Factory.class.getName());
+    proxyProperties.setProperty("tokenClass", KerberosToken.class.getName());
+    proxyProperties.setProperty("port", Integer.toString(proxyPort));
+    proxyProperties.setProperty("maxFrameSize", "16M");
+    proxyProperties.setProperty("instance", mac.getInstanceName());
+    proxyProperties.setProperty("zookeepers", mac.getZooKeepers());
+    proxyProperties.setProperty("thriftServerType", "sasl");
+    proxyProperties.setProperty("kerberosPrincipal", proxyPrincipal);
+    proxyProperties.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
+
+    // Write out the proxy.properties file
+    FileWriter writer = new FileWriter(proxyPropertiesFile);
+    proxyProperties.store(writer, "Configuration for Accumulo proxy");
+    writer.close();
+
+    proxyProcess = mac.exec(Proxy.class, "-p", proxyPropertiesFile.getCanonicalPath());
+
+    // Enable Kerberos auth
+    Configuration conf = new Configuration(false);
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+
+    boolean success = false;
+    ClusterUser rootUser = kdc.getRootUser();
+    for (int i = 0; i < 10 && !success; i++) {
+
+      UserGroupInformation ugi;
+      try {
+        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+        ugi = UserGroupInformation.getCurrentUser();
+      } catch (IOException ex) {
+        log.info("Login as root is failing", ex);
+        Thread.sleep(1000);
+        continue;
+      }
+
+      TSocket socket = new TSocket(hostname, proxyPort);
+      log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+      TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
+          "auth"), null, socket);
+
+      final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
+
+      try {
+        // UGI transport will perform the doAs for us
+        ugiTransport.open();
+        success = true;
+      } catch (TTransportException e) {
+        Throwable cause = e.getCause();
+        if (null != cause && cause instanceof ConnectException) {
+          log.info("Proxy not yet up, waiting");
+          Thread.sleep(1000);
+          continue;
+        }
+      } finally {
+        if (null != ugiTransport) {
+          ugiTransport.close();
+        }
+      }
+    }
+
+    assertTrue("Failed to connect to the proxy repeatedly", success);
+  }
+
+  @After
+  public void stopMac() throws Exception {
+    if (null != proxyProcess) {
+      log.info("Destroying proxy process");
+      proxyProcess.destroy();
+      log.info("Waiting for proxy termination");
+      proxyProcess.waitFor();
+      log.info("Proxy terminated");
+    }
+    if (null != mac) {
+      mac.stop();
+    }
+  }
+
+  @Test
+  public void testProxyClient() throws Exception {
+    ClusterUser rootUser = kdc.getRootUser();
+    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    TSocket socket = new TSocket(hostname, proxyPort);
+    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
+        "auth"), null, socket);
+
+    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
+
+    // UGI transport will perform the doAs for us
+    ugiTransport.open();
+
+    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
+    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
+
+    // Will fail if the proxy cannot impersonate the client
+    ByteBuffer login = client.login(rootUser.getPrincipal(), Collections.<String,String> emptyMap());
+
+    // For all of the below actions, the proxy user doesn't have permission to do any of them, but the client user does.
+    // The fact that any of them actually run tells us that impersonation is working.
+
+    // Create a table
+    String table = "table";
+    if (!client.tableExists(login, table)) {
+      client.createTable(login, table, true, TimeType.MILLIS);
+    }
+
+    // Write two records to the table
+    String writer = client.createWriter(login, table, new WriterOptions());
+    Map<ByteBuffer,List<ColumnUpdate>> updates = new HashMap<>();
+    ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap("cf1".getBytes(UTF_8)), ByteBuffer.wrap("cq1".getBytes(UTF_8)));
+    update.setValue(ByteBuffer.wrap("value1".getBytes(UTF_8)));
+    updates.put(ByteBuffer.wrap("row1".getBytes(UTF_8)), Collections.<ColumnUpdate> singletonList(update));
+    update = new ColumnUpdate(ByteBuffer.wrap("cf2".getBytes(UTF_8)), ByteBuffer.wrap("cq2".getBytes(UTF_8)));
+    update.setValue(ByteBuffer.wrap("value2".getBytes(UTF_8)));
+    updates.put(ByteBuffer.wrap("row2".getBytes(UTF_8)), Collections.<ColumnUpdate> singletonList(update));
+    client.update(writer, updates);
+
+    // Flush and close the writer
+    client.flush(writer);
+    client.closeWriter(writer);
+
+    // Open a scanner to the table
+    String scanner = client.createScanner(login, table, new ScanOptions());
+    ScanResult results = client.nextK(scanner, 10);
+    assertEquals(2, results.getResults().size());
+
+    // Check the first key-value
+    KeyValue kv = results.getResults().get(0);
+    Key k = kv.key;
+    ByteBuffer v = kv.value;
+    assertEquals(ByteBuffer.wrap("row1".getBytes(UTF_8)), k.row);
+    assertEquals(ByteBuffer.wrap("cf1".getBytes(UTF_8)), k.colFamily);
+    assertEquals(ByteBuffer.wrap("cq1".getBytes(UTF_8)), k.colQualifier);
+    assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
+    assertEquals(ByteBuffer.wrap("value1".getBytes(UTF_8)), v);
+
+    // And then the second
+    kv = results.getResults().get(1);
+    k = kv.key;
+    v = kv.value;
+    assertEquals(ByteBuffer.wrap("row2".getBytes(UTF_8)), k.row);
+    assertEquals(ByteBuffer.wrap("cf2".getBytes(UTF_8)), k.colFamily);
+    assertEquals(ByteBuffer.wrap("cq2".getBytes(UTF_8)), k.colQualifier);
+    assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
+    assertEquals(ByteBuffer.wrap("value2".getBytes(UTF_8)), v);
+
+    // Close the scanner
+    client.closeScanner(scanner);
+
+    ugiTransport.close();
+  }
+
+  @Test
+  public void testDisallowedClientForImpersonation() throws Exception {
+    String user = testName.getMethodName();
+    File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
+    kdc.createPrincipal(keytab, user);
+
+    // Login as the new user
+    UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    log.info("Logged in as " + ugi);
+
+    // Expect an AccumuloSecurityException
+    thrown.expect(AccumuloSecurityException.class);
+    // Error msg would look like:
+    //
+    // org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_CREDENTIALS for user Principal in credentials object should match kerberos
+    // principal.
+    // Expected 'proxy/hw10447.local@EXAMPLE.COM' but was 'testDisallowedClientForImpersonation@EXAMPLE.COM' - Username or Password is Invalid)
+    thrown.expect(new ThriftExceptionMatchesPattern(".*Error BAD_CREDENTIALS.*"));
+    thrown.expect(new ThriftExceptionMatchesPattern(".*Expected '" + proxyPrincipal + "' but was '" + kdc.qualifyUser(user) + "'.*"));
+
+    TSocket socket = new TSocket(hostname, proxyPort);
+    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+
+    // Should fail to open the transport
+    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
+        "auth"), null, socket);
+
+    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
+
+    // UGI transport will perform the doAs for us
+    ugiTransport.open();
+
+    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
+    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
+
+    // Will fail because the proxy can't impersonate this user (per the site configuration)
+    try {
+      client.login(kdc.qualifyUser(user), Collections.<String,String> emptyMap());
+    } finally {
+      if (null != ugiTransport) {
+        ugiTransport.close();
+      }
+    }
+  }
+
+  @Test
+  public void testMismatchPrincipals() throws Exception {
+    ClusterUser rootUser = kdc.getRootUser();
+    // Should get an AccumuloSecurityException and the given message
+    thrown.expect(AccumuloSecurityException.class);
+    thrown.expect(new ThriftExceptionMatchesPattern(ProxyServer.RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG));
+
+    // Make a new user
+    String user = testName.getMethodName();
+    File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
+    kdc.createPrincipal(keytab, user);
+
+    // Login as the new user
+    UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    log.info("Logged in as " + ugi);
+
+    TSocket socket = new TSocket(hostname, proxyPort);
+    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+
+    // Should fail to open the transport
+    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
+        "auth"), null, socket);
+
+    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
+
+    // UGI transport will perform the doAs for us
+    ugiTransport.open();
+
+    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
+    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
+
+    // The proxy needs to recognize that the requested principal isn't the same as the SASL principal and fail
+    // Accumulo itself would let this through -- we rely on the proxy to reject the request before it talks to Accumulo
+    try {
+      client.login(rootUser.getPrincipal(), Collections.<String,String> emptyMap());
+    } finally {
+      if (null != ugiTransport) {
+        ugiTransport.close();
+      }
+    }
+  }
+
+  private static class ThriftExceptionMatchesPattern extends TypeSafeMatcher<AccumuloSecurityException> {
+    private String pattern;
+
+    public ThriftExceptionMatchesPattern(String pattern) {
+      this.pattern = pattern;
+    }
+
+    @Override
+    protected boolean matchesSafely(AccumuloSecurityException item) {
+      return item.isSetMsg() && item.msg.matches(pattern);
+    }
+
+    @Override
+    public void describeTo(Description description) {
+      description.appendText("matches pattern ").appendValue(pattern);
+    }
+
+    @Override
+    protected void describeMismatchSafely(AccumuloSecurityException item, Description mismatchDescription) {
+      mismatchDescription.appendText("does not match");
+    }
+  }
+}
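
The SASL client-side setup that KerberosProxyIT repeats before each call can be condensed into a similar sketch; the proxy hostname, port, and "proxy" primary are placeholders, while the transport and protocol classes are the ones the test itself uses:

import java.util.Collections;

import org.apache.accumulo.core.rpc.UGIAssumingTransport;
import org.apache.accumulo.proxy.thrift.AccumuloProxy;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TSaslClientTransport;
import org.apache.thrift.transport.TSocket;

public class ProxyClientSketch {
  // Build a GSSAPI SASL transport to the proxy and wrap it in UGIAssumingTransport so that
  // every Thrift call runs inside the logged-in user's Kerberos context.
  public static AccumuloProxy.Client connect(String principal, String keytab, String proxyHost, int proxyPort) throws Exception {
    UserGroupInformation.loginUserFromKeytab(principal, keytab);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    TSocket socket = new TSocket(proxyHost, proxyPort);
    TSaslClientTransport sasl = new TSaslClientTransport("GSSAPI", null, "proxy", proxyHost,
        Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);
    UGIAssumingTransport transport = new UGIAssumingTransport(sasl, ugi);
    transport.open(); // the UGI transport performs the doAs for us

    return new AccumuloProxy.Client.Factory().getClient(new TCompactProtocol(transport), new TCompactProtocol(transport));
  }
}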

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
new file mode 100644
index 0000000..72b51eb
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LargeRowIT extends AccumuloClusterHarness {
+  private static final Logger log = LoggerFactory.getLogger(LargeRowIT.class);
+
+  @Override
+  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 2, MemoryUnit.BYTE);
+    Map<String,String> siteConfig = cfg.getSiteConfig();
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 4 * 60;
+  }
+
+  private static final int SEED = 42;
+  private static final int NUM_ROWS = 100;
+  private static final int ROW_SIZE = 1 << 17;
+  private static final int NUM_PRE_SPLITS = 9;
+  private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
+
+  private String REG_TABLE_NAME;
+  private String PRE_SPLIT_TABLE_NAME;
+  private int timeoutFactor = 1;
+  private String tservMajcDelay;
+
+  @Before
+  public void getTimeoutFactor() throws Exception {
+    try {
+      timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
+    } catch (NumberFormatException e) {
+      log.warn("Could not parse property value for 'timeout.factor' as integer: " + System.getProperty("timeout.factor"));
+    }
+
+    Assert.assertTrue("Timeout factor must be greater than or equal to 1", timeoutFactor >= 1);
+
+    String[] names = getUniqueNames(2);
+    REG_TABLE_NAME = names[0];
+    PRE_SPLIT_TABLE_NAME = names[1];
+
+    Connector c = getConnector();
+    tservMajcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
+    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
+  }
+
+  @After
+  public void resetMajcDelay() throws Exception {
+    if (null != tservMajcDelay) {
+      Connector conn = getConnector();
+      conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
+    }
+  }
+
+  @Test
+  public void run() throws Exception {
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+    r.setSeed(SEED + 1);
+    TreeSet<Text> splitPoints = new TreeSet<Text>();
+    for (int i = 0; i < NUM_PRE_SPLITS; i++) {
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+      splitPoints.add(new Text(rowData));
+    }
+    Connector c = getConnector();
+    c.tableOperations().create(REG_TABLE_NAME);
+    c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
+    c.tableOperations().setProperty(PRE_SPLIT_TABLE_NAME, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "256K");
+    UtilWaitThread.sleep(3 * 1000);
+    c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
+    test1(c);
+    test2(c);
+  }
+
+  private void test1(Connector c) throws Exception {
+
+    basicTest(c, REG_TABLE_NAME, 0);
+
+    c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
+
+    UtilWaitThread.sleep(timeoutFactor * 12000);
+    log.info("checking splits");
+    FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
+
+    verify(c, REG_TABLE_NAME);
+  }
+
+  private void test2(Connector c) throws Exception {
+    basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
+  }
+
+  private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
+    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
+
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+
+    r.setSeed(SEED);
+
+    for (int i = 0; i < NUM_ROWS; i++) {
+
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+
+      Mutation mut = new Mutation(new Text(rowData));
+      mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(UTF_8)));
+      bw.addMutation(mut);
+    }
+
+    bw.close();
+
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+
+    verify(c, table);
+
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+
+    c.tableOperations().flush(table, null, null, false);
+
+    // verify while table flush is running
+    verify(c, table);
+
+    // give split time to complete
+    c.tableOperations().flush(table, null, null, true);
+
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+
+    verify(c, table);
+
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+  }
+
+  private void verify(Connector c, String table) throws Exception {
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+
+    r.setSeed(SEED);
+
+    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
+
+    for (int i = 0; i < NUM_ROWS; i++) {
+
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+
+      scanner.setRange(new Range(new Text(rowData)));
+
+      int count = 0;
+
+      for (Entry<Key,Value> entry : scanner) {
+        if (!entry.getKey().getRow().equals(new Text(rowData))) {
+          throw new Exception("verification failed, unexpected row i =" + i);
+        }
+        if (!entry.getValue().equals(Integer.toString(i).getBytes(UTF_8))) {
+          throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
+        }
+        count++;
+      }
+
+      if (count != 1) {
+        throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
+      }
+
+    }
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/LateLastContactIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LateLastContactIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LateLastContactIT.java
new file mode 100644
index 0000000..9c310f0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LateLastContactIT.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+/**
+ * Fake the "tablet server stops talking but holds its lock" problem we see when hard drives and NFS fail. Start a ZombieTServer, and verify that the master stops it.
+ */
+public class LateLastContactIT extends ConfigurableMacBase {
+
+  @Override
+  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.GENERAL_RPC_TIMEOUT.getKey(), "2s"));
+  }
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 60;
+  }
+
+  @Test
+  public void test() throws Exception {
+    Process zombie = cluster.exec(ZombieTServer.class);
+    assertEquals(0, zombie.waitFor());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
new file mode 100644
index 0000000..1e7fef0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LogicalTimeIT extends AccumuloClusterHarness {
+  private static final Logger log = LoggerFactory.getLogger(LogicalTimeIT.class);
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 4 * 60;
+  }
+
+  @Test
+  public void run() throws Exception {
+    int tc = 0;
+    String tableName = getUniqueNames(1)[0];
+    Connector c = getConnector();
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
+
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
+
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
+
+  }
+
+  private void runMergeTest(Connector conn, String table, String[] splits, String[] inserts, String start, String end, String last, long expected)
+      throws Exception {
+    log.info("table " + table);
+    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
+    TreeSet<Text> splitSet = new TreeSet<Text>();
+    for (String split : splits) {
+      splitSet.add(new Text(split));
+    }
+    conn.tableOperations().addSplits(table, splitSet);
+
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    for (String row : inserts) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "v");
+      bw.addMutation(m);
+    }
+
+    bw.flush();
+
+    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
+
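+    // Under LOGICAL time each tablet keeps its own counter; after the merge the combined
+    // tablet's clock should be the max of the merged tablets' counters, so this post-merge
+    // insert is expected to get that max plus one (the "expected" argument).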
+    Mutation m = new Mutation(last);
+    m.put("cf", "cq", "v");
+    bw.addMutation(m);
+    bw.flush();
+
+    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+    scanner.setRange(new Range(last));
+
+    bw.close();
+
+    long time = scanner.iterator().next().getKey().getTimestamp();
+    if (time != expected)
+      throw new RuntimeException("unexpected time " + time + ", expected " + expected);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
new file mode 100644
index 0000000..8c4666c
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.Collections;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
+import org.apache.accumulo.examples.simple.mapreduce.RowHash;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MapReduceIT extends ConfigurableMacBase {
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 60;
+  }
+
+  public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir") + "/target/hadoop-tmp";
+
+  static final String tablename = "mapredf";
+  static final String input_cf = "cf-HASHTYPE";
+  static final String input_cq = "cq-NOTHASHED";
+  static final String input_cfcq = input_cf + ":" + input_cq;
+  static final String output_cq = "cq-MD4BASE64";
+  static final String output_cfcq = input_cf + ":" + output_cq;
+
+  @Test
+  public void test() throws Exception {
+    runTest(getConnector(), getCluster());
+  }
+
+  static void runTest(Connector c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException,
+      TableNotFoundException, MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException {
+    c.tableOperations().create(tablename);
+    BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
+    for (int i = 0; i < 10; i++) {
+      Mutation m = new Mutation("" + i);
+      m.put(input_cf, input_cq, "row" + i);
+      bw.addMutation(m);
+    }
+    bw.close();
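+    // Run the RowHash example as a MapReduce job against the mini cluster; it should read the
+    // configured input column and write a Base64-encoded MD5 of each value back to the table,
+    // which the scan below verifies.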
+    Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i", c.getInstance().getInstanceName(), "-z", c.getInstance()
+        .getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
+    assertEquals(0, hash.waitFor());
+
+    Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
+    s.fetchColumn(new Text(input_cf), new Text(output_cq));
+    int i = 0;
+    for (Entry<Key,Value> entry : s) {
+      MessageDigest md = MessageDigest.getInstance("MD5");
+      byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
+      assertEquals(entry.getValue().toString(), new String(check));
+      i++;
+    }
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
new file mode 100644
index 0000000..72f8ce7
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.FileNotFoundException;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MasterAssignmentIT extends AccumuloClusterHarness {
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 2 * 60;
+  }
+
+  @Test
+  public void test() throws Exception {
+    Connector c = getConnector();
+    String tableName = super.getUniqueNames(1)[0];
+    c.tableOperations().create(tableName);
+    String tableId = c.tableOperations().tableIdMap().get(tableName);
+    // wait for the table to be online
+    TabletLocationState newTablet;
+    do {
+      UtilWaitThread.sleep(250);
+      newTablet = getTabletLocationState(c, tableId);
+    } while (newTablet.current == null);
+    assertNull(newTablet.last);
+    assertNull(newTablet.future);
+
+    // put something in it
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+    Mutation m = new Mutation("a");
+    m.put("b", "c", "d");
+    bw.addMutation(m);
+    bw.close();
+    // give it a last location
+    c.tableOperations().flush(tableName, null, null, true);
+
+    TabletLocationState flushed = getTabletLocationState(c, tableId);
+    assertEquals(newTablet.current, flushed.current);
+    assertEquals(flushed.current, flushed.last);
+    assertNull(flushed.future);
+
+    // take the tablet offline
+    c.tableOperations().offline(tableName, true);
+    TabletLocationState offline = getTabletLocationState(c, tableId);
+    assertNull(offline.future);
+    assertNull(offline.current);
+    assertEquals(flushed.current, offline.last);
+
+    // put it back online
+    c.tableOperations().online(tableName, true);
+    TabletLocationState online = getTabletLocationState(c, tableId);
+    assertNull(online.future);
+    assertNotNull(online.current);
+    assertEquals(online.current, online.last);
+  }
+
+  private TabletLocationState getTabletLocationState(Connector c, String tableId) throws FileNotFoundException, ConfigurationException {
+    Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
+    ClientContext context = new ClientContext(c.getInstance(), creds, getCluster().getClientConfig());
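+    // Scan the metadata entry for the table's single default tablet to read its
+    // current, future, and last assignment locations.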
+    MetaDataTableScanner s = new MetaDataTableScanner(context, new Range(KeyExtent.getMetadataEntry(new Text(tableId), null)));
+    TabletLocationState tlState = s.next();
+    s.close();
+    return tlState;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
new file mode 100644
index 0000000..3489c26
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Map;
+
+import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class MasterFailoverIT extends AccumuloClusterHarness {
+
+  @Override
+  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    Map<String,String> siteConfig = cfg.getSiteConfig();
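+    // a short ZooKeeper timeout should let a replacement master take over quickly after the old one is killed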
+    siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s");
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 90;
+  }
+
+  @Test
+  public void test() throws Exception {
+    Connector c = getConnector();
+    String[] names = getUniqueNames(2);
+    c.tableOperations().create(names[0]);
+    TestIngest.Opts opts = new TestIngest.Opts();
+    opts.setTableName(names[0]);
+    ClientConfiguration clientConf = cluster.getClientConfig();
+    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+      opts.updateKerberosCredentials(clientConf);
+    } else {
+      opts.setPrincipal(getAdminPrincipal());
+    }
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+
+    ClusterControl control = cluster.getClusterControl();
+    control.stopAllServers(ServerType.MASTER);
+    // start up a new one
+    control.startAllServers(ServerType.MASTER);
+    // talk to it
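+    // a rename is coordinated by the master, so it succeeds only if the new master took over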
+    c.tableOperations().rename(names[0], names[1]);
+    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.setTableName(names[1]);
+    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+      vopts.updateKerberosCredentials(clientConf);
+    } else {
+      vopts.setPrincipal(getAdminPrincipal());
+    }
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
new file mode 100644
index 0000000..6f08c1f
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * A functional test that exercises hitting the max open file limit on a tablet server. This test assumes there are one or two tablet servers.
+ */
+
+public class MaxOpenIT extends AccumuloClusterHarness {
+
+  @Override
+  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    Map<String,String> conf = cfg.getSiteConfig();
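+    // cap the scan open-file limit at 4 and throttle major compactions so the limit is
+    // actually exercised while multiple files per tablet accumulate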
+    conf.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "4");
+    conf.put(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
+    conf.put(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "2");
+    cfg.setSiteConfig(conf);
+  }
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 3 * 60;
+  }
+
+  private String scanMaxOpenFiles, majcConcurrent, majcThreadMaxOpen;
+
+  @Before
+  public void alterConfig() throws Exception {
+    InstanceOperations iops = getConnector().instanceOperations();
+    Map<String,String> sysConfig = iops.getSystemConfiguration();
+    scanMaxOpenFiles = sysConfig.get(Property.TSERV_SCAN_MAX_OPENFILES.getKey());
+    majcConcurrent = sysConfig.get(Property.TSERV_MAJC_MAXCONCURRENT.getKey());
+    majcThreadMaxOpen = sysConfig.get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey());
+  }
+
+  @After
+  public void restoreConfig() throws Exception {
+    InstanceOperations iops = getConnector().instanceOperations();
+    if (null != scanMaxOpenFiles) {
+      iops.setProperty(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), scanMaxOpenFiles);
+    }
+    if (null != majcConcurrent) {
+      iops.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), majcConcurrent);
+    }
+    if (null != majcThreadMaxOpen) {
+      iops.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), majcThreadMaxOpen);
+    }
+  }
+
+  private static final int NUM_TABLETS = 16;
+  private static final int NUM_TO_INGEST = 10000;
+
+  @Test
+  public void run() throws Exception {
+    final Connector c = getConnector();
+    final String tableName = getUniqueNames(1)[0];
+    final ClientConfiguration clientConf = cluster.getClientConfig();
+    c.tableOperations().create(tableName);
+    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "10");
+    c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
+
+    // the following loop should create three map files in each tablet
+    for (int i = 0; i < 3; i++) {
+      TestIngest.Opts opts = new TestIngest.Opts();
+      opts.timestamp = i;
+      opts.dataSize = 50;
+      opts.rows = NUM_TO_INGEST;
+      opts.cols = 1;
+      opts.random = i;
+      opts.setTableName(tableName);
+      if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+        opts.updateKerberosCredentials(clientConf);
+      } else {
+        opts.setPrincipal(getAdminPrincipal());
+      }
+      TestIngest.ingest(c, opts, new BatchWriterOpts());
+
+      c.tableOperations().flush(tableName, null, null, true);
+      FunctionalTestUtils.checkRFiles(c, tableName, NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
+    }
+
+    List<Range> ranges = new ArrayList<Range>(NUM_TO_INGEST);
+
+    for (int i = 0; i < NUM_TO_INGEST; i++) {
+      ranges.add(new Range(TestIngest.generateRow(i, 0)));
+    }
+
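+    // With TSERV_SCAN_MAX_OPENFILES capped at 4 and several files per tablet, these scans
+    // should force the tablet server to cycle files through its open-file cache; the test
+    // simply verifies that every value still comes back correctly.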
+    long time1 = batchScan(c, tableName, ranges, 1);
+    // run it again, now that stuff is cached on the client and server
+    time1 = batchScan(c, tableName, ranges, 1);
+    long time2 = batchScan(c, tableName, ranges, NUM_TABLETS);
+
+    System.out.printf("Single thread scan time   %6.2f %n", time1 / 1000.0);
+    System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
+
+  }
+
+  private long batchScan(Connector c, String tableName, List<Range> ranges, int threads) throws Exception {
+    BatchScanner bs = c.createBatchScanner(tableName, TestIngest.AUTHS, threads);
+
+    bs.setRanges(ranges);
+
+    int count = 0;
+
+    long t1 = System.currentTimeMillis();
+
+    byte[] rval = new byte[50];
+    Random random = new Random();
+
+    for (Entry<Key,Value> entry : bs) {
+      count++;
+      int row = VerifyIngest.getRow(entry.getKey());
+      int col = VerifyIngest.getCol(entry.getKey());
+
+      if (row < 0 || row >= NUM_TO_INGEST) {
+        throw new Exception("unexcepted row " + row);
+      }
+
+      rval = TestIngest.genRandomValue(random, rval, 2, row, col);
+
+      if (entry.getValue().compareTo(rval) != 0) {
+        throw new Exception("unexcepted value row=" + row + " col=" + col);
+      }
+    }
+
+    long t2 = System.currentTimeMillis();
+
+    bs.close();
+
+    if (count != NUM_TO_INGEST) {
+      throw new Exception("Batch Scan did not return expected number of values " + count);
+    }
+
+    return t2 - t1;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
new file mode 100644
index 0000000..9e3e8b6
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Merge;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MergeIT extends AccumuloClusterHarness {
+
+  @Override
+  public int defaultTimeoutSeconds() {
+    return 8 * 60;
+  }
+
+  SortedSet<Text> splits(String[] points) {
+    SortedSet<Text> result = new TreeSet<Text>();
+    for (String point : points)
+      result.add(new Text(point));
+    return result;
+  }
+
+  @Test
+  public void merge() throws Exception {
+    Connector c = getConnector();
+    String tableName = getUniqueNames(1)[0];
+    c.tableOperations().create(tableName);
+    c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
+    BatchWriter bw = c.createBatchWriter(tableName, null);
+    for (String row : "a b c d e f g h i j k".split(" ")) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "value");
+      bw.addMutation(m);
+    }
+    bw.close();
+    c.tableOperations().flush(tableName, null, null, true);
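+    // merging the range ("c1", "f1") should collapse the tablets containing splits d, e,
+    // and f, leaving 8 of the original 11 splits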
+    c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
+    assertEquals(8, c.tableOperations().listSplits(tableName).size());
+  }
+
+  @Test
+  public void mergeSize() throws Exception {
+    Connector c = getConnector();
+    String tableName = getUniqueNames(1)[0];
+    c.tableOperations().create(tableName);
+    c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
+    BatchWriter bw = c.createBatchWriter(tableName, null);
+    for (String row : "c e f y".split(" ")) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
+      bw.addMutation(m);
+    }
+    bw.close();
+    c.tableOperations().flush(tableName, null, null, true);
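+    // mergomatic should repeatedly merge runs of adjacent tablets holding less than the
+    // goal size (100 bytes here), so only splits bordering tablets that actually hold
+    // data are expected to remain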
+    Merge merge = new Merge();
+    merge.mergomatic(c, tableName, null, null, 100, false);
+    assertArrayEquals("b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
+    merge.mergomatic(c, tableName, null, null, 100, true);
+    assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
+  }
+
+  private String[] toStrings(Collection<Text> listSplits) {
+    String[] result = new String[listSplits.size()];
+    int i = 0;
+    for (Text t : listSplits) {
+      result[i++] = t.toString();
+    }
+    return result;
+  }
+
+  private String[] ns(String... strings) {
+    return strings;
+  }
+
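+  // Each case below supplies: the initial splits, the splits expected to survive the merge,
+  // the rows to insert, the candidate merge start rows, and the candidate merge end rows;
+  // every start/end combination is exercised as a separate table.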
+  @Test
+  public void mergeTest() throws Exception {
+    int tc = 0;
+    Connector c = getConnector();
+    String tableName = getUniqueNames(1)[0];
+    runMergeTest(c, tableName + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+
+    runMergeTest(c, tableName + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+    runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
+    runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
+
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
+
+  }
+
+  private void runMergeTest(Connector c, String table, String[] splits, String[] expectedSplits, String[] inserts, String[] start, String[] end)
+      throws Exception {
+    int count = 0;
+
+    for (String s : start) {
+      for (String e : end) {
+        runMergeTest(c, table + "_" + count++, splits, expectedSplits, inserts, s, e);
+      }
+    }
+  }
+
+  private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end)
+      throws Exception {
+    System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
+
+    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
+    TreeSet<Text> splitSet = new TreeSet<Text>();
+    for (String split : splits) {
+      splitSet.add(new Text(split));
+    }
+    conn.tableOperations().addSplits(table, splitSet);
+
+    BatchWriter bw = conn.createBatchWriter(table, null);
+    HashSet<String> expected = new HashSet<String>();
+    for (String row : inserts) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", row);
+      bw.addMutation(m);
+      expected.add(row);
+    }
+
+    bw.close();
+
+    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
+
+    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+
+    HashSet<String> observed = new HashSet<String>();
+    for (Entry<Key,Value> entry : scanner) {
+      String row = entry.getKey().getRowData().toString();
+      if (!observed.add(row)) {
+        throw new Exception("Saw data twice " + table + " " + row);
+      }
+    }
+
+    if (!observed.equals(expected)) {
+      throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
+    }
+
+    HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
+    HashSet<Text> ess = new HashSet<Text>();
+    for (String es : expectedSplits) {
+      ess.add(new Text(es));
+    }
+
+    if (!currentSplits.equals(ess)) {
+      throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
+    }
+
+  }
+
+}