Posted to commits@accumulo.apache.org by kt...@apache.org on 2013/07/23 18:54:26 UTC

[01/50] git commit: ACCUMULO-1561 updating rpm-maven-plugin version

Updated Branches:
  refs/heads/ACCUMULO-1000 1fadfd22d -> 95931ea0b


ACCUMULO-1561 updating rpm-maven-plugin version

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/1.5@1501024 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/bd6c426c
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/bd6c426c
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/bd6c426c

Branch: refs/heads/ACCUMULO-1000
Commit: bd6c426c998d23cc3ea88a37451402fe99998f3a
Parents: d7a7fbc
Author: Mike Drob <md...@apache.org>
Authored: Mon Jul 8 23:50:17 2013 +0000
Committer: Mike Drob <md...@apache.org>
Committed: Mon Jul 8 23:50:17 2013 +0000

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/bd6c426c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 5cb4012..37246f9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -518,7 +518,7 @@
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>rpm-maven-plugin</artifactId>
-          <version>2.1-alpha-2</version>
+          <version>2.1-alpha-3</version>
         </plugin>
         <plugin>
           <groupId>org.eclipse.m2e</groupId>


[31/50] git commit: ACCUMULO-1537 Fix maven dependency problems on hadoop-test

Posted by kt...@apache.org.
ACCUMULO-1537 Fix maven dependency problems on hadoop-test


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/0793476d
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/0793476d
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/0793476d

Branch: refs/heads/ACCUMULO-1000
Commit: 0793476d28c34c8ae725a9fea38af1569831813c
Parents: 8513b55
Author: Christopher Tubbs <ct...@apache.org>
Authored: Fri Jul 19 16:57:09 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Fri Jul 19 18:54:45 2013 -0400

----------------------------------------------------------------------
 minicluster/pom.xml                                       | 10 ++++------
 .../apache/accumulo/minicluster/MiniAccumuloCluster.java  | 10 +++++-----
 pom.xml                                                   |  5 +++++
 test/pom.xml                                              |  6 ------
 4 files changed, 14 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/0793476d/minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/minicluster/pom.xml b/minicluster/pom.xml
index 3d1063b..d7c8778 100644
--- a/minicluster/pom.xml
+++ b/minicluster/pom.xml
@@ -46,6 +46,10 @@
       <artifactId>accumulo-start</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-test</artifactId>
+    </dependency>
+    <dependency>
       <groupId>commons-configuration</groupId>
       <artifactId>commons-configuration</artifactId>
       <scope>provided</scope>
@@ -76,12 +80,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-test</artifactId>
-      <version>${hadoop.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/0793476d/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 7c00cec..4f9ff89 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -182,7 +182,7 @@ public class MiniAccumuloCluster {
     
     List<String> jvmOpts = new ArrayList<String>();
     jvmOpts.add("-Xmx" + config.getMemory(serverType));
-
+    
     if (config.isJDWPEnabled()) {
       Integer port = PortUtils.getRandomFreePort();
       jvmOpts.addAll(buildRemoteDebugParams(port));
@@ -232,13 +232,13 @@ public class MiniAccumuloCluster {
       miniDFS = new MiniDFSCluster(conf, 1, true, null);
       miniDFS.waitClusterUp();
       InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
-      String uri = "hdfs://"+ dfsAddress.getHostName() + ":" + dfsAddress.getPort();
+      String uri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
       File coreFile = new File(config.getConfDir(), "core-site.xml");
       writeConfig(coreFile, Collections.singletonMap("fs.default.name", uri));
       File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
       writeConfig(hdfsFile, Collections.singletonMap("dfs.support.append", "true"));
       
-      Map<String, String> siteConfig = config.getSiteConfig();
+      Map<String,String> siteConfig = config.getSiteConfig();
       siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), uri);
       siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
       config.setSiteConfig(siteConfig);
@@ -283,7 +283,7 @@ public class MiniAccumuloCluster {
     }
   }
   
-  private void writeConfig(File file, Map<String, String> settings) throws IOException {
+  private void writeConfig(File file, Map<String,String> settings) throws IOException {
     FileWriter fileWriter = new FileWriter(file);
     fileWriter.append("<configuration>\n");
     
@@ -427,7 +427,7 @@ public class MiniAccumuloCluster {
     for (LogWriter lw : logWriters) {
       lw.flush();
     }
-
+    
     if (zooKeeperProcess != null) {
       zooKeeperProcess.destroy();
     }

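The new hadoop-test dependency is what lets MiniAccumuloCluster stand up a MiniDFSCluster and point Accumulo at it by writing fs.default.name and dfs.support.append into Hadoop-style XML files via writeConfig(File, Map<String,String>), as the hunks above show. Only the opening lines of that helper are visible in the diff, so the minimal sketch below fills in the property loop and closing tag as assumptions about how such a writer is typically completed:

----------------------------------------------------------------------
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Map;

// Minimal sketch of a Hadoop-style config writer in the spirit of the
// writeConfig(...) shown above. Only its signature and opening lines appear
// in the diff; the property loop and closing tag here are assumptions.
public class ConfigWriterSketch {
  static void writeConfig(File file, Map<String,String> settings) throws IOException {
    FileWriter fileWriter = new FileWriter(file);
    try {
      fileWriter.append("<configuration>\n");
      for (Map.Entry<String,String> entry : settings.entrySet()) {
        // one <property> element per setting, e.g. fs.default.name -> hdfs://host:port
        fileWriter.append("  <property>\n");
        fileWriter.append("    <name>" + entry.getKey() + "</name>\n");
        fileWriter.append("    <value>" + entry.getValue() + "</value>\n");
        fileWriter.append("  </property>\n");
      }
      fileWriter.append("</configuration>\n");
    } finally {
      fileWriter.close();
    }
  }
}
----------------------------------------------------------------------

In the diff it is invoked with single-entry maps, e.g. writeConfig(coreFile, Collections.singletonMap("fs.default.name", uri)), once per generated file.
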
http://git-wip-us.apache.org/repos/asf/accumulo/blob/0793476d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 6454ff3..8f67cbd 100644
--- a/pom.xml
+++ b/pom.xml
@@ -300,6 +300,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-test</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-tools</artifactId>
         <version>${hadoop.version}</version>
       </dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/0793476d/test/pom.xml
----------------------------------------------------------------------
diff --git a/test/pom.xml b/test/pom.xml
index 2f89d7a..8343bb2 100644
--- a/test/pom.xml
+++ b/test/pom.xml
@@ -133,12 +133,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-test</artifactId>
-      <version>${hadoop.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
       <groupId>org.mortbay.jetty</groupId>
       <artifactId>jetty</artifactId>
       <scope>test</scope>


[30/50] git commit: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/accumulo

Posted by kt...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/accumulo


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/8513b556
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/8513b556
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/8513b556

Branch: refs/heads/ACCUMULO-1000
Commit: 8513b55607423175cd290d319f7c316bddfaa679
Parents: 057b8d6 3d7a6e7
Author: Eric Newton <ec...@apache.org>
Authored: Fri Jul 19 16:31:35 2013 -0400
Committer: Eric Newton <ec...@apache.org>
Committed: Fri Jul 19 16:31:35 2013 -0400

----------------------------------------------------------------------
 maven-plugin/pom.xml                            |  4 ++
 .../src/it/plugin-test/postbuild.groovy         |  3 ++
 .../apache/accumulo/plugin/CustomFilter.java    | 33 +++++++++++++++
 .../org/apache/accumulo/plugin/PluginIT.java    | 44 ++++++++++++++++++--
 .../maven/plugin/AbstractAccumuloMojo.java      | 31 +++++++-------
 .../apache/accumulo/maven/plugin/StartMojo.java |  2 +
 .../apache/accumulo/maven/plugin/StopMojo.java  |  2 +
 pom.xml                                         |  5 +++
 8 files changed, 107 insertions(+), 17 deletions(-)
----------------------------------------------------------------------



[26/50] git commit: added ability to invalidate server-side conditional update sessions

Posted by kt...@apache.org.
added ability to invalidate server-side conditional update sessions


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/ec537137
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/ec537137
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/ec537137

Branch: refs/heads/ACCUMULO-1000
Commit: ec537137aa958cf87d8d11ff5fdfe05c78dea624
Parents: a169064
Author: keith@deenlo.com <ke...@deenlo.com>
Authored: Fri Jul 19 16:00:13 2013 -0400
Committer: keith@deenlo.com <ke...@deenlo.com>
Committed: Fri Jul 19 16:00:13 2013 -0400

----------------------------------------------------------------------
 .../core/client/impl/ConditionalWriterImpl.java |  120 +-
 .../thrift/TabletClientService.java             | 2831 +++++++++++++++---
 core/src/main/thrift/tabletserver.thrift        |    9 +-
 .../server/security/SecurityOperation.java      |   15 +-
 .../server/tabletserver/TabletServer.java       |   97 +-
 .../test/performance/thrift/NullTserver.java    |   16 +-
 .../accumulo/test/ConditionalWriterTest.java    |    1 +
 7 files changed, 2571 insertions(+), 518 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/ec537137/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
index f0d6108..31403fb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
@@ -34,10 +34,12 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableDeletedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.TableOfflineException;
 import org.apache.accumulo.core.client.impl.TabletLocator.TabletServerMutations;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
@@ -57,6 +59,7 @@ import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.security.VisibilityEvaluator;
 import org.apache.accumulo.core.security.VisibilityParseException;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.util.BadArgumentException;
 import org.apache.accumulo.core.util.ByteBufferUtil;
@@ -177,7 +180,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
     return queue;
   }
   
-  private void queueFailed(List<QCMutation> mutations) {
+  private void queueRetry(List<QCMutation> mutations) {
     for (QCMutation qcm : mutations) {
       qcm.resetDelay();
     }
@@ -208,7 +211,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
     }
     
     if (failures.size() > 0)
-      queueFailed(failures);
+      queueRetry(failures);
 
     for (Entry<String,TabletServerMutations<QCMutation>> entry : binnedMutations.entrySet()) {
       queue(entry.getKey(), entry.getValue());
@@ -350,6 +353,8 @@ class ConditionalWriterImpl implements ConditionalWriter {
     Map<Long,CMK> cmidToCm = new HashMap<Long,CMK>();
     MutableLong cmid = new MutableLong(0);
 
+    Long sessionId = null;
+    
     try {
       client = ThriftUtil.getTServerClient(location, instance.getConfiguration());
 
@@ -357,9 +362,11 @@ class ConditionalWriterImpl implements ConditionalWriter {
 
       CompressedIterators compressedIters = new CompressedIterators();
       convertMutations(mutations, cmidToCm, cmid, tmutations, compressedIters);
-
-      List<TCMResult> tresults = client.conditionalUpdate(tinfo, credentials, ByteBufferUtil.toByteBuffers(auths.getAuthorizations()), tmutations,
-          compressedIters.getSymbolTable());
+      
+      //TODO create a session per tserver and keep reusing it
+      sessionId = client.startConditionalUpdate(tinfo, credentials, ByteBufferUtil.toByteBuffers(auths.getAuthorizations()), tableId);
+      
+      List<TCMResult> tresults = client.conditionalUpdate(tinfo, sessionId, tmutations, compressedIters.getSymbolTable());
 
       HashSet<KeyExtent> extentsToInvalidate = new HashSet<KeyExtent>();
 
@@ -383,27 +390,108 @@ class ConditionalWriterImpl implements ConditionalWriter {
         locator.invalidateCache(ke);
       }
 
-      queueFailed(ignored);
+      queueRetry(ignored);
 
+    } catch (NoSuchScanIDException nssie){
+    	queueRetry(cmidToCm);
     } catch (ThriftSecurityException tse) {
       AccumuloSecurityException ase = new AccumuloSecurityException(credentials.getPrincipal(), tse.getCode(), Tables.getPrintableTableInfoFromId(instance,
           tableId), tse);
-      for (CMK cmk : cmidToCm.values())
-        cmk.cm.resultQueue.add(new Result(ase, cmk.cm, location));
+      queueException(location, cmidToCm, ase);
     } catch (TTransportException e) {
       locator.invalidateCache(location);
-      for (CMK cmk : cmidToCm.values())
-        cmk.cm.resultQueue.add(new Result(Status.UNKNOWN, cmk.cm, location));
+      invalidateSession(location, mutations, cmidToCm, sessionId);
     } catch (TApplicationException tae) {
-      for (CMK cmk : cmidToCm.values())
-        cmk.cm.resultQueue.add(new Result(new AccumuloServerException(location, tae), cmk.cm, location));
+      queueException(location, cmidToCm, new AccumuloServerException(location, tae));
     } catch (TException e) {
       locator.invalidateCache(location);
-      for (CMK cmk : cmidToCm.values())
-        cmk.cm.resultQueue.add(new Result(Status.UNKNOWN, cmk.cm, location));
+      invalidateSession(location, mutations, cmidToCm, sessionId);
     } catch (Exception e) {
-      for (CMK cmk : cmidToCm.values())
-        cmk.cm.resultQueue.add(new Result(e, cmk.cm, location));
+      queueException(location, cmidToCm, e);
+    } finally {
+      ThriftUtil.returnClient((TServiceClient) client);
+    }
+  }
+
+  private void queueRetry(Map<Long,CMK> cmidToCm) {
+    ArrayList<QCMutation> ignored = new ArrayList<QCMutation>();
+    for (CMK cmk : cmidToCm.values())
+    	ignored.add(cmk.cm);
+    queueRetry(ignored);
+  }
+
+  private void queueException(String location, Map<Long,CMK> cmidToCm, Exception e) {
+    for (CMK cmk : cmidToCm.values())
+      cmk.cm.resultQueue.add(new Result(e, cmk.cm, location));
+  }
+
+  private void invalidateSession(String location, TabletServerMutations<QCMutation> mutations, Map<Long,CMK> cmidToCm, Long sessionId) {
+    if(sessionId == null){
+      queueRetry(cmidToCm);
+    }else{
+      try {
+        invalidateSession(sessionId, location, mutations);
+        for (CMK cmk : cmidToCm.values())
+          cmk.cm.resultQueue.add(new Result(Status.UNKNOWN, cmk.cm, location));
+      }catch(Exception e2){
+        queueException(location, cmidToCm, e2);
+      }
+    }
+  }
+  
+  /*
+   * The purpose of this code is to ensure that a conditional mutation will not execute when its status is unknown. This allows a user to read the row when the
+   * status is unknown and not have to worry about the tserver applying the mutation after the scan.
+   * 
+   * If a conditional mutation is taking a long time to process, then this method will wait for it to finish... unless this exceeds timeout.
+   */
+  private void invalidateSession(long sessionId, String location, TabletServerMutations<QCMutation> mutations) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    
+    // TODO could assume tserver will invalidate sessions after a given time period
+    
+    ArrayList<QCMutation> mutList = new ArrayList<QCMutation>();
+    
+    for (List<QCMutation> tml : mutations.getMutations().values()) {
+      mutList.addAll(tml);
+    }
+    
+    while (true) {
+      Map<String,TabletServerMutations<QCMutation>> binnedMutations = new HashMap<String,TabletLocator.TabletServerMutations<QCMutation>>();
+      List<QCMutation> failures = new ArrayList<QCMutation>();
+      
+      locator.binMutations(mutList, binnedMutations, failures, credentials);
+      
+      // TODO do failures matter? not if failures only indicates tablets are not assigned
+      
+      if (!binnedMutations.containsKey(location)) {
+        // the tablets are at different locations now, so there is no need to invalidate the session
+        // TODO could be a case where tablet comes back to tserver and then UNKNOW condMut goes through
+        return;
+      }
+      
+      try {
+        // if the mutation is currently processing, this method will block until its done or times out
+        invalidateSession(sessionId, location);
+        return;
+      } catch (TApplicationException tae) {
+        throw new AccumuloServerException(location, tae);
+      } catch (TException e) {
+        locator.invalidateCache(location);
+      }
+      
+      //TODO sleep
+    }
+	
+  }
+  
+  private void invalidateSession(long sessionId, String location) throws TException {
+    TabletClientService.Iface client = null;
+    
+    TInfo tinfo = Tracer.traceInfo();
+    
+    try {
+      client = ThriftUtil.getTServerClient(location, instance.getConfiguration());
+      client.invalidateConditionalUpdate(tinfo, sessionId);
     } finally {
       ThriftUtil.returnClient((TServiceClient) client);
     }

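Taken together, the hunks above replace the single conditionalUpdate RPC with a session-based sequence: startConditionalUpdate returns a session id, conditionalUpdate runs against that id, a NoSuchScanIDException simply requeues the mutations, and transport-level failures call invalidateSession so that a mutation whose status is UNKNOWN cannot still be applied by the tserver after the client has reported back. The condensed sketch below illustrates that control flow only; the ConditionalUpdateClient interface and the queueRetry/reportUnknown/reportException helpers are placeholders, not the Thrift-generated TabletClientService client or the QCMutation plumbing the real code uses:

----------------------------------------------------------------------
// Condensed, illustrative sketch of the session-based flow introduced above.
interface ConditionalUpdateClient {
  long startConditionalUpdate() throws Exception;                     // obtain a session id
  void conditionalUpdate(long sessionId) throws Exception;            // apply batched mutations
  void invalidateConditionalUpdate(long sessionId) throws Exception;  // blocks until not in flight
}

class SessionFlowSketch {
  void sendToServer(ConditionalUpdateClient client) {
    Long sessionId = null;
    try {
      sessionId = client.startConditionalUpdate();
      client.conditionalUpdate(sessionId);
    } catch (Exception e) {
      if (sessionId == null) {
        // nothing was started server side, so the mutations can simply be retried
        queueRetry();
      } else {
        // outcome unknown: invalidate the session so the tserver cannot apply the
        // mutations later, then report UNKNOWN (or the invalidation failure) upstream
        try {
          client.invalidateConditionalUpdate(sessionId);
          reportUnknown();
        } catch (Exception e2) {
          reportException(e2);
        }
      }
    }
  }

  void queueRetry() {}
  void reportUnknown() {}
  void reportException(Exception e) {}
}
----------------------------------------------------------------------

The real invalidateSession additionally re-bins the mutations first and skips the RPC when the tablets have already moved off that tserver, as the TODO comments in the hunk note.
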

[23/50] git commit: Merge remote-tracking branch 'origin/master' into ACCUMULO-1000

Posted by kt...@apache.org.
Merge remote-tracking branch 'origin/master' into ACCUMULO-1000


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a169064b
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a169064b
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a169064b

Branch: refs/heads/ACCUMULO-1000
Commit: a169064b93aa1be04507c156fe0092a0cbb8e2ab
Parents: 1fadfd2 3d7a6e7
Author: Keith Turner <kt...@apache.org>
Authored: Thu Jul 18 15:06:59 2013 -0400
Committer: Keith Turner <kt...@apache.org>
Committed: Thu Jul 18 15:06:59 2013 -0400

----------------------------------------------------------------------
 README                                          |  10 +-
 bin/accumulo                                    |   4 +-
 bin/config.sh                                   |  41 +-
 bin/start-all.sh                                |   8 +-
 bin/start-here.sh                               |   6 +-
 bin/start-server.sh                             |   2 +-
 bin/stop-all.sh                                 |   4 +-
 bin/stop-here.sh                                |   4 +-
 bin/tdown.sh                                    |   2 +-
 bin/tup.sh                                      |   2 +-
 .../1GB/native-standalone/accumulo-env.sh       |   4 +-
 conf/examples/1GB/standalone/accumulo-env.sh    |   4 +-
 .../2GB/native-standalone/accumulo-env.sh       |   4 +-
 conf/examples/2GB/standalone/accumulo-env.sh    |   4 +-
 .../3GB/native-standalone/accumulo-env.sh       |   4 +-
 conf/examples/3GB/standalone/accumulo-env.sh    |   4 +-
 .../512MB/native-standalone/accumulo-env.sh     |   4 +-
 conf/examples/512MB/standalone/accumulo-env.sh  |   4 +-
 .../accumulo/core/file/rfile/PrintInfo.java     |   2 +-
 .../security/crypto/CryptoModuleParameters.java |  12 +-
 .../accumulo_user_manual.tex                    |   1 +
 .../chapters/troubleshooting.tex                | 520 +++++++++++++++++++
 .../apache/accumulo/fate/zookeeper/ZooLock.java |  17 +-
 maven-plugin/pom.xml                            |   4 +
 .../src/it/plugin-test/postbuild.groovy         |   3 +
 .../apache/accumulo/plugin/CustomFilter.java    |  33 ++
 .../org/apache/accumulo/plugin/PluginIT.java    |  44 +-
 .../maven/plugin/AbstractAccumuloMojo.java      |  31 +-
 .../apache/accumulo/maven/plugin/StartMojo.java |   5 +
 .../apache/accumulo/maven/plugin/StopMojo.java  |   2 +
 .../minicluster/MiniAccumuloConfig.java         |   1 +
 pom.xml                                         |   9 +
 .../org/apache/accumulo/server/Accumulo.java    |   4 +-
 .../org/apache/accumulo/server/util/Admin.java  |   2 +-
 .../accumulo/server/util/DumpZookeeper.java     |   4 +-
 .../accumulo/server/util/ListInstances.java     |  35 +-
 .../accumulo/server/util/RestoreZookeeper.java  |   5 +-
 .../accumulo/server/util/TabletServerLocks.java |   9 +-
 .../start/classloader/AccumuloClassLoader.java  |   9 +-
 .../functional/BulkSplitOptimizationIT.java     |   2 -
 .../test/functional/DynamicThreadPoolsIT.java   |  29 +-
 .../accumulo/test/functional/MacTest.java       |   2 +-
 .../accumulo/test/functional/SplitIT.java       |   8 +-
 .../test/functional/ZookeeperRestartIT.java     |  81 +++
 test/system/continuous/agitator.pl              |   2 +-
 test/system/continuous/magitator.pl             |   6 +-
 test/system/continuous/mapred-setup.sh          |   2 +-
 test/system/continuous/start-stats.sh           |   2 +-
 test/system/randomwalk/README                   |   2 +-
 test/system/randomwalk/bin/reset-cluster.sh     |  10 +-
 test/system/randomwalk/bin/start-all.sh         |   4 +-
 test/system/randomwalk/bin/start-local.sh       |   4 +-
 test/system/scalability/run.py                  |   2 +-
 53 files changed, 875 insertions(+), 148 deletions(-)
----------------------------------------------------------------------



[27/50] git commit: ACCUMULO-1537 convert simpler test to use a common MAC; add option to use HDFS because LocalFileSystem does not support flush/sync semantics

Posted by kt...@apache.org.
ACCUMULO-1537 convert simpler test to use a common MAC; add option to use HDFS because LocalFileSystem does not support flush/sync semantics


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/122fa397
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/122fa397
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/122fa397

Branch: refs/heads/ACCUMULO-1000
Commit: 122fa39756b895ae0be0c1f3da2725c3cfb49689
Parents: f8b9145
Author: Eric Newton <ec...@apache.org>
Authored: Fri Jul 19 16:31:10 2013 -0400
Committer: Eric Newton <ec...@apache.org>
Committed: Fri Jul 19 16:31:10 2013 -0400

----------------------------------------------------------------------
 .../accumulo/test/functional/SimpleMacIT.java   | 80 ++++++++++++++++++++
 1 file changed, 80 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/122fa397/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
new file mode 100644
index 0000000..bf37212
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster.LogWriter;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.log4j.Logger;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.rules.TemporaryFolder;
+
+public class SimpleMacIT {
+  public static final Logger log = Logger.getLogger(SimpleMacIT.class);
+      
+  public static final String ROOT_PASSWORD = "secret";
+  
+  static private TemporaryFolder folder = new TemporaryFolder();
+  static private MiniAccumuloCluster cluster;
+  
+  public static Connector getConnector() throws AccumuloException, AccumuloSecurityException {
+    return cluster.getConnector("root", ROOT_PASSWORD);
+  }
+  
+  @BeforeClass
+  public static void setUp() throws Exception {
+    folder.create();
+    MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder("mac"), ROOT_PASSWORD);
+    cluster = new MiniAccumuloCluster(cfg);
+    cluster.start();
+  }
+  
+  
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (cluster != null)
+      cluster.stop();
+    for (LogWriter log : cluster.getLogWriters())
+      log.flush();
+    folder.delete();
+  }
+  
+  static AtomicInteger tableCount = new AtomicInteger();
+  static public String makeTableName() {
+    return "table" + tableCount.getAndIncrement();
+  }
+  
+  static public String rootPath() {
+    return cluster.getConfig().getDir().getAbsolutePath();
+  }
+  
+  static Process exec(Class<? extends Object> clazz, String... args) throws IOException {
+    return cluster.exec(clazz, args);
+  }
+  
+  public static BatchWriterOpts BWOPTS = MacTest.BWOPTS;
+  public static ScannerOpts SOPTS = MacTest.SOPTS;
+}

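SimpleMacIT starts one shared MiniAccumuloCluster in @BeforeClass and exposes getConnector(), makeTableName(), rootPath() and exec(), which is what lets the tests converted in the next message extend it instead of MacTest. The class below is a hypothetical test written against that base, mirroring the write pattern the converted tests use; ExampleIT itself is not part of the repository:

----------------------------------------------------------------------
package org.apache.accumulo.test.functional;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;
import org.junit.Test;

// Illustrative only: a test built on the shared-cluster base class added above.
public class ExampleIT extends SimpleMacIT {

  @Test(timeout = 60 * 1000)
  public void writesToItsOwnTable() throws Exception {
    Connector c = getConnector();        // root connector to the shared MAC
    String tableName = makeTableName();  // unique per call, so tests do not collide
    c.tableOperations().create(tableName);

    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("row"));
    m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
    bw.addMutation(m);
    bw.close();
  }
}
----------------------------------------------------------------------
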

[13/50] git commit: ACCUMULO-1562 fix typo in comment

Posted by kt...@apache.org.
ACCUMULO-1562 fix typo in comment


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/f4c6e6f6
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/f4c6e6f6
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/f4c6e6f6

Branch: refs/heads/ACCUMULO-1000
Commit: f4c6e6f60d716f167db79eb62a9730e50188837e
Parents: e656c3a
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 12:10:15 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 12:10:15 2013 -0400

----------------------------------------------------------------------
 server/src/main/java/org/apache/accumulo/server/util/Admin.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/f4c6e6f6/server/src/main/java/org/apache/accumulo/server/util/Admin.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
index 3bb801a..fca811e 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -115,7 +115,7 @@ public class Admin {
   }
   
   /**
-   * flushing during shutdown is a perfomance optimization, its not required. The method will make an attempt to initiate flushes of all tables and give up if
+   * flushing during shutdown is a performance optimization, its not required. The method will make an attempt to initiate flushes of all tables and give up if
    * it takes too long.
    * 
    */


[07/50] git commit: ACCUMULO-1571

Posted by kt...@apache.org.
ACCUMULO-1571


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/cfb01d46
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/cfb01d46
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/cfb01d46

Branch: refs/heads/ACCUMULO-1000
Commit: cfb01d4675a7e33082b5586f2418f3a96f998b3b
Parents: 8ef0401
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 11:27:58 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 11:27:58 2013 -0400

----------------------------------------------------------------------
 .../main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/cfb01d46/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
index e591403..e23ca0b 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
@@ -39,7 +39,7 @@ public class PrintInfo {
   static class Opts extends Help {
     @Parameter(names = {"-d", "--dump"}, description = "dump the key/value pairs")
     boolean dump = false;
-    @Parameter(names = {"--historgram"}, description = "print a histogram of the key-value sizes")
+    @Parameter(names = {"--histogram"}, description = "print a histogram of the key-value sizes")
     boolean histogram = false;
     @Parameter(description = " <file> { <file> ... }")
     List<String> files = new ArrayList<String>();

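The corrected flag name is consumed by JCommander via the @Parameter annotation on the Opts field, so "--histogram" now matches what users would actually type. The standalone sketch below shows how such an Opts class is parsed; the real PrintInfo.Opts extends a shared Help base class and is wired into PrintInfo's own main, neither of which is reproduced here, and HistogramOptsSketch is an illustrative name:

----------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;

import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;

// Illustrative only: parsing a JCommander-annotated Opts class like the one in the diff.
public class HistogramOptsSketch {
  static class Opts {
    @Parameter(names = {"-d", "--dump"}, description = "dump the key/value pairs")
    boolean dump = false;
    @Parameter(names = {"--histogram"}, description = "print a histogram of the key-value sizes")
    boolean histogram = false;
    @Parameter(description = " <file> { <file> ... }")
    List<String> files = new ArrayList<String>();
  }

  public static void main(String[] args) {
    Opts opts = new Opts();
    JCommander jc = new JCommander(opts);
    jc.parse(args);                        // e.g. args = {"--histogram", "some.rf"}
    System.out.println("histogram=" + opts.histogram + " files=" + opts.files);
  }
}
----------------------------------------------------------------------
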

[28/50] ACCUMULO-1537 convert simpler test to use a common MAC; add option to use HDFS because LocalFileSystem does not support flush/sync semantics

Posted by kt...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
index c0fc4ea..1ae91fe 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -47,20 +48,28 @@ import org.apache.accumulo.core.security.TablePermission;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class PermissionsIT extends MacTest {
-  private static final String TEST_USER = "test_user";
-  private static final PasswordToken TEST_PASS = new PasswordToken("test_password");
+public class PermissionsIT extends SimpleMacIT {
+
+  static AtomicInteger userId = new AtomicInteger(0);
+  
+  static String makeUserName() {
+    return "user_" + userId.getAndIncrement();
+  }
+  
   
   @Test(timeout = 60 * 1000)
   public void systemPermissionsTest() throws Exception {
+    String testUser = makeUserName();
+    PasswordToken testPasswd = new PasswordToken("test_password");
+
     // verify that the test is being run by root
     Connector c = getConnector();
     verifyHasOnlyTheseSystemPermissions(c, c.whoami(), SystemPermission.values());
     
     // create the test user
-    c.securityOperations().createLocalUser(TEST_USER, TEST_PASS);
-    Connector test_user_conn = c.getInstance().getConnector(TEST_USER, TEST_PASS);
-    verifyHasNoSystemPermissions(c, TEST_USER, SystemPermission.values());
+    c.securityOperations().createLocalUser(testUser, testPasswd);
+    Connector test_user_conn = c.getInstance().getConnector(testUser, testPasswd);
+    verifyHasNoSystemPermissions(c, testUser, SystemPermission.values());
     
     // test each permission
     for (SystemPermission perm : SystemPermission.values()) {
@@ -69,9 +78,9 @@ public class PermissionsIT extends MacTest {
       // verify GRANT can't be granted
       if (perm.equals(SystemPermission.GRANT)) {
         try {
-          c.securityOperations().grantSystemPermission(TEST_USER, perm);
+          c.securityOperations().grantSystemPermission(testUser, perm);
         } catch (AccumuloSecurityException e) {
-          verifyHasNoSystemPermissions(c, TEST_USER, perm);
+          verifyHasNoSystemPermissions(c, testUser, perm);
           continue;
         }
         throw new IllegalStateException("Should NOT be able to grant GRANT");
@@ -79,11 +88,11 @@ public class PermissionsIT extends MacTest {
       
       // test permission before and after granting it
       testMissingSystemPermission(c, test_user_conn, perm);
-      c.securityOperations().grantSystemPermission(TEST_USER, perm);
-      verifyHasOnlyTheseSystemPermissions(c, TEST_USER, perm);
+      c.securityOperations().grantSystemPermission(testUser, perm);
+      verifyHasOnlyTheseSystemPermissions(c, testUser, perm);
       testGrantedSystemPermission(c, test_user_conn, perm);
-      c.securityOperations().revokeSystemPermission(TEST_USER, perm);
-      verifyHasNoSystemPermissions(c, TEST_USER, perm);
+      c.securityOperations().revokeSystemPermission(testUser, perm);
+      verifyHasNoSystemPermissions(c, testUser, perm);
     }
   }
   
@@ -103,7 +112,7 @@ public class PermissionsIT extends MacTest {
     // test permission prior to granting it
     switch (perm) {
       case CREATE_TABLE:
-        tableName = "__CREATE_TABLE_WITHOUT_PERM_TEST__";
+        tableName = makeTableName() + "__CREATE_TABLE_WITHOUT_PERM_TEST__";
         try {
           test_user_conn.tableOperations().create(tableName);
           throw new IllegalStateException("Should NOT be able to create a table");
@@ -113,7 +122,7 @@ public class PermissionsIT extends MacTest {
         }
         break;
       case DROP_TABLE:
-        tableName = "__DROP_TABLE_WITHOUT_PERM_TEST__";
+        tableName = makeTableName() + "__DROP_TABLE_WITHOUT_PERM_TEST__";
         root_conn.tableOperations().create(tableName);
         try {
           test_user_conn.tableOperations().delete(tableName);
@@ -124,7 +133,7 @@ public class PermissionsIT extends MacTest {
         }
         break;
       case ALTER_TABLE:
-        tableName = "__ALTER_TABLE_WITHOUT_PERM_TEST__";
+        tableName = makeTableName() + "__ALTER_TABLE_WITHOUT_PERM_TEST__";
         root_conn.tableOperations().create(tableName);
         try {
           test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
@@ -203,20 +212,20 @@ public class PermissionsIT extends MacTest {
     // test permission after granting it
     switch (perm) {
       case CREATE_TABLE:
-        tableName = "__CREATE_TABLE_WITH_PERM_TEST__";
+        tableName = makeTableName() + "__CREATE_TABLE_WITH_PERM_TEST__";
         test_user_conn.tableOperations().create(tableName);
         if (!root_conn.tableOperations().list().contains(tableName))
           throw new IllegalStateException("Should be able to create a table");
         break;
       case DROP_TABLE:
-        tableName = "__DROP_TABLE_WITH_PERM_TEST__";
+        tableName = makeTableName() + "__DROP_TABLE_WITH_PERM_TEST__";
         root_conn.tableOperations().create(tableName);
         test_user_conn.tableOperations().delete(tableName);
         if (root_conn.tableOperations().list().contains(tableName))
           throw new IllegalStateException("Should be able to delete a table");
         break;
       case ALTER_TABLE:
-        tableName = "__ALTER_TABLE_WITH_PERM_TEST__";
+        tableName = makeTableName() + "__ALTER_TABLE_WITH_PERM_TEST__";
         String table2 = tableName + "2";
         root_conn.tableOperations().create(tableName);
         test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
@@ -282,55 +291,58 @@ public class PermissionsIT extends MacTest {
         throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
   }
   
-  private static final String TEST_TABLE = "__TABLE_PERMISSION_TEST__";
   
   @Test(timeout=30*1000)
   public void tablePermissionTest() throws Exception {
     // create the test user
+    String testUser = makeUserName();
+    PasswordToken testPasswd = new PasswordToken("test_password");
+
     Connector c = getConnector();
-    c.securityOperations().createLocalUser(TEST_USER, TEST_PASS);
-    Connector test_user_conn = c.getInstance().getConnector(TEST_USER, TEST_PASS);
+    c.securityOperations().createLocalUser(testUser, testPasswd);
+    Connector test_user_conn = c.getInstance().getConnector(testUser, testPasswd);
     
     // check for read-only access to metadata table
     verifyHasOnlyTheseTablePermissions(c, c.whoami(), MetadataTable.NAME, TablePermission.READ, TablePermission.ALTER_TABLE);
-    verifyHasOnlyTheseTablePermissions(c, TEST_USER, MetadataTable.NAME, TablePermission.READ);
-    
+    verifyHasOnlyTheseTablePermissions(c, testUser, MetadataTable.NAME, TablePermission.READ);
+    String tableName = makeTableName() + "__TABLE_PERMISSION_TEST__";
+      
     // test each permission
     for (TablePermission perm : TablePermission.values()) {
       log.debug("Verifying the " + perm + " permission");
       
       // test permission before and after granting it
-      createTestTable(c);
-      testMissingTablePermission(c, test_user_conn, perm);
-      c.securityOperations().grantTablePermission(TEST_USER, TEST_TABLE, perm);
-      verifyHasOnlyTheseTablePermissions(c, TEST_USER, TEST_TABLE, perm);
-      testGrantedTablePermission(c, test_user_conn, perm);
+      createTestTable(c, testUser, tableName);
+      testMissingTablePermission(c, test_user_conn, perm, tableName);
+      c.securityOperations().grantTablePermission(testUser, tableName, perm);
+      verifyHasOnlyTheseTablePermissions(c, testUser, tableName, perm);
+      testGrantedTablePermission(c, test_user_conn, perm, tableName);
       
-      createTestTable(c);
-      c.securityOperations().revokeTablePermission(TEST_USER, TEST_TABLE, perm);
-      verifyHasNoTablePermissions(c, TEST_USER, TEST_TABLE, perm);
+      createTestTable(c, testUser, tableName);
+      c.securityOperations().revokeTablePermission(testUser, tableName, perm);
+      verifyHasNoTablePermissions(c, testUser, tableName, perm);
     }
   }
   
-  private void createTestTable(Connector c) throws Exception, MutationsRejectedException {
-    if (!c.tableOperations().exists(TEST_TABLE)) {
+  private void createTestTable(Connector c, String testUser, String tableName) throws Exception, MutationsRejectedException {
+    if (!c.tableOperations().exists(tableName)) {
       // create the test table
-      c.tableOperations().create(TEST_TABLE);
+      c.tableOperations().create(tableName);
       // put in some initial data
-      BatchWriter writer = c.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+      BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig());
       Mutation m = new Mutation(new Text("row"));
       m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
       writer.addMutation(m);
       writer.close();
       
       // verify proper permissions for creator and test user
-      verifyHasOnlyTheseTablePermissions(c, c.whoami(), TEST_TABLE, TablePermission.values());
-      verifyHasNoTablePermissions(c, TEST_USER, TEST_TABLE, TablePermission.values());
+      verifyHasOnlyTheseTablePermissions(c, c.whoami(), tableName, TablePermission.values());
+      verifyHasNoTablePermissions(c, testUser, tableName, TablePermission.values());
       
     }
   }
   
-  private static void testMissingTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws Exception {
+  private static void testMissingTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm, String tableName) throws Exception {
     Scanner scanner;
     BatchWriter writer;
     Mutation m;
@@ -340,7 +352,7 @@ public class PermissionsIT extends MacTest {
     switch (perm) {
       case READ:
         try {
-          scanner = test_user_conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
+          scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY);
           int i = 0;
           for (Entry<Key,Value> entry : scanner)
             i += 1 + entry.getKey().getRowData().length();
@@ -354,7 +366,7 @@ public class PermissionsIT extends MacTest {
         break;
       case WRITE:
         try {
-          writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+          writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
           m = new Mutation(new Text("row"));
           m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
           writer.addMutation(m);
@@ -377,7 +389,7 @@ public class PermissionsIT extends MacTest {
         Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
         groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
         try {
-          test_user_conn.tableOperations().setLocalityGroups(TEST_TABLE, groups);
+          test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
           throw new IllegalStateException("User should not be able to set locality groups");
         } catch (AccumuloSecurityException e) {
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
@@ -386,7 +398,7 @@ public class PermissionsIT extends MacTest {
         break;
       case DROP_TABLE:
         try {
-          test_user_conn.tableOperations().delete(TEST_TABLE);
+          test_user_conn.tableOperations().delete(tableName);
           throw new IllegalStateException("User should not be able delete the table");
         } catch (AccumuloSecurityException e) {
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
@@ -395,7 +407,7 @@ public class PermissionsIT extends MacTest {
         break;
       case GRANT:
         try {
-          test_user_conn.securityOperations().grantTablePermission("root", TEST_TABLE, TablePermission.GRANT);
+          test_user_conn.securityOperations().grantTablePermission("root", tableName, TablePermission.GRANT);
           throw new IllegalStateException("User should not be able grant permissions");
         } catch (AccumuloSecurityException e) {
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
@@ -407,7 +419,7 @@ public class PermissionsIT extends MacTest {
     }
   }
   
-  private static void testGrantedTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws AccumuloException,
+  private static void testGrantedTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm, String tableName) throws AccumuloException,
       TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
     Scanner scanner;
     BatchWriter writer;
@@ -417,13 +429,13 @@ public class PermissionsIT extends MacTest {
     // test permission after granting it
     switch (perm) {
       case READ:
-        scanner = test_user_conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
+        scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY);
         Iterator<Entry<Key,Value>> iter = scanner.iterator();
         while (iter.hasNext())
           iter.next();
         break;
       case WRITE:
-        writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+        writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
         m = new Mutation(new Text("row"));
         m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
         writer.addMutation(m);
@@ -437,10 +449,10 @@ public class PermissionsIT extends MacTest {
         groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
         break;
       case DROP_TABLE:
-        test_user_conn.tableOperations().delete(TEST_TABLE);
+        test_user_conn.tableOperations().delete(tableName);
         break;
       case GRANT:
-        test_user_conn.securityOperations().grantTablePermission("root", TEST_TABLE, TablePermission.GRANT);
+        test_user_conn.securityOperations().grantTablePermission("root", tableName, TablePermission.GRANT);
         break;
       default:
         throw new IllegalArgumentException("Unrecognized table Permission: " + perm);

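The conversion above drops the fixed TEST_USER and TEST_TABLE constants in favor of AtomicInteger-backed makeUserName() and makeTableName() helpers, so tests that now share one long-lived cluster never collide on user or table names. The small class below isolates that pattern; UniqueNames is an illustrative name, not a class in the repository:

----------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch of the unique-name pattern PermissionsIT switches to above:
// class-wide counters guarantee each call yields a fresh user or table name, which
// is what allows many tests to share a single MiniAccumuloCluster safely.
public class UniqueNames {
  private static final AtomicInteger userId = new AtomicInteger(0);
  private static final AtomicInteger tableCount = new AtomicInteger(0);

  static String makeUserName() {
    return "user_" + userId.getAndIncrement();
  }

  static String makeTableName() {
    return "table" + tableCount.getAndIncrement();
  }

  public static void main(String[] args) {
    // prints user_0 user_1 table0 table1
    System.out.println(makeUserName() + " " + makeUserName() + " "
        + makeTableName() + " " + makeTableName());
  }
}
----------------------------------------------------------------------
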
http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
index 051df56..25292d6 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
@@ -23,22 +23,27 @@ import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.VerifyIngest;
 import org.junit.Test;
 
-public class RenameIT extends MacTest {
+public class RenameIT extends SimpleMacIT {
   
   @Test(timeout=60*1000)
   public void renameTest() throws Exception {
+    String name1 = makeTableName();
+    String name2 = makeTableName();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     ScannerOpts scanOpts = new ScannerOpts();
     TestIngest.Opts opts = new TestIngest.Opts();
     opts.createTable = true;
+    opts.tableName = name1;
     Connector c = getConnector();
     TestIngest.ingest(c, opts, bwOpts);
-    c.tableOperations().rename("test_ingest", "renamed");
+    c.tableOperations().rename(name1, name2);
     TestIngest.ingest(c, opts, bwOpts);
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.tableName = name2;
     VerifyIngest.verifyIngest(c, vopts, scanOpts);
-    c.tableOperations().delete("test_ingest");
-    c.tableOperations().rename("renamed", "test_ingest");
+    c.tableOperations().delete(name1);
+    c.tableOperations().rename(name2, name1);
+    vopts.tableName = name1;
     VerifyIngest.verifyIngest(c, vopts, scanOpts);
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
index 2053916..d229ca7 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
@@ -42,6 +42,7 @@ public class RestartIT extends MacTest {
   @Override
   public void configure(MiniAccumuloConfig cfg) {
     cfg.setSiteConfig(Collections.singletonMap(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s"));
+    cfg.useMiniDFS(true);
   }
 
   private static final ScannerOpts SOPTS = new ScannerOpts();
@@ -111,9 +112,9 @@ public class RestartIT extends MacTest {
     List<ProcessReference> procs = new ArrayList<ProcessReference>(cluster.getProcesses().get(ServerType.TABLET_SERVER));
     for (ProcessReference tserver : procs) {
       cluster.killProcess(ServerType.TABLET_SERVER, tserver);
-      cluster.start();
-      VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
     }
+    cluster.start();
+    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
   }
 
   @Test(timeout=2 * 60 * 1000)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
index f60a8f0..ca4abb5 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
@@ -40,6 +40,7 @@ public class RestartStressIT extends MacTest {
     opts.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
     opts.put(Property.TSERV_WALOG_MAX_SIZE.getKey(), "50K");
     cfg.setSiteConfig(opts);
+    cfg.useMiniDFS(true);
   }
 
   private static final TestIngest.Opts IOPTS;
@@ -52,7 +53,7 @@ public class RestartStressIT extends MacTest {
   private static final ScannerOpts SOPTS = new ScannerOpts();
   
   
-  @Test(timeout=120*1000)
+  @Test(timeout=600*1000)
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -61,7 +62,7 @@ public class RestartStressIT extends MacTest {
         "-u", "root", "-p", MacTest.PASSWORD, 
         "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), 
         "--rows", "" + IOPTS.rows);
-    for (int i = 0; i < 5; i++) {
+    for (int i = 0; i < 2; i++) {
       UtilWaitThread.sleep(10*1000);
       cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
       cluster.start();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
index 5c71b30..1f4f513 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
@@ -36,14 +36,15 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class ScanIteratorIT extends MacTest {
+public class ScanIteratorIT extends SimpleMacIT {
   
   @Test(timeout=30*1000)
   public void run() throws Exception {
+    String tableName = makeTableName();
     Connector c = getConnector();
-    c.tableOperations().create("foo");
+    c.tableOperations().create(tableName);
     
-    BatchWriter bw = c.createBatchWriter("foo", new BatchWriterConfig());
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
     
     for (int i = 0; i < 1000; i++) {
       Mutation m = new Mutation(new Text(String.format("%06d", i)));
@@ -55,12 +56,12 @@ public class ScanIteratorIT extends MacTest {
     
     bw.close();
     
-    Scanner scanner = c.createScanner("foo", new Authorizations());
+    Scanner scanner = c.createScanner(tableName, new Authorizations());
     
     setupIter(scanner);
     verify(scanner, 1, 999);
     
-    BatchScanner bscanner = c.createBatchScanner("foo", new Authorizations(), 3);
+    BatchScanner bscanner = c.createBatchScanner(tableName, new Authorizations(), 3);
     bscanner.setRanges(Collections.singleton(new Range((Key) null, null)));
     
     setupIter(bscanner);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
index ce5b817..6a38783 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
@@ -31,7 +31,7 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class ScanRangeIT extends MacTest {
+public class ScanRangeIT extends SimpleMacIT {
   
   private static final int TS_LIMIT = 1;
   private static final int CQ_LIMIT = 5;
@@ -41,19 +41,21 @@ public class ScanRangeIT extends MacTest {
   @Test(timeout=60*1000)
   public void run() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("table1");
-    c.tableOperations().create("table2");
+    String table1 = makeTableName();
+    c.tableOperations().create(table1);
+    String table2 = makeTableName();
+    c.tableOperations().create(table2);
     TreeSet<Text> splitRows = new TreeSet<Text>();
     int splits = 3;
     for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
       splitRows.add(createRow(i));
-    c.tableOperations().addSplits("table2", splitRows);
+    c.tableOperations().addSplits(table2, splitRows);
     
-    insertData(c, "table1");
-    scanTable(c, "table1");
+    insertData(c, table1);
+    scanTable(c, table1);
     
-    insertData(c, "table2");
-    scanTable(c, "table2");
+    insertData(c, table2);
+    scanTable(c, table2);
   }
   
   private void scanTable(Connector c, String table) throws Exception {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
index 0293ae8..4f1a105 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
@@ -36,17 +36,18 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class ServerSideErrorIT extends MacTest {
+public class ServerSideErrorIT extends SimpleMacIT {
   
   @Test(timeout=60*1000)
   public void run() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("tt");
+    String tableName = makeTableName();
+    c.tableOperations().create(tableName);
     IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
     Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
-    c.tableOperations().attachIterator("tt", is);
+    c.tableOperations().attachIterator(tableName, is);
     
-    BatchWriter bw = c.createBatchWriter("tt", new BatchWriterConfig());
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
     
     Mutation m = new Mutation(new Text("r1"));
     m.put(new Text("acf"), new Text("foo"), new Value("1".getBytes()));
@@ -56,7 +57,7 @@ public class ServerSideErrorIT extends MacTest {
     bw.close();
     
     // try to scan table
-    Scanner scanner = c.createScanner("tt", Authorizations.EMPTY);
+    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
     
     boolean caught = false;
     try {
@@ -71,7 +72,7 @@ public class ServerSideErrorIT extends MacTest {
       throw new Exception("Scan did not fail");
     
     // try to batch scan the table
-    BatchScanner bs = c.createBatchScanner("tt", Authorizations.EMPTY, 2);
+    BatchScanner bs = c.createBatchScanner(tableName, Authorizations.EMPTY, 2);
     bs.setRanges(Collections.singleton(new Range()));
     
     caught = false;
@@ -90,14 +91,14 @@ public class ServerSideErrorIT extends MacTest {
     
     // remove the bad agg so accumulo can shutdown
     TableOperations to = c.tableOperations();
-    for (Entry<String,String> e : to.getProperties("tt")) {
-      to.removeProperty("tt", e.getKey());
+    for (Entry<String,String> e : to.getProperties(tableName)) {
+      to.removeProperty(tableName, e.getKey());
     }
     
     UtilWaitThread.sleep(500);
     
     // should be able to scan now
-    scanner = c.createScanner("tt", Authorizations.EMPTY);
+    scanner = c.createScanner(tableName, Authorizations.EMPTY);
     for (Entry<Key,Value> entry : scanner) {
       entry.getKey();
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
index bf37212..f996094 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SimpleMacIT.java
@@ -25,7 +25,6 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster.LogWriter;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.log4j.Logger;
 import org.junit.AfterClass;
@@ -45,21 +44,24 @@ public class SimpleMacIT {
   }
   
   @BeforeClass
-  public static void setUp() throws Exception {
-    folder.create();
-    MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder("mac"), ROOT_PASSWORD);
-    cluster = new MiniAccumuloCluster(cfg);
-    cluster.start();
+  synchronized public static void setUp() throws Exception {
+    if (cluster == null) {
+      folder.create();
+      MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder("mac"), ROOT_PASSWORD);
+      cluster = new MiniAccumuloCluster(cfg);
+      cluster.start();
+      Runtime.getRuntime().addShutdownHook(new Thread() {
+        @Override
+        public void run() {
+          folder.delete();
+        }
+      });
+    }
   }
   
   
   @AfterClass
   public static void tearDown() throws Exception {
-    if (cluster != null)
-      cluster.stop();
-    for (LogWriter log : cluster.getLogWriters())
-      log.flush();
-    folder.delete();
   }
   
   static AtomicInteger tableCount = new AtomicInteger();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
index dda7c63..3c6c91e 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
@@ -34,14 +34,15 @@ import org.junit.Test;
 /**
  * This test recreates issue ACCUMULO-516. Until that issue is fixed this test should time out.
  */
-public class SparseColumnFamilyIT extends MacTest {
+public class SparseColumnFamilyIT extends SimpleMacIT {
   
   @Test(timeout=30*1000)
   public void sparceColumnFamily() throws Exception {
+    String scftt = makeTableName();
     Connector c = getConnector();
-    c.tableOperations().create("scftt");
+    c.tableOperations().create(scftt);
     
-    BatchWriter bw = c.createBatchWriter("scftt", new BatchWriterConfig());
+    BatchWriter bw = c.createBatchWriter(scftt, new BatchWriterConfig());
     
     // create file in the tablet that has mostly column family 0, with a few entries for column family 1
     
@@ -52,7 +53,7 @@ public class SparseColumnFamilyIT extends MacTest {
     bw.addMutation(nm(99999 * 2, 1, 99999));
     bw.flush();
     
-    c.tableOperations().flush("scftt", null, null, true);
+    c.tableOperations().flush(scftt, null, null, true);
     
     // create a file that has column family 1 and 0 interleaved
     for (int i = 0; i < 100000; i++) {
@@ -60,9 +61,9 @@ public class SparseColumnFamilyIT extends MacTest {
     }
     bw.close();
     
-    c.tableOperations().flush("scftt", null, null, true);
+    c.tableOperations().flush(scftt, null, null, true);
     
-    Scanner scanner = c.createScanner("scftt", Authorizations.EMPTY);
+    Scanner scanner = c.createScanner(scftt, Authorizations.EMPTY);
     
     for (int i = 0; i < 200; i++) {
       

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
index 6ada2c2..78a4473 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -20,11 +20,11 @@ import static org.junit.Assert.assertEquals;
 
 import org.junit.Test;
 
-public class SplitRecoveryIT extends MacTest {
+public class SplitRecoveryIT extends SimpleMacIT {
   
   @Test(timeout=10*1000)
   public void test() throws Exception {
-    assertEquals(0, cluster.exec(SplitRecoveryTest.class).waitFor());
+    assertEquals(0, exec(SplitRecoveryTest.class).waitFor());
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
index 3a7fc93..3c1b98b 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
@@ -21,13 +21,13 @@ import static org.junit.Assert.*;
 import org.apache.accumulo.start.TestMain;
 import org.junit.Test;
 
-public class StartIT extends MacTest {
+public class StartIT extends SimpleMacIT {
   
   @Test(timeout=10*1000)
   public void test() throws Exception {
-    assertTrue(cluster.exec(TestMain.class, "exception").waitFor() != 0);
-    assertEquals(0, cluster.exec(TestMain.class, "success").waitFor());
-    assertTrue(cluster.exec(TestMain.class).waitFor() != 0);
+    assertTrue(exec(TestMain.class, "exception").waitFor() != 0);
+    assertEquals(0, exec(TestMain.class, "success").waitFor());
+    assertTrue(exec(TestMain.class).waitFor() != 0);
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
index 8f2244b..cda6c98 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
@@ -33,7 +33,6 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.util.Admin;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.VerifyIngest;
 import org.apache.hadoop.fs.FileSystem;
@@ -41,19 +40,22 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class TableIT extends MacTest {
+public class TableIT extends SimpleMacIT {
   
   @Test(timeout = 2 * 60 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     TableOperations to = c.tableOperations();
-    to.create("test_ingest");
+    String tableName = makeTableName();
+    to.create(tableName);
     TestIngest.Opts opts = new TestIngest.Opts();
+    opts.tableName = tableName;
     TestIngest.ingest(c, opts, new BatchWriterOpts());
-    to.flush("test_ingest", null, null, true);
+    to.flush(tableName, null, null, true);
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.tableName = tableName;
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    String id = to.tableIdMap().get("test_ingest");
+    String id = to.tableIdMap().get(tableName);
     Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
     int count = 0;
@@ -63,21 +65,20 @@ public class TableIT extends MacTest {
     }
     assertTrue(count > 0);
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    assertTrue(fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id)).length > 0);
-    to.delete("test_ingest");
+    assertTrue(fs.listStatus(new Path(rootPath() + "/accumulo/tables/" + id)).length > 0);
+    to.delete(tableName);
     count = 0;
     for (@SuppressWarnings("unused")
     Entry<Key,Value> entry : s) {
       count++;
     }
     assertEquals(0, count);
-    assertEquals(0, fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id)).length);
-    assertNull(to.tableIdMap().get("test_ingest"));
-    to.create("test_ingest");
+    assertEquals(0, fs.listStatus(new Path(rootPath() + "/accumulo/tables/" + id)).length);
+    assertNull(to.tableIdMap().get(tableName));
+    to.create(tableName);
     TestIngest.ingest(c, opts, new BatchWriterOpts());
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    to.delete("test_ingest");
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+    to.delete(tableName);
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
index 138584a..fb542b8 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
@@ -40,7 +40,7 @@ import org.junit.Test;
 /**
  * 
  */
-public class TimeoutIT extends MacTest {
+public class TimeoutIT extends SimpleMacIT {
   
   @Test(timeout=60*1000)
   public void run() throws Exception {
@@ -50,13 +50,14 @@ public class TimeoutIT extends MacTest {
   }
   
   public void testBatchWriterTimeout(Connector conn) throws Exception {
-    conn.tableOperations().create("foo1");
-    conn.tableOperations().addConstraint("foo1", SlowConstraint.class.getName());
+    String tableName = makeTableName();
+    conn.tableOperations().create(tableName);
+    conn.tableOperations().addConstraint(tableName, SlowConstraint.class.getName());
     
     // give constraint time to propagate through zookeeper
     UtilWaitThread.sleep(1000);
     
-    BatchWriter bw = conn.createBatchWriter("foo1", new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
+    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
     
     Mutation mut = new Mutation("r1");
     mut.put("cf1", "cq1", "v1");
@@ -73,9 +74,10 @@ public class TimeoutIT extends MacTest {
   }
   
   public void testBatchScannerTimeout(Connector conn) throws Exception {
-    getConnector().tableOperations().create("timeout");
+    String tableName = makeTableName();
+    getConnector().tableOperations().create(tableName);
     
-    BatchWriter bw = getConnector().createBatchWriter("timeout", new BatchWriterConfig());
+    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
     
     Mutation m = new Mutation("r1");
     m.put("cf1", "cq1", "v1");
@@ -86,7 +88,7 @@ public class TimeoutIT extends MacTest {
     bw.addMutation(m);
     bw.close();
     
-    BatchScanner bs = getConnector().createBatchScanner("timeout", Authorizations.EMPTY, 2);
+    BatchScanner bs = getConnector().createBatchScanner(tableName, Authorizations.EMPTY, 2);
     bs.setRanges(Collections.singletonList(new Range()));
     
     // should not timeout

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
index 51a45fa..2bbc7a5 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
@@ -44,21 +44,23 @@ import org.apache.accumulo.core.util.ByteArraySet;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class VisibilityIT extends MacTest {
+public class VisibilityIT extends SimpleMacIT {
   
   @Test(timeout=30*1000)
   public void run() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("vt");
-    c.tableOperations().create("vt2");
-    c.tableOperations().setProperty("vt2", Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL");
+    String table = makeTableName();
+    c.tableOperations().create(table);
+    String table2 = makeTableName();
+    c.tableOperations().create(table2);
+    c.tableOperations().setProperty(table2, Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL");
     
-    insertData(c);
-    queryData(c);
-    deleteData(c);
+    insertData(c, table);
+    queryData(c, table);
+    deleteData(c, table);
     
-    insertDefaultData(c);
-    queryDefaultData(c);
+    insertDefaultData(c, table2);
+    queryDefaultData(c, table2);
     
   }
   
@@ -82,9 +84,9 @@ public class VisibilityIT extends MacTest {
     m.putDelete(new Text(cf), new Text(cq), le);
   }
   
-  private void insertData(Connector c) throws Exception {
+  private void insertData(Connector c, String tableName) throws Exception {
     
-    BatchWriter bw = c.createBatchWriter("vt", new BatchWriterConfig());
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
     Mutation m1 = new Mutation(new Text("row1"));
     
     mput(m1, "cf1", "cq1", "", "v1");
@@ -105,9 +107,9 @@ public class VisibilityIT extends MacTest {
     bw.close();
   }
   
-  private void deleteData(Connector c) throws Exception {
+  private void deleteData(Connector c, String tableName) throws Exception {
     
-    BatchWriter bw = c.createBatchWriter("vt", new BatchWriterConfig());
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
     Mutation m1 = new Mutation(new Text("row1"));
     
     mputDelete(m1, "cf1", "cq1", "");
@@ -134,11 +136,11 @@ public class VisibilityIT extends MacTest {
     expected.put(nss("FOO"), nss("v11"));
     expected.put(nss("A", "FOO"), nss("v9"));
     
-    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
+    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
   }
   
-  private void insertDefaultData(Connector c) throws Exception {
-    BatchWriter bw = c.createBatchWriter("vt2", new BatchWriterConfig());
+  private void insertDefaultData(Connector c, String tableName) throws Exception {
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
     Mutation m1 = new Mutation(new Text("row1"));
     
     mput(m1, "cf1", "cq1", "BASE", "v1");
@@ -164,7 +166,7 @@ public class VisibilityIT extends MacTest {
     }
   }
   
-  private void queryData(Connector c) throws Exception {
+  private void queryData(Connector c, String tableName) throws Exception {
     Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
     expected.put(nss(), nss("v1"));
     expected.put(nss("A"), nss("v2"));
@@ -185,14 +187,14 @@ public class VisibilityIT extends MacTest {
     expected.put(nss("B", "FOO", "L"), nss("v12"));
     expected.put(nss("B", "FOO", "M"), nss("v12"));
     
-    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
-    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected);
-    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected);
-    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected);
-    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected);
+    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
+    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected);
+    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected);
+    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected);
+    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected);
   }
   
-  private void queryData(Connector c, Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception {
+  private void queryData(Connector c, String tableName, Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception {
     
     c.securityOperations().changeUserAuthorizations("root", new Authorizations(nbas(userAuths)));
     
@@ -212,25 +214,25 @@ public class VisibilityIT extends MacTest {
       }
       
       set1.retainAll(userAuths);
-      verify(c, set1, e);
+      verify(c, tableName, set1, e);
     }
     
   }
   
-  private void queryDefaultData(Connector c) throws Exception {
+  private void queryDefaultData(Connector c, String tableName) throws Exception {
     Scanner scanner;
     
     // should return no records
     c.securityOperations().changeUserAuthorizations("root", new Authorizations("BASE", "DEFLABEL"));
-    scanner = getConnector().createScanner("vt2", new Authorizations());
+    scanner = getConnector().createScanner(tableName, new Authorizations());
     verifyDefault(scanner, 0);
     
     // should return one record
-    scanner = getConnector().createScanner("vt2", new Authorizations("BASE"));
+    scanner = getConnector().createScanner(tableName, new Authorizations("BASE"));
     verifyDefault(scanner, 1);
     
     // should return all three records
-    scanner = getConnector().createScanner("vt2", new Authorizations("BASE", "DEFLABEL"));
+    scanner = getConnector().createScanner(tableName, new Authorizations("BASE", "DEFLABEL"));
     verifyDefault(scanner, 3);
   }
   
@@ -242,11 +244,11 @@ public class VisibilityIT extends MacTest {
       throw new Exception(" expected count !=0 " + expectedCount);
   }
   
-  private void verify(Connector c, Set<String> auths, Set<String> expectedValues) throws Exception {
+  private void verify(Connector c, String tableName, Set<String> auths, Set<String> expectedValues) throws Exception {
     ByteArraySet bas = nbas(auths);
     
     try {
-      verify(c, bas, expectedValues.toArray(new String[0]));
+      verify(c, tableName, bas, expectedValues.toArray(new String[0]));
     } catch (Exception e) {
       throw new Exception("Verification failed auths=" + auths + " exp=" + expectedValues, e);
     }
@@ -260,11 +262,11 @@ public class VisibilityIT extends MacTest {
     return bas;
   }
   
-  private void verify(Connector c, ByteArraySet nss, String... expected) throws Exception {
-    Scanner scanner = c.createScanner("vt", new Authorizations(nss));
+  private void verify(Connector c, String tableName, ByteArraySet nss, String... expected) throws Exception {
+    Scanner scanner = c.createScanner(tableName, new Authorizations(nss));
     verify(scanner.iterator(), expected);
     
-    BatchScanner bs = getConnector().createBatchScanner("vt", new Authorizations(nss), 3);
+    BatchScanner bs = getConnector().createBatchScanner(tableName, new Authorizations(nss), 3);
     bs.setRanges(Collections.singleton(new Range()));
     verify(bs.iterator(), expected);
     bs.close();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
index 124629f..8149c5f 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
@@ -45,9 +45,10 @@ public class WriteAheadLogIT extends MacTest {
     siteConfig.put(Property.MASTER_RECOVERY_DELAY.getKey(), "0");
     siteConfig.put(Property.TSERV_MAXMEM.getKey(), "200K");
     siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+    cfg.useMiniDFS(true);
   }
 
-  @Test(timeout=60*1000)
+  @Test(timeout=100*1000)
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
index df8e656..752d843 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
@@ -27,12 +27,13 @@ import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.VerifyIngest;
 import org.junit.Test;
 
-public class WriteLotsIT extends MacTest {
+public class WriteLotsIT extends SimpleMacIT {
   
   @Test(timeout=20*1000)
   public void writeLots() throws Exception {
     final Connector c = getConnector();
-    c.tableOperations().create("test_ingest");
+    final String tableName = makeTableName();
+    c.tableOperations().create(tableName);
     final AtomicReference<Exception> ref = new AtomicReference<Exception>();
     List<Thread> threads = new ArrayList<Thread>();
     for (int i = 0; i < 10; i++) {
@@ -43,6 +44,7 @@ public class WriteLotsIT extends MacTest {
             TestIngest.Opts opts = new TestIngest.Opts();
             opts.startRow = index * 10000;
             opts.rows = 10000;
+            opts.tableName = tableName;
             TestIngest.ingest(c, opts, new BatchWriterOpts());
           } catch (Exception ex) {
             ref.set(ex);
@@ -60,6 +62,7 @@ public class WriteLotsIT extends MacTest {
     }
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     vopts.rows = 10000 * 10;
+    vopts.tableName = tableName;
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
index f483ce9..96b3b55 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
@@ -24,18 +24,18 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import org.junit.Test;
 
-public class ZooCacheIT extends MacTest {
+public class ZooCacheIT extends SimpleMacIT {
   
   @Test(timeout=200*1000)
   public void test() throws Exception {
-    assertEquals(0, cluster.exec(CacheTestClean.class, "/zcTest-42", "/tmp/zcTest-42").waitFor());
+    assertEquals(0, exec(CacheTestClean.class, "/zcTest-42", "/tmp/zcTest-42").waitFor());
     final AtomicReference<Exception> ref = new AtomicReference<Exception>();
     List<Thread> threads = new ArrayList<Thread>();
     for (int i = 0; i < 3; i++) {
       Thread reader = new Thread() {
         public void run() {
           try {
-            CacheTestReader.main(new String[]{"/zcTest-42", "/tmp/zcTest-42", cluster.getZooKeepers()});
+            CacheTestReader.main(new String[]{"/zcTest-42", "/tmp/zcTest-42", getConnector().getInstance().getZooKeepers()});
           } catch(Exception ex) {
             ref.set(ex);
           }
@@ -44,7 +44,7 @@ public class ZooCacheIT extends MacTest {
       reader.start();
       threads.add(reader);
     }
-    assertEquals(0, cluster.exec(CacheTestWriter.class, "/zcTest-42", "/tmp/zcTest-42", "3","500").waitFor());
+    assertEquals(0, exec(CacheTestWriter.class, "/zcTest-42", "/tmp/zcTest-42", "3","500").waitFor());
     for (Thread t: threads) {
       t.join();
       if (ref.get() != null)


[12/50] git commit: ACCUMULO-1562 add troubleshooting chapter; not complete

Posted by kt...@apache.org.
ACCUMULO-1562 add troubleshooting chapter; not complete


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/e656c3a4
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/e656c3a4
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/e656c3a4

Branch: refs/heads/ACCUMULO-1000
Commit: e656c3a421f5de27e40be8e09df693049dbfa7fa
Parents: e6d6fab
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 12:09:16 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 12:09:16 2013 -0400

----------------------------------------------------------------------
 .../accumulo_user_manual.tex                    |   1 +
 .../chapters/troubleshooting.tex                | 520 +++++++++++++++++++
 2 files changed, 521 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/e656c3a4/docs/src/main/latex/accumulo_user_manual/accumulo_user_manual.tex
----------------------------------------------------------------------
diff --git a/docs/src/main/latex/accumulo_user_manual/accumulo_user_manual.tex b/docs/src/main/latex/accumulo_user_manual/accumulo_user_manual.tex
index 4750467..8c8fec3 100644
--- a/docs/src/main/latex/accumulo_user_manual/accumulo_user_manual.tex
+++ b/docs/src/main/latex/accumulo_user_manual/accumulo_user_manual.tex
@@ -51,4 +51,5 @@ Version 1.5}
 \include{chapters/security}
 \include{chapters/administration}
 \include{chapters/multivolume}
+\include{chapters/troubleshooting}
 \end{document}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e656c3a4/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex
----------------------------------------------------------------------
diff --git a/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex b/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex
new file mode 100644
index 0000000..8e55008
--- /dev/null
+++ b/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex
@@ -0,0 +1,520 @@
+% Licensed to the Apache Software Foundation (ASF) under one or more
+% contributor license agreements. See the NOTICE file distributed with
+% this work for additional information regarding copyright ownership.
+% The ASF licenses this file to You under the Apache License, Version 2.0
+% (the "License"); you may not use this file except in compliance with
+% the License. You may obtain a copy of the License at
+%
+%     http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS,
+% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+% See the License for the specific language governing permissions and
+% limitations under the License.
+
+\chapter{Troubleshooting}
+
+\section{Logs}
+
+Q. The tablet server does not seem to be running!? What happened?
+
+Accumulo is a distributed system.  It is supposed to run on remote
+equipment, across hundreds of computers.  Each program that runs on
+these remote computers writes events, as they occur, to a local log
+file. By default, the log directory is defined in
+\texttt{\$ACCUMULO\_HOME}/conf/accumulo-env.sh as ACCUMULO\_LOG\_DIR.
+
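+For example, to check the end of a tablet server's log for recent
+errors (a quick sketch; the exact file names depend on your host names
+and configuration):
+
+\small
+\begin{verbatim}
+  $ tail -100 $ACCUMULO_LOG_DIR/tserver*.log
+  $ grep -i error $ACCUMULO_LOG_DIR/tserver*.log
+\end{verbatim}
+\normalsize
+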
+A. Look in the \texttt{\$ACCUMULO\_LOG\_DIR}/tserver*.log file.  Specifically, check the end of the file.
+
+Q. The tablet server did not start and the debug log does not exist!  What happened?
+
+When the individual programs are started, the stdout and stderr output
+of these programs are stored in ``.out'' and ``.err'' files in
+\texttt{\$ACCUMULO\_LOG\_DIR}.  Often, when there are missing configuration
+options, files or permissions, messages will be left in these files.
+
+A. Probably a start-up problem.  Look in \texttt{\$ACCUMULO\_LOG\_DIR}/tserver*.err
+
+\section{Monitor}
+
+Q. Accumulo is not working, what's wrong?
+
+There's a small web server that collects information about all the
+components that make up a running Accumulo instance. It will highlight
+unusual or unexpected conditions.
+
+A. Point your browser to the monitor (typically the master host, on port 50095).  Is anything red or yellow?
+
+Q. My browser is reporting connection refused, and I cannot get to the monitor
+
+The monitor program's output is also written to .err and .out files in
+the \texttt{\$ACCUMULO\_LOG\_DIR}. Look for problems in these files if the
+\texttt{\$ACCUMULO\_LOG\_DIR/monitor*.log} file does not exist.
+
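+For example (the file names are illustrative; check your log directory
+for the actual names):
+
+\small
+\begin{verbatim}
+  $ ls $ACCUMULO_LOG_DIR/monitor*
+  $ cat $ACCUMULO_LOG_DIR/monitor*.err
+\end{verbatim}
+\normalsize
+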
+A. The monitor program is probably not running.  Check the log files for errors.
+
+Q. My browser hangs trying to talk to the monitor.
+
+Your browser needs to be able to reach the monitor program.  Often
+large clusters are firewalled, or use a VPN for internal
+communications. You can use SSH to proxy your browser to the cluster,
+or consult with your system administrator to gain access to the server
+from your browser.
+
+It is sometimes helpful to use a text-only browser to sanity-check the
+monitor while on the machine running the monitor:
+
+\small
+\begin{verbatim}
+  $ links http://localhost:50095
+\end{verbatim}
+\normalsize
+
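+You can also forward the monitor port over SSH and then browse to
+\texttt{http://localhost:50095}; the host and user names below are
+placeholders:
+
+\small
+\begin{verbatim}
+  $ ssh -L 50095:localhost:50095 you@monitor-host
+\end{verbatim}
+\normalsize
+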
+A. Verify that you are not firewalled from the monitor if it is running on a remote host.
+
+Q. The monitor responds, but there are no numbers for tservers and tables.  The summary page says the master is down.
+
+The monitor program gathers all the details about the master and the
+tablet servers through the master. It will be mostly blank if the
+master is down.
+
+A. Check for a running master.
+
+\section{HDFS}
+
+Accumulo reads and writes to the Hadoop Distributed File System.
+Accumulo needs this file system available at all times for normal operations.
+
+Q. Accumulo is having problems ``getting a block blk\_1234567890123.'' How do I fix it?
+
+This troubleshooting guide does not cover HDFS, but in general, you
+want to make sure that all the datanodes are running and an fsck check
+finds the file system clean:
+
+\small
+\begin{verbatim}
+  $ hadoop fsck /accumulo
+\end{verbatim}
+\normalsize
+
+On a larger cluster, you may need to increase the number of Xceivers on the datanodes:
+
+\small
+\begin{verbatim}
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>4096</value>
+  </property>
+\end{verbatim}
+\normalsize
+
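+A quick way to verify that all of the datanodes are up and reporting
+is the dfsadmin report (the output will vary with your cluster):
+
+\small
+\begin{verbatim}
+  $ hadoop dfsadmin -report
+\end{verbatim}
+\normalsize
+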
+A. Verify HDFS is healthy, check the datanode logs.
+
+\section{Zookeeper}
+
+Q. \texttt{accumulo init} is hanging.  It says something about talking to zookeeper.
+
+Zookeeper is also a distributed service.  You will need to ensure that
+it is up.  You can run the zookeeper command line tool to connect to
+any one of the zookeeper servers:
+
+\small
+\begin{verbatim}
+  $ zkCli.sh -server zoohost
+...
+[zk: zoohost:2181(CONNECTED) 0] 
+\end{verbatim}
+\normalsize
+
+It is important to see the word \texttt{CONNECTED}!  If you only see
+\texttt{CONNECTING} you will need to diagnose zookeeper errors.
+
+A. Check to make sure that zookeeper is up, and that
+\texttt{\$ACCUMULO\_HOME/conf/accumulo-site.xml} has been pointed to
+your zookeeper server(s).
+
+Q. Zookeeper is running, but it does not say \texttt{CONNECTED}
+
+Zookeeper processes talk to each other to elect a leader.  All updates
+go through the leader and propagate to a majority of all the other
+nodes.  If a majority of the nodes cannot be reached, zookeeper will
+not allow updates.  Zookeeper also limits the number of connections to a
+server from any other single host.  By default, this limit is 10, and
+can be reached in some everything-on-one-machine test configurations.
+
+You can check the election status and connection status of clients by
+asking the zookeeper nodes for their status.  You connect to zookeeper
+and ask it with the four-letter ``stat'' command:
+
+\small
+\begin{verbatim}
+$ nc zoohost 2181
+stat
+Zookeeper version: 3.4.5-1392090, built on 09/30/2012 17:52 GMT
+Clients:
+ /127.0.0.1:58289[0](queued=0,recved=1,sent=0)
+ /127.0.0.1:60231[1](queued=0,recved=53910,sent=53915)
+
+Latency min/avg/max: 0/5/3008
+Received: 1561459
+Sent: 1561592
+Connections: 2
+Outstanding: 0
+Zxid: 0x621a3b
+Mode: standalone
+Node count: 22524
+$
+\end{verbatim}
+\normalsize
+
+
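+If the per-host connection limit is the problem, it can be raised in
+zoo.cfg; the value below is only an example:
+
+\small
+\begin{verbatim}
+# $ZOOKEEPER_HOME/conf/zoo.cfg
+maxClientCnxns=100
+\end{verbatim}
+\normalsize
+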
+A. Check zookeeper status, verify that it has a quorum, and has not exceeded maxClientCnxns.
+
+Q. My tablet server crashed!  The logs say that it lost its zookeeper lock.
+
+Tablet servers reserve a lock in zookeeper to maintain their ownership
+over the tablets that have been assigned to them.  Part of their
+responsibility for keeping the lock is to send zookeeper a keep-alive
+message periodically.  If the tablet server fails to send a message in
+a timely fashion, zookeeper will remove the lock and notify the tablet
+server.  If the tablet server does not receive a message from
+zookeeper, it will assume its lock has been lost, too.  If a tablet
+server loses its lock, it kills itself: everything assumes it is dead
+already.
+
+A. Investigate why the tablet server did not send a timely message to
+zookeeper.
+
+\subsection{Keeping the tablet server lock}
+
+Q. My tablet server lost its lock.  Why?
+
+The primary reason a tablet server loses its lock is that it has been pushed into swap.
+
+A large java program (like the tablet server) may have a large portion
+of its memory image unused.  The operating system will favor pushing
+this allocated, but unused memory into swap so that the memory can be
+re-used as a disk buffer.  When the java virtual machine decides to
+access this memory, the OS will begin flushing disk buffers to return that
+memory to the VM.  This can cause the entire process to block long
+enough for the zookeeper lock to be lost.
+
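+To reduce the likelihood of swapping, the kernel parameter
+``swappiness'' can be lowered with sysctl; how the setting is made
+persistent varies by distribution, so treat this as a sketch:
+
+\small
+\begin{verbatim}
+  $ sudo sysctl -w vm.swappiness=0
+  $ echo "vm.swappiness = 0" | sudo tee -a /etc/sysctl.conf
+\end{verbatim}
+\normalsize
+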
+A. Configure your system to reduce the kernel parameter ``swappiness'' from the default (30) to zero.
+
+Q. My tablet server lost its lock, and I have already set swappiness to
+zero.  Why?
+
+Be careful not to over-subscribe memory.  This can be easy to do if
+your accumulo processes run on the same nodes as hadoop's map-reduce
+framework.  Remember to add up:
+
+\begin{itemize}
+\item{size of the JVM for the tablet server}
+\item{size of the in-memory map, if using the native map implementation}
+\item{size of the JVM for the data node}
+\item{size of the JVM for the task tracker}
+\item{size of the JVM times the maximum number of mappers and reducers}
+\item{size of the kernel and any support processes}
+\end{itemize}
+
+If a 16G node can run 2 mappers and 2 reducers, and each can be 2G,
+then there is only 8G for the data node, tserver, task tracker and OS.
+
+A. Reduce the memory footprint of each component until it fits comfortably.
+
+Q. My tablet server lost its lock, swappiness is zero, and my node has lots of unused memory!
+
+The JVM memory garbage collector may fall behind and cause a
+``stop-the-world'' garbage collection. On a large memory virtual
+machine, this collection can take a long time.  This happens more
+frequently when the JVM is getting low on free memory.  Check the logs
+of the tablet server.  You will see lines like this:
+
+\small
+\begin{verbatim}
+2013-06-20 13:43:20,607 [tabletserver.TabletServer] DEBUG: gc ParNew=0.00(+0.00) secs ConcurrentMarkSweep=0.00(+0.00) secs freemem=1,868,325,952(+1,868,325,952) totalmem=2,040,135,680
+\end{verbatim}
+\normalsize
+
+When ``freemem'' becomes small relative to the amount of memory
+needed, the JVM will spend more time finding free memory than
+performing work.  This can cause long delays in sending keep-alive
+messages to zookeeper.
+
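+If the tablet server heap needs to be increased, the JVM options are
+normally set in \texttt{accumulo-env.sh}; the variable below is the
+one used by the example configurations, so check your own file:
+
+\small
+\begin{verbatim}
+$ grep ACCUMULO_TSERVER_OPTS $ACCUMULO_HOME/conf/accumulo-env.sh
+\end{verbatim}
+\normalsize
+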
+A. Ensure the tablet server JVM is not running low on memory.
+
+\section{Tools}
+
+The accumulo script can be used to run classes from the command line.
+This section shows how a few of the utilities work, but there are many
+more.
+
+There's a class that will examine an accumulo storage file and print
+out basic metadata.  
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.core.file.rfile.PrintInfo /accumulo/tables/1/default_tablet/A000000n.rf
+2013-07-16 08:17:14,778 [util.NativeCodeLoader] INFO : Loaded the native-hadoop library
+Locality group         : <DEFAULT>
+        Start block          : 0
+        Num   blocks         : 1
+        Index level 0        : 62 bytes  1 blocks
+        First key            : 288be9ab4052fe9e span:34078a86a723e5d3:3da450f02108ced5 [] 1373373521623 false
+        Last key             : start:13fc375709e id:615f5ee2dd822d7a [] 1373373821660 false
+        Num entries          : 466
+        Column families      : [waitForCommits, start, md major compactor 1, md major compactor 2, md major compactor 3, bringOnline, prep, md major compactor 4, md major compactor 5, md root major compactor 3, minorCompaction, wal, compactFiles, md root major compactor 4, md root major compactor 1, md root major compactor 2, compact, id, client:update, span, update, commit, write, majorCompaction]
+
+Meta block     : BCFile.index
+      Raw size             : 4 bytes
+      Compressed size      : 12 bytes
+      Compression type     : gz
+
+Meta block     : RFile.index
+      Raw size             : 780 bytes
+      Compressed size      : 344 bytes
+      Compression type     : gz
+\end{verbatim}
+\normalsize
+
+When trying to diagnose problems related to key size, the PrintInfo tool can provide a histogram of the individual key sizes:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.core.file.rfile.PrintInfo --histogram /accumulo/tables/1/default_tablet/A000000n.rf
+...
+Up to size      count      %-age
+         10 :        222  28.23%
+        100 :        244  71.77%
+       1000 :          0   0.00%
+      10000 :          0   0.00%
+     100000 :          0   0.00%
+    1000000 :          0   0.00%
+   10000000 :          0   0.00%
+  100000000 :          0   0.00%
+ 1000000000 :          0   0.00%
+10000000000 :          0   0.00%
+\end{verbatim}
+\normalsize
+
+Likewise, PrintInfo will dump the key-value pairs and show you the contents of the RFile:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.core.file.rfile.PrintInfo --dump /accumulo/tables/1/default_tablet/A000000n.rf
+row columnFamily:columnQualifier [visibility] timestamp deleteFlag -> Value
+...
+\end{verbatim}
+\normalsize
+
+Q. Accumulo is not showing me any data!
+
+A. Do you have your auths set so that it matches your visibilities?
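+
+You can check and change your authorizations from the shell; the user
+and authorizations shown here are only examples:
+
+\small
+\begin{verbatim}
+shell> getauths
+shell> setauths -u root -s A,B,FOO
+\end{verbatim}
+\normalsize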
+
+Q. What are my visibilities?
+
+A. Use ``PrintInfo'' on a representative file to get some idea of the visibilities in the underlying data.
+
+Note that PrintInfo is an administrative tool and can only
+be used by someone who can access the underlying Accumulo data. It
+does not enforce the normal Accumulo access controls.
+
+If you would like to backup, or otherwise examine the contents of Zookeeper, there are commands to dump and load to/from XML.
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.server.util.DumpZookeeper --root /accumulo >dump.xml
+$ ./bin/accumulo org.apache.accumulo.server.util.RestoreZookeeper --overwrite < dump.xml
+\end{verbatim}
+\normalsize
+
+Q. How can I get the information in the monitor page for my cluster monitoring system?
+
+A. Use GetMasterStats:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.test.GetMasterStats | grep Load
+ OS Load Average: 0.27
+\end{verbatim}
+\normalsize
+
+Q. The monitor page is showing an offline tablet.  How can I find out which tablet it is?
+
+A. Use FindOfflineTablets:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.server.util.FindOfflineTablets
+2<<@(null,null,localhost:9997) is UNASSIGNED  #walogs:2
+\end{verbatim}
+\normalsize
+
+Here's what the output means:
+
+\begin{enumerate}
+\item{\texttt{2<<} This is the tablet from (-inf, +inf) for the
+  table with id 2.  ``tables -l'' in the shell will show table ids for
+  tables.}
+\item{\texttt{@(null, null, localhost:9997)} Location information.  The
+  format is \texttt{@(assigned, hosted, last)}.  In this case, the
+  tablet has not been assigned, is not hosted anywhere, and was once
+  hosted on localhost.}
+\item{\texttt{\#walogs:2} The number of write-ahead logs that this tablet requires for recovery.}
+\end{enumerate}
+
+An unassigned tablet with write-ahead logs is probably waiting for
+logs to be sorted for efficient recovery.
+
+Q. How can I be sure that the !METADATA table is up and consistent?
+
+A. \texttt{CheckForMetadataProblems} will verify that the start/end of
+every tablet matches, and that the start and end of the table are empty:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.server.util.CheckForMetadataProblems -u root --password
+Enter the connection password: 
+All is well for table !0
+All is well for table 1
+\end{verbatim}
+\normalsize
+
+Q. My hadoop cluster has lost a file due to a NameNode failure.  How can I remove the file?
+
+A. There's a utility that will check every file reference and ensure
+that the file exists in HDFS.  Optionally, it will remove the
+reference:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.server.util.RemoveEntriesForMissingFiles -u root --password
+Enter the connection password: 
+2013-07-16 13:10:57,293 [util.RemoveEntriesForMissingFiles] INFO : File /accumulo/tables/2/default_tablet/F0000005.rf is missing
+2013-07-16 13:10:57,296 [util.RemoveEntriesForMissingFiles] INFO : 1 files of 3 missing
+\end{verbatim}
+\normalsize
+
+Q. I have many entries in zookeeper for old instances I no longer need.  How can I remove them?
+
+A. Use CleanZookeeper:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.server.util.CleanZookeeper
+\end{verbatim}
+\normalsize
+
+This command will not delete the instance pointed to by the local \texttt{conf/accumulo-site.xml} file.
+
+Q. I need to decommission a node.  How do I stop the tablet server on it?
+
+A. Use the admin command:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo admin stop hostname:9997
+2013-07-16 13:15:38,403 [util.Admin] INFO : Stopping server 12.34.56.78:9997
+\end{verbatim}
+\normalsize
+
+Q. I cannot login to a tablet server host, and the tablet server will not shut down.  How can I kill the server?
+
+A. Sometimes you can kill a ``stuck'' tablet server by deleting its lock in zookeeper:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.server.util.TabletServerLocks --list
+                  127.0.0.1:9997 TSERV_CLIENT=127.0.0.1:9997
+$ ./bin/accumulo org.apache.accumulo.server.util.TabletServerLocks -delete 127.0.0.1:9997
+$ ./bin/accumulo org.apache.accumulo.server.util.TabletServerLocks -list
+                  127.0.0.1:9997             null
+\end{verbatim}
+\normalsize
+
+You can find the master and instance id for any accumulo instances using the same zookeeper instance:
+
+\small
+\begin{verbatim}
+$ ./bin/accumulo org.apache.accumulo.server.util.ListInstances
+INFO : Using ZooKeepers localhost:2181
+
+ Instance Name       | Instance ID                          | Master                        
+---------------------+--------------------------------------+-------------------------------
+              "test" | 6140b72e-edd8-4126-b2f5-e74a8bbe323b |                127.0.0.1:9999
+\end{verbatim}
+\normalsize
+
+\section{METADATA Table}
+
+Accumulo tracks information about all other tables in the !METADATA
+table.  The !METADATA table information is tracked in a very simple
+table that always consists of a single tablet, called the !ROOT table.
+The root table information, such as its location and write-ahead logs,
+is stored in Zookeeper.
+
+Let's create a table and put some data into it:
+
+\small
+\begin{verbatim}
+shell> createtable test
+shell> tables -l
+!METADATA       =>         !0
+test            =>          3
+trace           =>          1
+shell> insert a b c d
+shell> flush -w
+\end{verbatim}
+\normalsize
+
+Now let's take a look at the !METADATA table information for this table:
+
+\small
+\begin{verbatim}
+shell> table !METADATA
+shell> scan -b 3; -e 3<
+3< file:/default_tablet/F000009y.rf []    186,1
+3< last:13fe86cd27101e5 []    127.0.0.1:9997
+3< loc:13fe86cd27101e5 []    127.0.0.1:9997
+3< log:127.0.0.1+9997/0cb7ce52-ac46-4bf7-ae1d-acdcfaa97995 []    127.0.0.1+9997/0cb7ce52-ac46-4bf7-ae1d-acdcfaa97995|6
+3< srv:dir []    /default_tablet
+3< srv:flush []    1
+3< srv:lock []    tservers/127.0.0.1:9997/zlock-0000000001$13fe86cd27101e5
+3< srv:time []    M1373998392323
+3< ~tab:~pr []    \x00
+\end{verbatim}
+\normalsize
+
+Let's decode this little session:
+
+\begin{enumerate}
+\item{\texttt{scan -b 3; -e 3<} Every tablet gets its own row. Every row starts with the table id followed by ``;'' or ``<'', and followed by the end row split point for that tablet.}
+\item{\texttt{file:/default\_tablet/F000009y.rf [] 186,1} File entry for this tablet.  This tablet contains a single file reference. The file is ``/accumulo/tables/3/default\_tablet/F000009y.rf''.  It contains 1 key/value pair, and is 186 bytes long. }
+\item{\texttt{last:13fe86cd27101e5 []    127.0.0.1:9997} Last location for this tablet.  It was last held on 127.0.0.1:9997, and the unique tablet server lock data was ``13fe86cd27101e5''. The default balancer will tend to put tablets back on their last location. }
+\item{\texttt{loc:13fe86cd27101e5 []    127.0.0.1:9997} The current location of this tablet.}
+\item{\texttt{log:127.0.0.1+9997/0cb7ce52-ac46-4bf7-ae1d-acdcfaa97995 []    127.0.0.1+9997/0cb7ce52-ac46-4bf7-ae1d-acdcfaa97995|6} This tablet has a reference to a single write-ahead log.  This file can be found in /accumulo/wal/127.0.0.1+9997/0cb7ce52-ac46-4bf7-ae1d-acdcfaa97995.  The value of this entry could refer to multiple files.  This tablet's data is encoded as ``6'' within the log.}
+\item{\texttt{srv:dir []    /default\_tablet} Files written for this tablet will be placed into /accumulo/tables/3/default\_tablet.}
+\item{\texttt{srv:flush []    1} Flush id.  This table has successfully completed the flush with the id of ``1''. }
+\item{\texttt{srv:lock []    tservers/127.0.0.1:9997/zlock-0000000001\$13fe86cd27101e5}  This is the lock information for the tablet server holding the present lock.  This information is checked against zookeeper whenever this entry is updated, which prevents a !METADATA table update from a tablet server that no longer holds its lock.}
+\item{\texttt{srv:time []    M1373998392323} }
+\item{\texttt{~tab:~pr []    \x00} The end-row marker for the previous tablet (prev-row).  The first byte indicates the presence of a prev-row.  This tablet has the range (-inf, +inf), so it has no prev-row (or end row). }
+\end{enumerate}
+
+Besides these columns, you may see:
+
+\begin{enumerate}
+\item{\texttt{rowId future:zooKeeperID location} The tablet has been assigned to a tablet server, but not yet loaded.}
+\item{\texttt{~del:filename} When a tablet server is done using a file, it will create a delete marker in the !METADATA table, unassociated with any table.  The garbage collector will remove the marker, and the file, when no other reference to the file exists.}
+\item{\texttt{~blip:txid} Bulk-Load In Progress marker}
+\item{\texttt{rowId loaded:filename} A file has been bulk-loaded into this tablet; however, the bulk load has not yet completed on other tablets, so this marker prevents the file from being loaded multiple times.}
+\item{\texttt{rowId !cloned} A marker that indicates that this tablet has been successfully cloned.}
+\item{\texttt{rowId splitRatio:ratio} A marker that indicates a split is in progress, and the files are being split at the given ratio.}
+\item{\texttt{rowId chopped} A marker that indicates that the files in the tablet do not contain keys outside the range of the tablet.}
+\item{\texttt{rowId scan} A marker that ....}
+
+\end{enumerate}
+
+
+\section{}
+


[02/50] git commit: ACCUMULO-1563 committing Jonathan Hsieh's patch to prevent the test from writing to /

Posted by kt...@apache.org.
ACCUMULO-1563 committing Jonathan Hsieh's patch to prevent the test from writing to /

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/1.5@1502583 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/1fc73a9d
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1fc73a9d
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1fc73a9d

Branch: refs/heads/ACCUMULO-1000
Commit: 1fc73a9d718b24350191b0d46c6c6012748dc76b
Parents: bd6c426
Author: Eric C. Newton <ec...@apache.org>
Authored: Fri Jul 12 15:11:14 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Fri Jul 12 15:11:14 2013 +0000

----------------------------------------------------------------------
 .../main/java/org/apache/accumulo/test/CreateRFiles.java  |  2 +-
 .../test/functional/BulkSplitOptimizationTest.java        |  8 ++++----
 test/system/auto/simple/bulk.py                           | 10 +++++-----
 test/system/auto/simple/compaction.py                     |  4 ++--
 4 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/1fc73a9d/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java b/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
index fc50ed9..82f9b6b 100644
--- a/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
+++ b/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
@@ -58,7 +58,7 @@ public class CreateRFiles {
     int count = 0;
     while (currEnd <= opts.end && currStart < currEnd) {
       
-      final String tia = String.format("--rfile /%s/mf%05d --timestamp 1 --size 50 --random 56 --rows %d --start %d --user root", opts.outputDirectory, count, currEnd - currStart, currStart);
+      final String tia = String.format("--rfile %s/mf%05d --timestamp 1 --size 50 --random 56 --rows %d --start %d --user root", opts.outputDirectory, count, currEnd - currStart, currStart);
       
       Runnable r = new Runnable() {
         

http://git-wip-us.apache.org/repos/asf/accumulo/blob/1fc73a9d/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationTest.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationTest.java
index 5a62e5e..0cdd3b7 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationTest.java
@@ -44,8 +44,8 @@ public class BulkSplitOptimizationTest extends FunctionalTest {
   @Override
   public void cleanup() throws Exception {
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    fs.delete(new Path("/tmp/testmf"), true);
-    fs.delete(new Path("/tmp/testmf_failures"), true);
+    fs.delete(new Path("tmp/testmf"), true);
+    fs.delete(new Path("tmp/testmf_failures"), true);
   }
   
   @Override
@@ -63,11 +63,11 @@ public class BulkSplitOptimizationTest extends FunctionalTest {
   public void run() throws Exception {
     
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    fs.delete(new Path("/tmp/testmf"), true);
+    fs.delete(new Path("tmp/testmf"), true);
     AuthenticationToken token = this.getToken();
     CreateRFiles.main(new String[] {"--output", "tmp/testmf", "--numThreads", "8", "--start", "0", "--end", "100000", "--splits", "99"});
     
-    bulkImport(fs, TABLE_NAME, "/tmp/testmf");
+    bulkImport(fs, TABLE_NAME, "tmp/testmf");
     
     checkSplits(TABLE_NAME, 0, 0);
     checkRFiles(TABLE_NAME, 1, 1, 100, 100);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/1fc73a9d/test/system/auto/simple/bulk.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/bulk.py b/test/system/auto/simple/bulk.py
index 3899176..b0b73b4 100755
--- a/test/system/auto/simple/bulk.py
+++ b/test/system/auto/simple/bulk.py
@@ -42,13 +42,13 @@ class SimpleBulkTest(TestUtilsMixin, unittest.TestCase):
             self.masterHost(),
             'org.apache.accumulo.test.BulkImportDirectory',
             ['-u', ROOT, '-p', ROOT_PASSWORD,
-             '-t', 'test_ingest', '--source', dir, '--failures', '/testBulkFail', '-i', INSTANCE_NAME])
+             '-t', 'test_ingest', '--source', dir, '--failures', 'testBulkFail', '-i', INSTANCE_NAME])
         self.wait(handle)
         self.assert_(handle.returncode == 0)
         
 
     def createRFiles(self):
-        args = '--rfile /testrf/rf%02d --timestamp 1 --size 50 --random 56 --rows %1d --start %ld --cols 1 -u root -i ' + INSTANCE_NAME
+        args = '--rfile testrf/rf%02d --timestamp 1 --size 50 --random 56 --rows %1d --start %ld --cols 1 -u root -i ' + INSTANCE_NAME
         log.info('creating rfiles')
         handles = []
         for i in range(COUNT):
@@ -73,9 +73,9 @@ class SimpleBulkTest(TestUtilsMixin, unittest.TestCase):
 
         # initialize the database
         self.createTable('test_ingest')
-        self.execute(self.masterHost(), 'hadoop dfs -rmr /testrf'.split())
-        self.execute(self.masterHost(), 'hadoop dfs -rmr /testBulkFail'.split())
-        self.execute(self.masterHost(), 'hadoop dfs -mkdir /testBulkFail'.split())
+        self.execute(self.masterHost(), 'hadoop dfs -rmr testrf'.split())
+        self.execute(self.masterHost(), 'hadoop dfs -rmr testBulkFail'.split())
+        self.execute(self.masterHost(), 'hadoop dfs -mkdir testBulkFail'.split())
 
         # insert some data
         self.createRFiles()

http://git-wip-us.apache.org/repos/asf/accumulo/blob/1fc73a9d/test/system/auto/simple/compaction.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/compaction.py b/test/system/auto/simple/compaction.py
index f2ff90d..9bd0b56 100755
--- a/test/system/auto/simple/compaction.py
+++ b/test/system/auto/simple/compaction.py
@@ -52,8 +52,8 @@ class CompactionTest(SimpleBulkTest):
 
         # initialize the database
         self.createTable('test_ingest')
-        self.execute(self.masterHost(), 'hadoop dfs -rmr /testrf'.split())
-        self.execute(self.masterHost(), 'hadoop dfs -rmr /testrfFail'.split())
+        self.execute(self.masterHost(), 'hadoop dfs -rmr testrf'.split())
+        self.execute(self.masterHost(), 'hadoop dfs -rmr testrfFail'.split())
 
         # insert some data
         self.createRFiles(self.masterHost())


[39/50] git commit: ACCUMULO-1596 moved Mutator interface up to IZooReaderWriter

Posted by kt...@apache.org.
ACCUMULO-1596 moved Mutator interface up to IZooReaderWriter


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/85451233
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/85451233
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/85451233

Branch: refs/heads/ACCUMULO-1000
Commit: 85451233c6a00352daaf460fb668d02d76bdd953
Parents: 734cd50
Author: Eric Newton <ec...@apache.org>
Authored: Mon Jul 22 13:52:57 2013 -0400
Committer: Eric Newton <ec...@apache.org>
Committed: Mon Jul 22 13:52:57 2013 -0400

----------------------------------------------------------------------
 server/src/main/java/org/apache/accumulo/server/master/Master.java | 2 +-
 .../apache/accumulo/server/master/state/tables/TableManager.java   | 2 +-
 .../apache/accumulo/server/master/tableOps/CancelCompactions.java  | 2 +-
 .../org/apache/accumulo/server/master/tableOps/CompactRange.java   | 2 +-
 .../org/apache/accumulo/server/master/tableOps/RenameTable.java    | 2 +-
 .../java/org/apache/accumulo/server/master/tableOps/Utils.java     | 2 +-
 .../java/org/apache/accumulo/server/tabletserver/TabletServer.java | 2 --
 7 files changed, 6 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/85451233/server/src/main/java/org/apache/accumulo/server/master/Master.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/Master.java b/server/src/main/java/org/apache/accumulo/server/master/Master.java
index 0cb0378..3d14d12 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/Master.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/Master.java
@@ -84,8 +84,8 @@ import org.apache.accumulo.fate.AgeOffStore;
 import org.apache.accumulo.fate.Fate;
 import org.apache.accumulo.fate.TStore.TStatus;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
-import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.Accumulo;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/85451233/server/src/main/java/org/apache/accumulo/server/master/state/tables/TableManager.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/state/tables/TableManager.java b/server/src/main/java/org/apache/accumulo/server/master/state/tables/TableManager.java
index fedbc96..bc81786 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/state/tables/TableManager.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/state/tables/TableManager.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.client.HdfsZooInstance;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/85451233/server/src/main/java/org/apache/accumulo/server/master/tableOps/CancelCompactions.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CancelCompactions.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CancelCompactions.java
index efddc79..7d06639 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CancelCompactions.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CancelCompactions.java
@@ -20,7 +20,7 @@ import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/85451233/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
index 09046f7..41b052b 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
@@ -47,7 +47,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
 import org.apache.accumulo.server.master.Master;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/85451233/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java
index 0ca29cd..16201b1 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java
@@ -25,7 +25,7 @@ import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/85451233/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java
index d467119..e57801c 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.fate.zookeeper.ZooReservation;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.zookeeper.ZooQueueLock;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/85451233/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index d83102d..9824c64 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -18,7 +18,6 @@ package org.apache.accumulo.server.tabletserver;
 
 import static org.apache.accumulo.server.problems.ProblemType.TABLET_LOAD;
 
-import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.management.GarbageCollectorMXBean;
@@ -196,7 +195,6 @@ import org.apache.accumulo.server.zookeeper.TransactionWatcher;
 import org.apache.accumulo.server.zookeeper.ZooCache;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.start.Platform;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.accumulo.start.classloader.vfs.ContextManager;
 import org.apache.accumulo.trace.instrument.Span;


[24/50] added ability to invalidate server side conditional update sessions

Posted by kt...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/ec537137/core/src/main/thrift/tabletserver.thrift
----------------------------------------------------------------------
diff --git a/core/src/main/thrift/tabletserver.thrift b/core/src/main/thrift/tabletserver.thrift
index 5dfcb4b..c6d442c 100644
--- a/core/src/main/thrift/tabletserver.thrift
+++ b/core/src/main/thrift/tabletserver.thrift
@@ -166,9 +166,14 @@ service TabletClientService extends client.ClientService {
     throws (1:client.ThriftSecurityException sec, 
             2:NotServingTabletException nste, 
             3:ConstraintViolationException cve),
-  
-  list<data.TCMResult> conditionalUpdate(1:trace.TInfo tinfo, 2:security.TCredentials credentials, 3:list<binary> authorizations, 4:data.CMBatch mutations, 5:list<string> symbols)
+
+  data.UpdateID startConditionalUpdate(1:trace.TInfo tinfo, 2:security.TCredentials credentials, 3:list<binary> authorizations, 4:string tableID)
      throws (1:client.ThriftSecurityException sec);
+  
+  list<data.TCMResult> conditionalUpdate(1:trace.TInfo tinfo, 2:data.UpdateID sessID, 3:data.CMBatch mutations, 4:list<string> symbols)
+     throws (1:NoSuchScanIDException nssi);
+
+  void invalidateConditionalUpdate(1:trace.TInfo tinfo, 2:data.UpdateID sessID);
 
   // on success, returns an empty list
   list<data.TKeyExtent> bulkImport(3:trace.TInfo tinfo, 1:security.TCredentials credentials, 4:i64 tid, 2:data.TabletFiles files, 5:bool setTime) throws (1:client.ThriftSecurityException sec),
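
The interface change above splits a conditional update into a three-step protocol: start a session for a table, send batches of conditional mutations against that session id, and, if the client needs to abandon the session (for example after a timeout), invalidate it so no further update can start on the server. A rough client-side sketch of that flow, assuming a generated TabletClientService client named "client" and with argument construction omitted (illustrative only, not the actual ConditionalWriterImpl code):

    // Hypothetical usage of the thrift methods defined above.
    long sessId = client.startConditionalUpdate(tinfo, credentials, authorizations, tableId);
    List<TCMResult> results;
    try {
      results = client.conditionalUpdate(tinfo, sessId, mutationsByExtent, symbols);
    } catch (TException e) {
      // giving up on the session: after this call returns, the server will not
      // allow another conditionalUpdate to start under this session id
      client.invalidateConditionalUpdate(tinfo, sessId);
      throw e;
    }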

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ec537137/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index 465eb8e..94a8cd6 100644
--- a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@ -313,23 +313,10 @@ public class SecurityOperation {
     return hasTablePermission(credentials.getPrincipal(), table, TablePermission.WRITE, true);
   }
   
-  public boolean canConditionallyUpdate(TCredentials credentials, Map<TKeyExtent,List<TConditionalMutation>> mutations, List<String> symbols,
-      List<ByteBuffer> authorizations) throws ThriftSecurityException {
-    Set<TKeyExtent> ks = mutations.keySet();
-    
-    byte[] table = null;
-    
-    for (TKeyExtent tke : ks) {
-      if (table == null)
-        table = tke.getTable();
-      else if (!Arrays.equals(table, tke.getTable()))
-        return false;
-    }
+  public boolean canConditionallyUpdate(TCredentials credentials, String tableID, List<ByteBuffer> authorizations) throws ThriftSecurityException {
     
     authenticate(credentials);
     
-    String tableID = new String(table);
-    
     return hasTablePermission(credentials.getPrincipal(), tableID, TablePermission.WRITE, true)
         && hasTablePermission(credentials.getPrincipal(), tableID, TablePermission.READ, true);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ec537137/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 4b905ce..ee1d1b6 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -397,9 +397,30 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       
     }
     
+    synchronized Session reserveSession(long sessionId, boolean wait) {
+      Session session = sessions.get(sessionId);
+      if (session != null) {
+        while(wait && session.reserved){
+          try {
+            wait(1000);
+          } catch (InterruptedException e) {
+            throw new RuntimeException();
+          }
+        }
+        
+        if (session.reserved)
+          throw new IllegalStateException();
+        session.reserved = true;
+      }
+      
+      return session;
+      
+    }
+    
     synchronized void unreserveSession(Session session) {
       if (!session.reserved)
         throw new IllegalStateException();
+      notifyAll();
       session.reserved = false;
       session.lastAccessTime = System.currentTimeMillis();
     }
@@ -409,7 +430,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       if (session != null)
         unreserveSession(session);
     }
-    
+        
     synchronized Session getSession(long sessionId) {
       Session session = sessions.get(sessionId);
       if (session != null)
@@ -418,9 +439,15 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     }
     
     Session removeSession(long sessionId) {
+      return removeSession(sessionId, false);
+    }
+    
+    Session removeSession(long sessionId, boolean unreserve) {
       Session session = null;
       synchronized (this) {
         session = sessions.remove(sessionId);
+        if(unreserve && session != null)
+          unreserveSession(session);
       }
       
       // do clean up out side of lock..
@@ -719,6 +746,12 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     
   }
   
+  private static class ConditionalSession extends Session {
+    public TCredentials credentials;
+    public Authorizations auths;
+    public String tableId;
+  }
+  
   private static class UpdateSession extends Session {
     public Tablet currentTablet;
     public MapCounter<Tablet> successfulCommits = new MapCounter<Tablet>();
@@ -1856,7 +1889,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
 
     }
 
-    private Map<KeyExtent,List<ServerConditionalMutation>> conditionalUpdate(TCredentials credentials, List<ByteBuffer> authorizations,
+    private Map<KeyExtent,List<ServerConditionalMutation>> conditionalUpdate(TCredentials credentials, Authorizations authorizations,
         Map<KeyExtent,List<ServerConditionalMutation>> updates, ArrayList<TCMResult> results, List<String> symbols) {
       // sort each list of mutations, this is done to avoid deadlock and doing seeks in order is more efficient and detect duplicate rows.
       ConditionalMutationSet.sortConditionalMutations(updates);
@@ -1869,7 +1902,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       // get as many locks as possible w/o blocking... defer any rows that are locked
       List<RowLock> locks = rowLocks.acquireRowlocks(updates, deferred);
       try {
-        checkConditions(updates, results, new Authorizations(authorizations), symbols);
+        checkConditions(updates, results, authorizations, symbols);
         writeConditionalMutations(updates, results, credentials);
       } finally {
         rowLocks.releaseRowLocks(locks);
@@ -1878,11 +1911,10 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     }
     
     @Override
-    public List<TCMResult> conditionalUpdate(TInfo tinfo, TCredentials credentials, List<ByteBuffer> authorizations,
-        Map<TKeyExtent,List<TConditionalMutation>> mutations, List<String> symbols) throws ThriftSecurityException {
+    public long startConditionalUpdate(TInfo tinfo, TCredentials credentials, List<ByteBuffer> authorizations, String tableID) throws ThriftSecurityException, TException {
       
       Authorizations userauths = null;
-      if (!security.canConditionallyUpdate(credentials, mutations, symbols, authorizations))
+      if (!security.canConditionallyUpdate(credentials, tableID, authorizations))
         throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
       
       userauths = security.getUserAuthorizations(credentials);
@@ -1890,23 +1922,58 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
         if (!userauths.contains(ByteBufferUtil.toBytes(auth)))
           throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.BAD_AUTHORIZATIONS);
 
+      ConditionalSession cs = new ConditionalSession();
+      cs.auths = new Authorizations(authorizations);
+      cs.credentials = credentials;
+      cs.tableId = tableID;
+      
+      return sessionManager.createSession(cs, false);
+    }
+
+    @Override
+    public List<TCMResult> conditionalUpdate(TInfo tinfo, long sessID, Map<TKeyExtent,List<TConditionalMutation>> mutations, List<String> symbols)
+        throws NoSuchScanIDException, TException {
       // TODO sessions, should show up in list scans
       // TODO timeout like scans do
       
-      Map<KeyExtent,List<ServerConditionalMutation>> updates = Translator.translate(mutations, Translator.TKET,
-          new Translator.ListTranslator<TConditionalMutation,ServerConditionalMutation>(ServerConditionalMutation.TCMT));
+      ConditionalSession cs = (ConditionalSession) sessionManager.reserveSession(sessID);
       
-      ArrayList<TCMResult> results = new ArrayList<TCMResult>();
+      if(cs == null)
+        throw new NoSuchScanIDException();
       
-      Map<KeyExtent,List<ServerConditionalMutation>> deferred = conditionalUpdate(credentials, authorizations, updates, results, symbols);
-
-      while (deferred.size() > 0) {
-        deferred = conditionalUpdate(credentials, authorizations, deferred, results, symbols);
+      
+      
+      try{
+        Map<KeyExtent,List<ServerConditionalMutation>> updates = Translator.translate(mutations, Translator.TKET,
+            new Translator.ListTranslator<TConditionalMutation,ServerConditionalMutation>(ServerConditionalMutation.TCMT));
+        
+        Text tid = new Text(cs.tableId);
+        for(KeyExtent ke : updates.keySet())
+          if(!ke.getTableId().equals(tid))
+            throw new IllegalArgumentException("Unexpected table id "+tid+" != "+ke.getTableId());
+        
+        ArrayList<TCMResult> results = new ArrayList<TCMResult>();
+        
+        Map<KeyExtent,List<ServerConditionalMutation>> deferred = conditionalUpdate(cs.credentials, cs.auths, updates, results, symbols);
+  
+        while (deferred.size() > 0) {
+          deferred = conditionalUpdate(cs.credentials, cs.auths, deferred, results, symbols);
+        }
+  
+        return results;
+      }finally{
+        sessionManager.removeSession(sessID, true);
       }
-
-      return results;
     }
 
+    @Override
+    public void invalidateConditionalUpdate(TInfo tinfo, long sessID) throws TException {
+      //this method should wait for any running conditional update to complete
+      //after this method returns a conditional update should not be able to start
+      ConditionalSession cs = (ConditionalSession) sessionManager.reserveSession(sessID, true);
+      if(cs != null)
+        sessionManager.removeSession(sessID, true);
+    }
 
     @Override
     public void splitTablet(TInfo tinfo, TCredentials credentials, TKeyExtent tkeyExtent, ByteBuffer splitPoint) throws NotServingTabletException,
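
The session handling above is a small monitor: reserveSession marks a session as in use (optionally blocking until it is free), and unreserveSession clears the flag and calls notifyAll so a blocked reservation can proceed. A condensed, self-contained sketch of that pattern with a toy session class (not the real SessionManager, which also tracks timeouts and cleanup):

    import java.util.HashMap;
    import java.util.Map;

    // Toy illustration of the reserve/unreserve monitor pattern used above.
    class ToySessionManager {
      static class Session { boolean reserved; }
      private final Map<Long,Session> sessions = new HashMap<Long,Session>();

      synchronized Session reserve(long id, boolean block) throws InterruptedException {
        Session s = sessions.get(id);
        if (s == null)
          return null;
        while (block && s.reserved)
          wait(1000);                 // woken by notifyAll() in unreserve()
        if (s.reserved)
          throw new IllegalStateException("session already reserved");
        s.reserved = true;
        return s;
      }

      synchronized void unreserve(Session s) {
        s.reserved = false;
        notifyAll();                  // let a blocked reserve(id, true) continue
      }
    }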

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ec537137/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java b/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
index f8b32de..67e7249 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
@@ -52,6 +52,7 @@ import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction;
 import org.apache.accumulo.core.tabletserver.thrift.ActiveScan;
+import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Iface;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Processor;
@@ -204,10 +205,21 @@ public class NullTserver {
     }
 
     @Override
-    public List<TCMResult> conditionalUpdate(TInfo tinfo, TCredentials credentials, List<ByteBuffer> authorizations,
-        Map<TKeyExtent,List<TConditionalMutation>> mutations, List<String> symbols) throws TException {
+    public long startConditionalUpdate(TInfo tinfo, TCredentials credentials, List<ByteBuffer> authorizations, String tableID) throws ThriftSecurityException,
+        TException {
+      return 0;
+    }
+
+    @Override
+    public List<TCMResult> conditionalUpdate(TInfo tinfo, long sessID, Map<TKeyExtent,List<TConditionalMutation>> mutations, List<String> symbols)
+        throws NoSuchScanIDException, TException {
       return null;
     }
+
+    @Override
+    public void invalidateConditionalUpdate(TInfo tinfo, long sessID) throws TException {
+      
+    }
   }
   
   static class Opts extends Help {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ec537137/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java b/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java
index 33dc458..65a5636 100644
--- a/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java
@@ -1127,6 +1127,7 @@ public class ConditionalWriterTest {
     TabletLocator locator = TabletLocator.getLocator(zki, new Text(Tables.getNameToIdMap(zki).get(table)));
     while (locator.locateTablet(new Text("a"), false, false, CredentialHelper.create("root", new PasswordToken(secret), zki.getInstanceID())) != null) {
       UtilWaitThread.sleep(50);
+      locator.invalidateCache();
     }
   }
 


[37/50] git commit: ACCUMULO-1595 remove mlock config entry

Posted by kt...@apache.org.
ACCUMULO-1595 remove mlock config entry


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/93f741e7
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/93f741e7
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/93f741e7

Branch: refs/heads/ACCUMULO-1000
Commit: 93f741e7efe761c980eda74b7fb679bd015838f3
Parents: b3f1155
Author: Eric Newton <ec...@apache.org>
Authored: Mon Jul 22 13:10:34 2013 -0400
Committer: Eric Newton <ec...@apache.org>
Committed: Mon Jul 22 13:10:34 2013 -0400

----------------------------------------------------------------------
 core/src/main/java/org/apache/accumulo/core/conf/Property.java | 6 ------
 1 file changed, 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/93f741e7/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index ca86c9a..a28c52b 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -187,12 +187,6 @@ public enum Property {
       "A long running scan could possibly hold memory that has been minor compacted.  To prevent this, the in memory map is dumped to a local file and the "
           + "scan is switched to that local file.  We can not switch to the minor compacted file because it may have been modified by iterators.  The file "
           + "dumped to the local dir is an exact copy of what was in memory."),
-  TSERV_LOCK_MEMORY("tserver.memory.lock", "false", PropertyType.BOOLEAN,
-      "The tablet server must communicate with zookeeper frequently to maintain its locks.  If the tablet server's memory is swapped out"
-          + " the java garbage collector can stop all processing for long periods.  Change this property to true and the tablet server will "
-          + " attempt to lock all of its memory to RAM, which may reduce delays during java garbage collection.  You will have to modify the "
-          + " system limit for \"max locked memory\". This feature is only available when running on Linux.  Alternatively you may also "
-          + " want to set /proc/sys/vm/swappiness to zero (again, this is Linux-specific)."),
   TSERV_BULK_PROCESS_THREADS("tserver.bulk.process.threads", "1", PropertyType.COUNT,
       "The master will task a tablet server with pre-processing a bulk file prior to assigning it to the appropriate tablet servers.  This configuration"
           + " value controls the number of threads used to process the files."),


[08/50] git commit: Merge branch '1.5.1-SNAPSHOT'

Posted by kt...@apache.org.
Merge branch '1.5.1-SNAPSHOT'

Conflicts:
	test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
	test/system/auto/simple/bulk.py
	test/system/auto/simple/compaction.py


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a131e5d8
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a131e5d8
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a131e5d8

Branch: refs/heads/ACCUMULO-1000
Commit: a131e5d86a52e971c7bcfda55ca10f2623eb9f22
Parents: af06b16 cfb01d4
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 11:34:02 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 11:34:02 2013 -0400

----------------------------------------------------------------------
 .../accumulo/core/file/rfile/PrintInfo.java     |  2 +-
 .../apache/accumulo/fate/zookeeper/ZooLock.java | 12 ++-----
 .../accumulo/server/util/DumpZookeeper.java     |  4 +--
 .../accumulo/server/util/ListInstances.java     | 35 ++++++++++----------
 .../accumulo/server/util/RestoreZookeeper.java  |  5 ++-
 .../accumulo/server/util/TabletServerLocks.java |  9 +++--
 .../functional/BulkSplitOptimizationIT.java     |  2 --
 7 files changed, 32 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a131e5d8/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
index 05b9992,0000000..59e2d42
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
@@@ -1,91 -1,0 +1,89 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.Collections;
 +
 +import org.apache.accumulo.core.cli.ClientOpts.Password;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 +import org.apache.accumulo.test.VerifyIngest;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.junit.Test;
 +
 +/**
 + * This test verifies that when a lot of files are bulk imported into a table with one tablet, and the tablet then splits, not all of the map files go to the child tablets.
 + * 
 + * 
 + * 
 + */
 +
 +public class BulkSplitOptimizationIT extends MacTest {
 +  
 +  private static final String TABLE_NAME = "test_ingest";
 +  
 +  @Override
 +  public void configure(MiniAccumuloConfig cfg) {
 +    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "1s"));
 +  }
 +
 +  static final int ROWS = 100000;
 +  static final int SPLITS = 99;
 +
 +  @Test(timeout=30*1000)
 +  public void testBulkSplitOptimization() throws Exception {
 +    final Connector c = getConnector();
 +    c.tableOperations().create(TABLE_NAME);
 +    c.tableOperations().setProperty(TABLE_NAME, Property.TABLE_MAJC_RATIO.getKey(), "1000");
 +    c.tableOperations().setProperty(TABLE_NAME, Property.TABLE_FILE_MAX.getKey(), "1000");
 +    c.tableOperations().setProperty(TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
 +    
 +    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
 +    FunctionalTestUtils.createRFiles(c, fs, "tmp/testmf", ROWS, SPLITS, 8);
-     
 +    FunctionalTestUtils.bulkImport(c, fs, TABLE_NAME, "tmp/testmf");
-     
 +    FunctionalTestUtils.checkSplits(c, TABLE_NAME, 0, 0);
 +    FunctionalTestUtils.checkRFiles(c, TABLE_NAME, 1, 1, 100, 100);
 +    
 +    // initiate splits
 +    getConnector().tableOperations().setProperty(TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");
 +    
 +    UtilWaitThread.sleep(2000);
 +    
 +    // wait until over split threshold
 +    while (getConnector().tableOperations().listSplits(TABLE_NAME).size() < 50) {
 +      UtilWaitThread.sleep(500);
 +    }
 +    
 +    FunctionalTestUtils.checkSplits(c, TABLE_NAME, 50, 100);
 +    VerifyIngest.Opts opts = new VerifyIngest.Opts();
 +    opts.timestamp = 1;
 +    opts.dataSize = 50;
 +    opts.random = 56;
 +    opts.rows = 100000;
 +    opts.startRow = 0;
 +    opts.cols = 1;
 +    opts.password = new Password(PASSWORD);
 +    VerifyIngest.verifyIngest(c, opts, SOPTS);
 +    
 +    // ensure each tablet does not have all map files
 +    FunctionalTestUtils.checkRFiles(c, TABLE_NAME, 50, 100, 1, 4);
 +  }
 +
 +}


[48/50] git commit: fixed compile bug caused by merge

Posted by kt...@apache.org.
fixed compile bug caused by merge


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/5183ae44
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/5183ae44
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/5183ae44

Branch: refs/heads/ACCUMULO-1000
Commit: 5183ae44c087078331dbec7fbc654d6722a74c11
Parents: d0e8b37
Author: Keith Turner <kt...@apache.org>
Authored: Mon Jul 22 17:13:22 2013 -0400
Committer: Keith Turner <kt...@apache.org>
Committed: Mon Jul 22 17:13:22 2013 -0400

----------------------------------------------------------------------
 .../org/apache/accumulo/server/security/SecurityOperation.java  | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/5183ae44/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index 64b6177..cebc338 100644
--- a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@ -17,7 +17,6 @@
 package org.apache.accumulo.server.security;
 
 import java.nio.ByteBuffer;
-import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -32,7 +31,6 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.thrift.IterInfo;
 import org.apache.accumulo.core.data.thrift.TColumn;
-import org.apache.accumulo.core.data.thrift.TConditionalMutation;
 import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.data.thrift.TRange;
 import org.apache.accumulo.core.master.thrift.TableOperation;
@@ -334,8 +332,7 @@ public class SecurityOperation {
     
     authenticate(credentials);
     
-    return hasTablePermission(credentials.getPrincipal(), tableID, TablePermission.WRITE, true)
-        && hasTablePermission(credentials.getPrincipal(), tableID, TablePermission.READ, true);
+    return hasTablePermission(credentials, tableID, TablePermission.WRITE, true) && hasTablePermission(credentials, tableID, TablePermission.READ, true);
   }
 
   public boolean canSplitTablet(TCredentials credentials, String table) throws ThriftSecurityException {


[35/50] git commit: ACCUMULO-1000 added TODO

Posted by kt...@apache.org.
ACCUMULO-1000 added TODO


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/7bb5f8ff
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/7bb5f8ff
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/7bb5f8ff

Branch: refs/heads/ACCUMULO-1000
Commit: 7bb5f8ff3cc35c49573e452f0ceba029d803354b
Parents: fdb95b4
Author: keith@deenlo.com <ke...@deenlo.com>
Authored: Sat Jul 20 12:10:30 2013 -0400
Committer: keith@deenlo.com <ke...@deenlo.com>
Committed: Sat Jul 20 12:10:30 2013 -0400

----------------------------------------------------------------------
 .../org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bb5f8ff/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
index 0e86ec7..c87c865 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
@@ -361,6 +361,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
       if (mutations != null)
         sendToServer(location, mutations);
       
+      //TODO if exception is thrown, will not reschedule
       reschedule(this);
     }
   }
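
The TODO above flags a common pitfall: if sendToServer throws, reschedule is never reached and the writer silently stops retrying against that server. One possible way to address it, shown only as a sketch and not as the project's eventual fix, is to reschedule in a finally block:

    // Sketch only: always reschedule, even if sendToServer throws.
    try {
      if (mutations != null)
        sendToServer(location, mutations);
    } finally {
      reschedule(this);
    }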


[17/50] git commit: ACCUMULO-1572 ignore connection lost; eventually we'll get a session lost event

Posted by kt...@apache.org.
ACCUMULO-1572 ignore connection lost; eventually we'll get a session lost event


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/7b617230
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/7b617230
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/7b617230

Branch: refs/heads/ACCUMULO-1000
Commit: 7b617230979811d0e0ec8fffa6b633b70278c466
Parents: c82c431
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 14:08:53 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 14:08:53 2013 -0400

----------------------------------------------------------------------
 .../main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java   | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/7b617230/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
----------------------------------------------------------------------
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
index 961539a..fb2f3d8 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
@@ -235,7 +235,7 @@ public class ZooLock implements Watcher {
               lostLock(LockLossReason.LOCK_DELETED);
             } else if (asyncLock != null && event.getType() == EventType.NodeDeleted && event.getPath().equals(path + "/" + asyncLock)) {
               failedToAcquireLock();
-            } else if (event.getState() != KeeperState.Expired && (lock != null || asyncLock != null)) {
+            } else if (event.getState() != KeeperState.Disconnected && event.getState() != KeeperState.Expired && (lock != null || asyncLock != null)) {
               log.debug("Unexpected event watching lock node "+event+" "+asyncLockPath);
               try {
                 Stat stat2 = zooKeeper.getStatus(asyncLockPath, this);
@@ -349,6 +349,9 @@ public class ZooLock implements Watcher {
       try { // set the watch on the parent node again
         zooKeeper.getStatus(path, this);
         watchingParent = true;
+      } catch (KeeperException.ConnectionLossException ex) {
+        // we can't look at the lock because we aren't connected, but our session is still good
+        log.warn("lost connection to zookeeper");
       } catch (Exception ex) {
         if (lock != null || asyncLock != null) {
           lockWatcher.unableToMonitorLockNode(ex);
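
The reasoning behind this change: a Disconnected event only means the client temporarily lost its connection to ZooKeeper; the session, and therefore the lock, remains valid until an Expired event arrives. A minimal, hypothetical watcher showing that distinction (not the actual ZooLock logic, which also tracks lock-node deletion):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.Watcher.Event.KeeperState;

    class LockStateWatcher implements Watcher {
      @Override
      public void process(WatchedEvent event) {
        if (event.getState() == KeeperState.Disconnected) {
          // transient: the session may still be alive, so keep the lock and wait
          System.out.println("lost connection to zookeeper, keeping lock");
        } else if (event.getState() == KeeperState.Expired) {
          // terminal: the session is gone, and so is the lock
          System.out.println("session expired, lock lost");
        }
      }
    }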


[50/50] git commit: ACCUMULO-1000 more timeout handling

Posted by kt...@apache.org.
ACCUMULO-1000 more timeout handling


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/95931ea0
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/95931ea0
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/95931ea0

Branch: refs/heads/ACCUMULO-1000
Commit: 95931ea0bb7c41210758c37ec0cd9aa873772e67
Parents: 79019ef
Author: Keith Turner <kt...@apache.org>
Authored: Tue Jul 23 12:54:12 2013 -0400
Committer: Keith Turner <kt...@apache.org>
Committed: Tue Jul 23 12:54:12 2013 -0400

----------------------------------------------------------------------
 .../accumulo/core/client/impl/ConditionalWriterImpl.java    | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/95931ea0/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
index 55aa718..157b6cb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
@@ -85,7 +85,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
   
   private static final Logger log = Logger.getLogger(ConditionalWriterImpl.class);
 
-  private static final int MAX_SLEEP = 5000;
+  private static final int MAX_SLEEP = 30000;
 
   private static final long SESSION_CACHE_TIME = 60000;
 
@@ -592,13 +592,15 @@ class ConditionalWriterImpl implements ConditionalWriter {
     
     long sleepTime = 50;
 
+    long startTime = System.currentTimeMillis();
+
     while (true) {
       Map<String,TabletServerMutations<QCMutation>> binnedMutations = new HashMap<String,TabletLocator.TabletServerMutations<QCMutation>>();
       List<QCMutation> failures = new ArrayList<QCMutation>();
       
       locator.binMutations(mutList, binnedMutations, failures, credentials);
       
-      // TODO do failures matter? not if failures only indicates tablets are not assigned
+      // failures should not matter, if failures only indicates tablets are not assigned
       
       if (!binnedMutations.containsKey(location)) {
         // the tablets are at different locations now, so there is no need to invalidate the session
@@ -616,6 +618,9 @@ class ConditionalWriterImpl implements ConditionalWriter {
         locator.invalidateCache(location);
       }
       
+      if ((System.currentTimeMillis() - startTime) + sleepTime > timeout)
+        throw new TimedOutException(Collections.singleton(location));
+
       UtilWaitThread.sleep(sleepTime);
       sleepTime = Math.min(2 * sleepTime, MAX_SLEEP);
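
The loop above pairs capped exponential backoff (doubling the sleep up to MAX_SLEEP, now 30 seconds) with an overall deadline: before sleeping, it checks whether the elapsed time plus the next sleep would exceed the caller's timeout. A standalone sketch of that pattern, with a hypothetical attempt() standing in for the real binning and send logic:

    // Sketch of capped exponential backoff with an overall timeout, as in the loop above.
    void retryWithTimeout(long timeoutMs) throws InterruptedException {
      final long maxSleep = 30000;
      long sleepTime = 50;
      long startTime = System.currentTimeMillis();

      while (!attempt()) {            // attempt() is a placeholder for the retried work
        long elapsed = System.currentTimeMillis() - startTime;
        if (elapsed + sleepTime > timeoutMs)
          throw new RuntimeException("timed out after " + elapsed + " ms");
        Thread.sleep(sleepTime);
        sleepTime = Math.min(2 * sleepTime, maxSleep);   // double, but cap the backoff
      }
    }

    boolean attempt() { return true; }  // placeholder for the operation being retried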
 


[21/50] git commit: ACCUMULO-1577 committing Michael Berman's patch

Posted by kt...@apache.org.
ACCUMULO-1577 committing Michael Berman's patch


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/f8b9145d
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/f8b9145d
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/f8b9145d

Branch: refs/heads/ACCUMULO-1000
Commit: f8b9145d4c9feb12de9093a75720697f12d0b3b3
Parents: 048b308
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 15:34:05 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 15:34:05 2013 -0400

----------------------------------------------------------------------
 .../org/apache/accumulo/start/classloader/AccumuloClassLoader.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/f8b9145d/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
----------------------------------------------------------------------
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
index ccd85a6..a68ce0e 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
@@ -76,7 +76,7 @@ public class AccumuloClassLoader {
     String configFile = System.getProperty("org.apache.accumulo.config.file", "accumulo-site.xml");
     if (System.getenv("ACCUMULO_CONF_DIR") != null) {
       // accumulo conf dir should be set
-      SITE_CONF = System.getenv("ACCUMULO_CONF_DIR");
+      SITE_CONF = System.getenv("ACCUMULO_CONF_DIR") + "/" + configFile;
     } else if (System.getenv("ACCUMULO_HOME") != null) {
       // if no accumulo conf dir, try accumulo home default
       SITE_CONF = System.getenv("ACCUMULO_HOME") + "/conf/" + configFile;


[05/50] git commit: ACCUMULO-1574

Posted by kt...@apache.org.
ACCUMULO-1574


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a1fda97f
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a1fda97f
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a1fda97f

Branch: refs/heads/ACCUMULO-1000
Commit: a1fda97fe788be4cceb9bb10a0d2247ac0ac145c
Parents: 122b1b1
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 11:26:50 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 11:26:50 2013 -0400

----------------------------------------------------------------------
 .../org/apache/accumulo/server/util/TabletServerLocks.java  | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a1fda97f/server/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java b/server/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
index c3df185..dfb05d0 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
@@ -20,8 +20,10 @@ import java.util.List;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
+import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
@@ -41,17 +43,20 @@ public class TabletServerLocks {
    */
   public static void main(String[] args) throws Exception {
     
-    String tserverPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTSERVERS;
+    Instance instance = HdfsZooInstance.getInstance();
+    String tserverPath = ZooUtil.getRoot(instance) + Constants.ZTSERVERS;
     Opts opts = new Opts();
     opts.parseArgs(TabletServerLocks.class.getName(), args);
     
+    ZooCache cache = new ZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
+    
     if (opts.list) {
       IZooReaderWriter zoo = ZooReaderWriter.getInstance();
       
       List<String> tabletServers = zoo.getChildren(tserverPath);
       
       for (String tabletServer : tabletServers) {
-        byte[] lockData = ZooLock.getLockData(tserverPath + "/" + tabletServer);
+        byte[] lockData = ZooLock.getLockData(cache, tserverPath + "/" + tabletServer, null);
         String holder = null;
         if (lockData != null) {
           holder = new String(lockData);


[29/50] git commit: ACCUMULO-1537 convert simpler tests to use a common MAC; add option to use HDFS because LocalFileSystem does not support flush/sync semantics

Posted by kt...@apache.org.
ACCUMULO-1537 convert simpler tests to use a common MAC; add option to use HDFS because LocalFileSystem does not support flush/sync semantics


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/057b8d6c
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/057b8d6c
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/057b8d6c

Branch: refs/heads/ACCUMULO-1000
Commit: 057b8d6cb0ccbf472067a755d76eba7ee2e3fd71
Parents: 122fa39
Author: Eric Newton <ec...@apache.org>
Authored: Fri Jul 19 16:31:26 2013 -0400
Committer: Eric Newton <ec...@apache.org>
Committed: Fri Jul 19 16:31:26 2013 -0400

----------------------------------------------------------------------
 .../core/client/impl/BatchWriterImpl.java       |   4 +-
 .../chapters/troubleshooting.tex                |   4 +-
 minicluster/pom.xml                             |   6 ++
 .../minicluster/MiniAccumuloCluster.java        |  62 ++++++++++-
 .../minicluster/MiniAccumuloConfig.java         |   9 ++
 pom.xml                                         |   2 +-
 test/pom.xml                                    |  12 +++
 .../org/apache/accumulo/test/TestIngest.java    |   5 +-
 .../apache/accumulo/test/TestRandomDeletes.java |  23 ++--
 .../accumulo/test/functional/AddSplitIT.java    |  30 +++---
 .../test/functional/BadIteratorMincIT.java      |  32 +++---
 .../test/functional/BatchWriterFlushIT.java     |  24 +++--
 .../accumulo/test/functional/BinaryIT.java      |   2 +-
 .../accumulo/test/functional/BulkFileIT.java    |  13 +--
 .../apache/accumulo/test/functional/BulkIT.java |  15 +--
 .../accumulo/test/functional/ClassLoaderIT.java |   4 +-
 .../accumulo/test/functional/CombinerIT.java    |  20 ++--
 .../accumulo/test/functional/ConstraintIT.java  |  40 ++++---
 .../test/functional/CreateAndUseIT.java         |  27 ++---
 .../test/functional/CreateManyScannersIT.java   |   9 +-
 .../accumulo/test/functional/DeleteIT.java      |   2 +-
 .../accumulo/test/functional/DeleteRowsIT.java  |  44 ++++----
 .../test/functional/DeleteRowsSplitIT.java      |  24 ++---
 .../test/functional/FateStarvationIT.java       |  14 +--
 .../test/functional/HalfDeadTServerIT.java      |  17 +--
 .../accumulo/test/functional/LogicalTimeIT.java |  41 ++++---
 .../accumulo/test/functional/MacTest.java       |   5 +-
 .../accumulo/test/functional/MapReduceIT.java   |   8 +-
 .../accumulo/test/functional/MergeIT.java       |  69 ++++++------
 .../accumulo/test/functional/MergeMetaIT.java   |   2 +-
 .../accumulo/test/functional/NativeMapIT.java   |   4 +-
 .../accumulo/test/functional/PermissionsIT.java | 108 ++++++++++---------
 .../accumulo/test/functional/RenameIT.java      |  13 ++-
 .../accumulo/test/functional/RestartIT.java     |   5 +-
 .../test/functional/RestartStressIT.java        |   5 +-
 .../test/functional/ScanIteratorIT.java         |  11 +-
 .../accumulo/test/functional/ScanRangeIT.java   |  18 ++--
 .../test/functional/ServerSideErrorIT.java      |  19 ++--
 .../accumulo/test/functional/SimpleMacIT.java   |  24 +++--
 .../test/functional/SparseColumnFamilyIT.java   |  13 +--
 .../test/functional/SplitRecoveryIT.java        |   4 +-
 .../accumulo/test/functional/StartIT.java       |   8 +-
 .../accumulo/test/functional/TableIT.java       |  25 ++---
 .../accumulo/test/functional/TimeoutIT.java     |  16 +--
 .../accumulo/test/functional/VisibilityIT.java  |  68 ++++++------
 .../test/functional/WriteAheadLogIT.java        |   3 +-
 .../accumulo/test/functional/WriteLotsIT.java   |   7 +-
 .../accumulo/test/functional/ZooCacheIT.java    |   8 +-
 48 files changed, 530 insertions(+), 398 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/core/src/main/java/org/apache/accumulo/core/client/impl/BatchWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/BatchWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/BatchWriterImpl.java
index 167ba03..c884416 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/BatchWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/BatchWriterImpl.java
@@ -30,7 +30,9 @@ public class BatchWriterImpl implements BatchWriter {
   private TabletServerBatchWriter bw;
   
   public BatchWriterImpl(Instance instance, TCredentials credentials, String table, BatchWriterConfig config) {
-    ArgumentChecker.notNull(instance, credentials, table, config);
+    ArgumentChecker.notNull(instance, credentials, table);
+    if (config == null)
+      config= new BatchWriterConfig();
     this.table = table;
     this.bw = new TabletServerBatchWriter(instance, credentials, config);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex
----------------------------------------------------------------------
diff --git a/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex b/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex
index 8e55008..2e0a8ee 100644
--- a/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex
+++ b/docs/src/main/latex/accumulo_user_manual/chapters/troubleshooting.tex
@@ -450,7 +450,7 @@ INFO : Using ZooKeepers localhost:2181
 
 Accumulo tracks information about all other tables in the !METADATA
 table.  The !METADATA table information is tracked in a very simple
-table that always consists of a single tablet, called the !ROOT table.
+table that always consists of a single tablet, called the !!ROOT table.
 The root table information, such as its location and write-ahead logs
 are stored in Zookeeper.
 
@@ -511,7 +511,7 @@ Besides these columns, you may see:
 \item{\texttt{rowId !cloned} A marker that indicates that this tablet has been successfully cloned.}
 \item{\texttt{rowId splitRatio:ratio} A marker that indicates a split is in progress, and the files are being split at the given ratio.}
 \item{\texttt{rowId chopped} A marker that indicates that the files in the tablet do not contain keys outside the range of the tablet.}
-\item{\texttt{rowId scan} A marker that ....}
+\item{\texttt{rowId scan} A marker that prevents a file from being removed while there are still active scans using it.}
 
 \end{enumerate}
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/minicluster/pom.xml b/minicluster/pom.xml
index aaa2c66..3d1063b 100644
--- a/minicluster/pom.xml
+++ b/minicluster/pom.xml
@@ -76,6 +76,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-test</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 0418396..7c00cec 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -23,6 +23,7 @@ import java.io.FileWriter;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -41,6 +42,7 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.master.thrift.MasterGoalState;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.Pair;
@@ -53,6 +55,10 @@ import org.apache.accumulo.server.util.PortUtils;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.accumulo.start.Main;
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.zookeeper.server.ZooKeeperServerMain;
 
 /**
@@ -124,6 +130,7 @@ public class MiniAccumuloCluster {
   private List<LogWriter> logWriters = new ArrayList<MiniAccumuloCluster.LogWriter>();
   
   private MiniAccumuloConfig config;
+  private MiniDFSCluster miniDFS;
   
   public Process exec(Class<? extends Object> clazz, String... args) throws IOException {
     return exec(clazz, Collections.singletonList("-Xmx" + config.getDefaultMemory()), args);
@@ -211,7 +218,34 @@ public class MiniAccumuloCluster {
     config.getWalogDir().mkdirs();
     config.getLibDir().mkdirs();
     
+    if (config.useMiniDFS()) {
+      File nn = new File(config.getAccumuloDir(), "nn");
+      nn.mkdirs();
+      File dn = new File(config.getAccumuloDir(), "dn");
+      dn.mkdirs();
+      Configuration conf = new Configuration();
+      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
+      conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
+      conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
+      conf.set(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, "true");
+      conf.set(DataNode.DATA_DIR_PERMISSION_KEY, "775");
+      miniDFS = new MiniDFSCluster(conf, 1, true, null);
+      miniDFS.waitClusterUp();
+      InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
+      String uri = "hdfs://"+ dfsAddress.getHostName() + ":" + dfsAddress.getPort();
+      File coreFile = new File(config.getConfDir(), "core-site.xml");
+      writeConfig(coreFile, Collections.singletonMap("fs.default.name", uri));
+      File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
+      writeConfig(hdfsFile, Collections.singletonMap("dfs.support.append", "true"));
+      
+      Map<String, String> siteConfig = config.getSiteConfig();
+      siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), uri);
+      siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
+      config.setSiteConfig(siteConfig);
+    }
+    
     File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
+    writeConfig(siteFile, config.getSiteConfig());
     
     FileWriter fileWriter = new FileWriter(siteFile);
     fileWriter.append("<configuration>\n");
@@ -249,6 +283,16 @@ public class MiniAccumuloCluster {
     }
   }
   
+  private void writeConfig(File file, Map<String, String> settings) throws IOException {
+    FileWriter fileWriter = new FileWriter(file);
+    fileWriter.append("<configuration>\n");
+    
+    for (Entry<String,String> entry : settings.entrySet())
+      fileWriter.append("<property><name>" + entry.getKey() + "</name><value>" + entry.getValue() + "</value></property>\n");
+    fileWriter.append("</configuration>\n");
+    fileWriter.close();
+  }
+  
   /**
    * Starts Accumulo and Zookeeper processes. Can only be called once.
    * 
@@ -329,7 +373,7 @@ public class MiniAccumuloCluster {
     return result;
   }
   
-  public void killProcess(ServerType type, ProcessReference proc) throws ProcessNotFoundException {
+  public void killProcess(ServerType type, ProcessReference proc) throws ProcessNotFoundException, InterruptedException {
     boolean found = false;
     switch (type) {
       case MASTER:
@@ -343,6 +387,7 @@ public class MiniAccumuloCluster {
         for (Process tserver : tabletServerProcesses) {
           if (proc.equals(tserver)) {
             tabletServerProcesses.remove(tserver);
+            tserver.destroy();
             found = true;
             break;
           }
@@ -379,21 +424,28 @@ public class MiniAccumuloCluster {
    * call stop in a finally block as soon as possible.
    */
   public void stop() throws IOException, InterruptedException {
-    if (zooKeeperProcess != null)
+    for (LogWriter lw : logWriters) {
+      lw.flush();
+    }
+
+    if (zooKeeperProcess != null) {
       zooKeeperProcess.destroy();
-    if (masterProcess != null)
+    }
+    if (masterProcess != null) {
       masterProcess.destroy();
+    }
     if (tabletServerProcesses != null) {
       for (Process tserver : tabletServerProcesses) {
         tserver.destroy();
       }
     }
     
-    for (LogWriter lw : logWriters)
-      lw.flush();
     zooKeeperProcess = null;
     masterProcess = null;
     tabletServerProcesses.clear();
+    if (config.useMiniDFS() && miniDFS != null)
+      miniDFS.shutdown();
+    miniDFS = null;
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
index 600ea4b..a95e0c4 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
@@ -57,6 +57,8 @@ public class MiniAccumuloConfig {
   
   private boolean initialized = false;
   
+  private boolean useMiniDFS = false;
+  
   /**
    * @param dir
    *          An empty or nonexistant directory that Accumulo and Zookeeper can store data in. Creating the directory is left to the user. Java 7, Guava, and
@@ -353,4 +355,11 @@ public class MiniAccumuloConfig {
     return this;
   }
   
+  public boolean useMiniDFS() {
+    return useMiniDFS;
+  }
+  
+  public void useMiniDFS(boolean useMiniDFS) {
+    this.useMiniDFS = useMiniDFS;
+  }
 }

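Taken together, the MiniAccumuloCluster and MiniAccumuloConfig changes above let a test back
the mini cluster with an in-process HDFS instead of the local filesystem. A minimal sketch of
how the new flag might be used; the directory and password below are placeholders:

import java.io.File;

import org.apache.accumulo.minicluster.MiniAccumuloCluster;
import org.apache.accumulo.minicluster.MiniAccumuloConfig;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    MiniAccumuloConfig cfg = new MiniAccumuloConfig(new File("/tmp/mac-test"), "secret");
    cfg.useMiniDFS(true); // start a single-node MiniDFSCluster and point Accumulo at it
    MiniAccumuloCluster mac = new MiniAccumuloCluster(cfg);
    mac.start();
    try {
      System.out.println(mac.getInstanceName() + " @ " + mac.getZooKeepers());
      // ... connect with ZooKeeperInstance and run the test ...
    } finally {
      mac.stop(); // also shuts down the embedded MiniDFSCluster (see stop() above)
    }
  }
}
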
http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index edcaa16..226ccde 100644
--- a/pom.xml
+++ b/pom.xml
@@ -670,7 +670,7 @@
             <configuration>
               <!--parallel>classes</parallel-->
               <perCoreThreadCount>false</perCoreThreadCount>
-              <threadCount>${accumulo.it.threads}</threadCount> 
+              <threadCount>${accumulo.it.threads}</threadCount>
               <redirectTestOutputToFile>true</redirectTestOutputToFile>
             </configuration>
           </execution>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/pom.xml
----------------------------------------------------------------------
diff --git a/test/pom.xml b/test/pom.xml
index 9bf9dcf..2f89d7a 100644
--- a/test/pom.xml
+++ b/test/pom.xml
@@ -127,6 +127,18 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.cxf</groupId>
+      <artifactId>cxf-rt-frontend-jaxrs</artifactId>
+      <version>2.5.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-test</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.mortbay.jetty</groupId>
       <artifactId>jetty</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/main/java/org/apache/accumulo/test/TestIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TestIngest.java b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
index f81c813..972a20e 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
@@ -103,7 +103,7 @@ public class TestIngest {
 
     @Parameter(names={"-cv","--columnVisibility"}, description="place columns in this column family", converter=VisibilityConverter.class)
     public ColumnVisibility columnVisibility = new ColumnVisibility();
-
+    
     public Opts() { super("test_ingest"); }
   }
   
@@ -191,7 +191,8 @@ public class TestIngest {
     try {
       opts.startTracing(name);
       
-      Logger.getLogger(TabletServerBatchWriter.class.getName()).setLevel(Level.TRACE);
+      if (opts.debug)
+        Logger.getLogger(TabletServerBatchWriter.class.getName()).setLevel(Level.TRACE);
       
       // test batch update
       

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java b/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
index 7819429..1b553f4 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
@@ -21,8 +21,8 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnDefaultTable;
 import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
@@ -64,10 +64,10 @@ public class TestRandomDeletes {
     }
   }
   
-  private static TreeSet<RowColumn> scanAll(ClientOpts opts, ScannerOpts scanOpts, Text t) throws Exception {
+  private static TreeSet<RowColumn> scanAll(ClientOnDefaultTable opts, ScannerOpts scanOpts, String tableName) throws Exception {
     TreeSet<RowColumn> result = new TreeSet<RowColumn>();
     Connector conn = opts.getConnector();
-    Scanner scanner = conn.createScanner(t.toString(), auths);
+    Scanner scanner = conn.createScanner(tableName, auths);
     scanner.setBatchSize(scanOpts.scanBatchSize);
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();
@@ -78,13 +78,13 @@ public class TestRandomDeletes {
     return result;
   }
   
-  private static long scrambleDeleteHalfAndCheck(ClientOpts opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts, Text t, Set<RowColumn> rows) throws Exception {
+  private static long scrambleDeleteHalfAndCheck(ClientOnDefaultTable opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts, String tableName, Set<RowColumn> rows) throws Exception {
     int result = 0;
     ArrayList<RowColumn> entries = new ArrayList<RowColumn>(rows);
     java.util.Collections.shuffle(entries);
     
     Connector connector = opts.getConnector();
-    BatchWriter mutations = connector.createBatchWriter(t.toString(), bwOpts.getBatchWriterConfig());
+    BatchWriter mutations = connector.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());
     
     for (int i = 0; i < (entries.size() + 1) / 2; i++) {
       RowColumn rc = entries.get(i);
@@ -97,7 +97,7 @@ public class TestRandomDeletes {
     
     mutations.close();
     
-    Set<RowColumn> current = scanAll(opts, scanOpts, t);
+    Set<RowColumn> current = scanAll(opts, scanOpts, tableName);
     current.removeAll(rows);
     if (current.size() > 0) {
       throw new RuntimeException(current.size() + " records not deleted");
@@ -107,22 +107,25 @@ public class TestRandomDeletes {
   
   static public void main(String[] args) {
     
-    ClientOpts opts = new ClientOpts();
+    ClientOnDefaultTable opts = new ClientOnDefaultTable("test_ingest");
     ScannerOpts scanOpts = new ScannerOpts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(TestRandomDeletes.class.getName(), args, scanOpts, bwOpts);
     
+    log.info("starting random delete test");
+
+    
     try {
       long deleted = 0;
       
-      Text t = new Text("test_ingest");
+      String tableName = opts.getTableName();
       
-      TreeSet<RowColumn> doomed = scanAll(opts, scanOpts, t);
+      TreeSet<RowColumn> doomed = scanAll(opts, scanOpts, tableName);
       log.info("Got " + doomed.size() + " rows");
       
       long startTime = System.currentTimeMillis();
       while (true) {
-        long half = scrambleDeleteHalfAndCheck(opts, scanOpts, bwOpts, t, doomed);
+        long half = scrambleDeleteHalfAndCheck(opts, scanOpts, bwOpts, tableName, doomed);
         deleted += half;
         if (half == 0)
           break;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java
index 2307191..4f939ba 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java
@@ -24,7 +24,6 @@ import java.util.TreeSet;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
@@ -37,32 +36,33 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class AddSplitIT extends MacTest {
+public class AddSplitIT extends SimpleMacIT {
   
   @Test(timeout=30*1000)
   public void addSplitTest() throws Exception {
 
+    String tableName = makeTableName();
     Connector c = getConnector();
-    c.tableOperations().create("foo");
+    c.tableOperations().create(tableName);
     
-    insertData(1l);
+    insertData(tableName, 1l);
     
     TreeSet<Text> splits = new TreeSet<Text>();
     splits.add(new Text(String.format("%09d", 333)));
     splits.add(new Text(String.format("%09d", 666)));
     
-    c.tableOperations().addSplits("foo", splits);
+    c.tableOperations().addSplits(tableName, splits);
     
     UtilWaitThread.sleep(100);
     
-    Collection<Text> actualSplits = c.tableOperations().listSplits("foo");
+    Collection<Text> actualSplits = c.tableOperations().listSplits(tableName);
     
     if (!splits.equals(new TreeSet<Text>(actualSplits))) {
       throw new Exception(splits + " != " + actualSplits);
     }
     
-    verifyData(1l);
-    insertData(2l);
+    verifyData(tableName, 1l);
+    insertData(tableName, 2l);
     
     // did not clear splits on purpose, it should ignore existing split points
     // and still create the three additional split points
@@ -71,21 +71,21 @@ public class AddSplitIT extends MacTest {
     splits.add(new Text(String.format("%09d", 500)));
     splits.add(new Text(String.format("%09d", 800)));
     
-    c.tableOperations().addSplits("foo", splits);
+    c.tableOperations().addSplits(tableName, splits);
     
     UtilWaitThread.sleep(100);
     
-    actualSplits = c.tableOperations().listSplits("foo");
+    actualSplits = c.tableOperations().listSplits(tableName);
     
     if (!splits.equals(new TreeSet<Text>(actualSplits))) {
       throw new Exception(splits + " != " + actualSplits);
     }
     
-    verifyData(2l);
+    verifyData(tableName, 2l);
   }
   
-  private void verifyData(long ts) throws Exception {
-    Scanner scanner = getConnector().createScanner("foo", Authorizations.EMPTY);
+  private void verifyData(String tableName, long ts) throws Exception {
+    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
     
     Iterator<Entry<Key,Value>> iter = scanner.iterator();
     
@@ -117,8 +117,8 @@ public class AddSplitIT extends MacTest {
     
   }
   
-  private void insertData(long ts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-    BatchWriter bw = getConnector().createBatchWriter("foo", new BatchWriterConfig());
+  private void insertData(String tableName, long ts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
+    BatchWriter bw = getConnector().createBatchWriter(tableName, null);
     
     for (int i = 0; i < 10000; i++) {
       String row = String.format("%09d", i);

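The test conversions that follow all use the same pattern: extend SimpleMacIT and create a
uniquely named table with makeTableName() so the tests can share one mini cluster instead of
hard-coding names like "foo". A minimal sketch of the shape these ITs now take (ExampleIT is
illustrative only, assumed to live in the same package as the other ITs, and is not part of
the commit):

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;
import org.junit.Test;

public class ExampleIT extends SimpleMacIT {

  @Test(timeout = 30 * 1000)
  public void example() throws Exception {
    Connector c = getConnector();
    String tableName = makeTableName(); // unique per test, so tests can share one cluster
    c.tableOperations().create(tableName);

    BatchWriter bw = c.createBatchWriter(tableName, null); // null = default config (see BatchWriterImpl change)
    Mutation m = new Mutation("row1");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.close();
  }
}
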
http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
index 356a4a7..33ab344 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
@@ -33,29 +33,31 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class BadIteratorMincIT extends MacTest {
+public class BadIteratorMincIT extends SimpleMacIT {
   
   @Test(timeout=60*1000)
   public void test() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("foo");
+    
+    String tableName = makeTableName();
+    c.tableOperations().create(tableName);
     IteratorSetting is = new IteratorSetting(30, BadIterator.class);
-    c.tableOperations().attachIterator("foo", is, EnumSet.of(IteratorScope.minc));
-    BatchWriter bw = c.createBatchWriter("foo", new BatchWriterConfig());
+    c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
     
     Mutation m = new Mutation(new Text("r1"));
-    m.put(new Text("acf"), new Text("foo"), new Value("1".getBytes()));
+    m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes()));
     bw.addMutation(m);
     bw.close();
     
-    c.tableOperations().flush("foo", null, null, false);
+    c.tableOperations().flush(tableName, null, null, false);
     UtilWaitThread.sleep(1000);
     
     // minc should fail, so there should be no files
-    FunctionalTestUtils.checkRFiles(c, "foo", 1, 1, 0, 0);
+    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
     
     // try to scan table
-    Scanner scanner = c.createScanner("foo", Authorizations.EMPTY);
+    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
     
     int count = 0;
     for (@SuppressWarnings("unused")
@@ -67,12 +69,12 @@ public class BadIteratorMincIT extends MacTest {
       throw new Exception("Did not see expected # entries " + count);
     
     // remove the bad iterator
-    c.tableOperations().removeIterator("foo", BadIterator.class.getSimpleName(), EnumSet.of(IteratorScope.minc));
+    c.tableOperations().removeIterator(tableName, BadIterator.class.getSimpleName(), EnumSet.of(IteratorScope.minc));
     
     UtilWaitThread.sleep(5000);
     
     // minc should complete
-    FunctionalTestUtils.checkRFiles(c, "foo", 1, 1, 1, 1);
+    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
     
     count = 0;
     for (@SuppressWarnings("unused")
@@ -84,23 +86,23 @@ public class BadIteratorMincIT extends MacTest {
       throw new Exception("Did not see expected # entries " + count);
     
     // now try putting bad iterator back and deleting the table
-    c.tableOperations().attachIterator("foo", is, EnumSet.of(IteratorScope.minc));
-    bw = c.createBatchWriter("foo", new BatchWriterConfig());
+    c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
+    bw = c.createBatchWriter(tableName, new BatchWriterConfig());
     m = new Mutation(new Text("r2"));
-    m.put(new Text("acf"), new Text("foo"), new Value("1".getBytes()));
+    m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes()));
     bw.addMutation(m);
     bw.close();
     
     // make sure property is given time to propagate
     UtilWaitThread.sleep(500);
     
-    c.tableOperations().flush("foo", null, null, false);
+    c.tableOperations().flush(tableName, null, null, false);
     
     // make sure the flush has time to start
     UtilWaitThread.sleep(1000);
     
     // this should not hang
-    c.tableOperations().delete("foo");
+    c.tableOperations().delete(tableName);
   }
   
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
index a390ae4..55042f7 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
@@ -38,24 +38,26 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class BatchWriterFlushIT extends MacTest {
+public class BatchWriterFlushIT extends SimpleMacIT {
   
   private static final int NUM_TO_FLUSH = 100000;
   
   @Test(timeout=30*1000)
   public void run() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("bwft");
-    c.tableOperations().create("bwlt");
-    runFlushTest();
-    runLatencyTest();
+    String bwft = makeTableName();
+    c.tableOperations().create(bwft);
+    String bwlt = makeTableName();
+    c.tableOperations().create(bwlt);
+    runFlushTest(bwft);
+    runLatencyTest(bwlt);
     
   }
   
-  private void runLatencyTest() throws Exception {
+  private void runLatencyTest(String tableName) throws Exception {
     // should automatically flush after 2 seconds
-    BatchWriter bw = getConnector().createBatchWriter("bwlt", new BatchWriterConfig().setMaxLatency(1000, TimeUnit.MILLISECONDS));
-    Scanner scanner = getConnector().createScanner("bwlt", Authorizations.EMPTY);
+    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig().setMaxLatency(1000, TimeUnit.MILLISECONDS));
+    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
     
     Mutation m = new Mutation(new Text(String.format("r_%10d", 1)));
     m.put(new Text("cf"), new Text("cq"), new Value(("" + 1).getBytes()));
@@ -87,9 +89,9 @@ public class BatchWriterFlushIT extends MacTest {
     bw.close();
   }
   
-  private void runFlushTest() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException, Exception {
-    BatchWriter bw = getConnector().createBatchWriter("bwft", new BatchWriterConfig());
-    Scanner scanner = getConnector().createScanner("bwft", Authorizations.EMPTY);
+  private void runFlushTest(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException, Exception {
+    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
     Random r = new Random();
     
     for (int i = 0; i < 4; i++) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
index e225073..0d110b9 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
@@ -28,7 +28,7 @@ import org.junit.Test;
 
 public class BinaryIT extends MacTest {
   
-  @Test(timeout=30*1000)
+  @Test(timeout=60*1000)
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("bt");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java
index 07d92cc..ac7684a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java
@@ -39,16 +39,17 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class BulkFileIT extends MacTest {
+public class BulkFileIT extends SimpleMacIT {
   
   @Test(timeout=60*1000)
   public void testBulkFile() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("bulkFile");
+    String tableName = makeTableName();
+    c.tableOperations().create(tableName);
     SortedSet<Text> splits = new TreeSet<Text>();
     for (String split : "0333 0666 0999 1333 1666".split(" "))
       splits.add(new Text(split));
-    c.tableOperations().addSplits("bulkFile", splits);
+    c.tableOperations().addSplits(tableName, splits);
     Configuration conf = new Configuration();
     AccumuloConfiguration aconf = ServerConfiguration.getDefaultConfiguration();
     FileSystem fs = TraceFileSystem.wrap(FileUtil.getFileSystem(conf, aconf));
@@ -72,11 +73,11 @@ public class BulkFileIT extends MacTest {
     writeData(writer3, 1000, 1999);
     writer3.close();
     
-    FunctionalTestUtils.bulkImport(c,  fs, "bulkFile", dir);
+    FunctionalTestUtils.bulkImport(c,  fs, tableName, dir);
     
-    FunctionalTestUtils.checkRFiles(c, "bulkFile", 6, 6, 1, 1);
+    FunctionalTestUtils.checkRFiles(c, tableName, 6, 6, 1, 1);
     
-    verifyData("bulkFile", 0, 1999);
+    verifyData(tableName, 0, 1999);
     
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
index d5115ff..3eea057 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Test;
 
-public class BulkIT extends MacTest {
+public class BulkIT extends SimpleMacIT {
   
   static final int N = 100000;
   static final int COUNT = 5;
@@ -33,17 +33,19 @@ public class BulkIT extends MacTest {
   @Test(timeout=120*1000)
   public void test() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("test_ingest");
+    String tableName = makeTableName();
+    c.tableOperations().create(tableName);
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    String base = cluster.getConfig().getDir().getAbsolutePath();
-    fs.mkdirs(new Path(base + "/testBulkFail"));
+    String base = "target/accumulo-maven-plugin";
+    fs.mkdirs(new Path("target/accumulo-maven-plugin/testBulkFail"));
     
     Opts opts = new Opts();
     opts.timestamp = 1;
     opts.random = 56;
     opts.rows = N;
-    opts.instance = cluster.getInstanceName();
+    opts.instance = c.getInstance().getInstanceName();
     opts.cols = 1;
+    opts.tableName = tableName;
     for (int i = 0; i < COUNT; i++) {
       opts.outputFile = base + String.format("/testrf/rf%02d", i);
       opts.startRow = N * i;
@@ -54,8 +56,9 @@ public class BulkIT extends MacTest {
     opts.rows = 1;
     // create an rfile with one entry, there was a bug with this:
     TestIngest.ingest(c, opts , BWOPTS);
-    c.tableOperations().importDirectory("test_ingest", base + "/testrf", base + "/testBulkFail", false);
+    c.tableOperations().importDirectory(tableName, base + "/testrf", base + "/testBulkFail", false);
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.tableName = tableName;
     vopts.random = 56;
     for (int i = 0; i < COUNT; i++) {
       vopts.startRow = i * N;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
index 5b2f186..38c09d1 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Test;
 
-public class ClassLoaderIT extends MacTest {
+public class ClassLoaderIT extends SimpleMacIT {
   
   @Test(timeout=60*1000)
   public void test() throws Exception {
@@ -55,7 +55,7 @@ public class ClassLoaderIT extends MacTest {
     bw.close();
     scanCheck(c, "Test");
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    Path jarPath = new Path(cluster.getConfig().getDir().getAbsolutePath() + "/lib/Test.jar");
+    Path jarPath = new Path(rootPath() + "/lib/Test.jar");
     fs.copyFromLocalFile(new Path(System.getProperty("user.dir")+"/system/auto/TestCombinerX.jar"), jarPath);
     UtilWaitThread.sleep(1000);
     IteratorSetting is = new IteratorSetting(10, "TestCombiner", "org.apache.accumulo.test.functional.TestCombiner");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java
index de93549..d10d084 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java
@@ -35,13 +35,12 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.LongCombiner.Type;
 import org.apache.accumulo.core.iterators.user.SummingCombiner;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.util.Admin;
 import org.junit.Test;
 
-public class CombinerIT extends MacTest {
+public class CombinerIT extends SimpleMacIT {
   
-  private void checkSum(Connector c) throws Exception {
-    Scanner s = c.createScanner("test", Authorizations.EMPTY);
+  private void checkSum(String tableName, Connector c) throws Exception {
+    Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
     Iterator<Entry<Key,Value>> i = s.iterator();
     assertTrue(i.hasNext());
     Entry<Key,Value> entry = i.next();
@@ -52,23 +51,20 @@ public class CombinerIT extends MacTest {
   @Test(timeout=60*1000)
   public void aggregationTest() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("test");
+    String tableName = makeTableName();
+    c.tableOperations().create(tableName);
     IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
     SummingCombiner.setEncodingType(setting, Type.STRING);
     SummingCombiner.setColumns(setting, Collections.singletonList(new IteratorSetting.Column("cf")));
-    c.tableOperations().attachIterator("test", setting);
-    BatchWriter bw = c.createBatchWriter("test", new BatchWriterConfig());
+    c.tableOperations().attachIterator(tableName, setting);
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
     for (int i = 0; i < 10; i++) {
       Mutation m = new Mutation("row1");
       m.put("cf".getBytes(), "col1".getBytes(), ("" + i).getBytes());
       bw.addMutation(m);
     }
     bw.close();
-    checkSum(c);
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-    cluster.stop();
-    cluster.start();
-    checkSum(c);
+    checkSum(tableName, c);
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
index fd2e91f..022aef3 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
@@ -21,7 +21,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
@@ -38,34 +37,33 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
 import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
 import org.apache.hadoop.io.Text;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 import org.junit.Test;
 
-public class ConstraintIT extends MacTest {
+public class ConstraintIT extends SimpleMacIT {
   
   @Test(timeout=30*1000)
   public void run() throws Exception {
+    String[] tableNames = { makeTableName(), makeTableName(), makeTableName() }; 
     Connector c = getConnector();
-    for (String table : "ct ct2 ct3".split(" ")) {
+    for (String table : tableNames) {
       c.tableOperations().create(table);
       c.tableOperations().addConstraint(table, NumericValueConstraint.class.getName());
       c.tableOperations().addConstraint(table, AlphaNumKeyConstraint.class.getName());
     }
       
-    Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-    logger.setLevel(Level.TRACE);
+//    Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
+    //logger.setLevel(Level.TRACE);
     
-    test1();
+    test1(tableNames[0]);
     
-    logger.setLevel(Level.TRACE);
+    //logger.setLevel(Level.TRACE);
     
-    test2("ct2", false);
-    test2("ct3", true);
+    test2(tableNames[1], false);
+    test2(tableNames[2], true);
   }
   
-  private void test1() throws Exception {
-    BatchWriter bw = getConnector().createBatchWriter("ct", new BatchWriterConfig());
+  private void test1(String tableName) throws Exception {
+    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
     
     Mutation mut1 = new Mutation(new Text("r1"));
     mut1.put(new Text("cf1"), new Text("cq1"), new Value("123".getBytes()));
@@ -75,7 +73,7 @@ public class ConstraintIT extends MacTest {
     // should not throw any exceptions
     bw.close();
     
-    bw = getConnector().createBatchWriter("ct", new BatchWriterConfig());
+    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
     
     // create a mutation with a non numeric value
     Mutation mut2 = new Mutation(new Text("r1"));
@@ -115,7 +113,7 @@ public class ConstraintIT extends MacTest {
     }
     
     // verify mutation did not go through
-    Scanner scanner = getConnector().createScanner("ct", Authorizations.EMPTY);
+    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
     scanner.setRange(new Range(new Text("r1")));
     
     Iterator<Entry<Key,Value>> iter = scanner.iterator();
@@ -132,11 +130,11 @@ public class ConstraintIT extends MacTest {
     }
     
     // remove the numeric value constraint
-    getConnector().tableOperations().removeConstraint("ct", 2);
+    getConnector().tableOperations().removeConstraint(tableName, 2);
     UtilWaitThread.sleep(1000);
     
     // now should be able to add a non numeric value
-    bw = getConnector().createBatchWriter("ct", new BatchWriterConfig());
+    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
     bw.addMutation(mut2);
     bw.close();
     
@@ -155,11 +153,11 @@ public class ConstraintIT extends MacTest {
     }
     
     // add a constraint that references a non-existant class
-    getConnector().tableOperations().setProperty("ct", Property.TABLE_CONSTRAINT_PREFIX + "1", "com.foobar.nonExistantClass");
+    getConnector().tableOperations().setProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX + "1", "com.foobar.nonExistantClass");
     UtilWaitThread.sleep(1000);
     
     // add a mutation
-    bw = getConnector().createBatchWriter("ct", new BatchWriterConfig());
+    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
     
     Mutation mut3 = new Mutation(new Text("r1"));
     mut3.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes()));
@@ -195,11 +193,11 @@ public class ConstraintIT extends MacTest {
     }
     
     // remove the bad constraint
-    getConnector().tableOperations().removeConstraint("ct", 1);
+    getConnector().tableOperations().removeConstraint(tableName, 1);
     UtilWaitThread.sleep(1000);
     
     // try the mutation again
-    bw = getConnector().createBatchWriter("ct", new BatchWriterConfig());
+    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
     bw.addMutation(mut3);
     bw.close();
     

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
index cfcf21a..454a1ef 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
@@ -33,7 +33,7 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class CreateAndUseIT extends MacTest {
+public class CreateAndUseIT extends SimpleMacIT {
   
   @Test(timeout=60*1000)
   public void run() throws Exception {
@@ -48,9 +48,10 @@ public class CreateAndUseIT extends MacTest {
     Text cf = new Text("cf1");
     Text cq = new Text("cq1");
     
-    getConnector().tableOperations().create("t1");
-    getConnector().tableOperations().addSplits("t1", splits);
-    BatchWriter bw = getConnector().createBatchWriter("t1", new BatchWriterConfig());
+    String tableName = makeTableName();
+    getConnector().tableOperations().create(tableName);
+    getConnector().tableOperations().addSplits(tableName, splits);
+    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
     
     for (int i = 1; i < 257; i++) {
       Mutation m = new Mutation(new Text(String.format("%08x", (i << 8) - 16)));
@@ -62,7 +63,7 @@ public class CreateAndUseIT extends MacTest {
     bw.close();
     
     // verify data is there
-    Scanner scanner1 = getConnector().createScanner("t1", Authorizations.EMPTY);
+    Scanner scanner1 = getConnector().createScanner(tableName, Authorizations.EMPTY);
     
     int ei = 1;
     
@@ -83,9 +84,10 @@ public class CreateAndUseIT extends MacTest {
     }
     
     // TEST 2 create a table and immediately scan it
-    getConnector().tableOperations().create("t2");
-    getConnector().tableOperations().addSplits("t2", splits);
-    Scanner scanner2 = getConnector().createScanner("t2", Authorizations.EMPTY);
+    String table2 = makeTableName();
+    getConnector().tableOperations().create(table2);
+    getConnector().tableOperations().addSplits(table2, splits);
+    Scanner scanner2 = getConnector().createScanner(table2, Authorizations.EMPTY);
     int count = 0;
     for (Entry<Key,Value> entry : scanner2) {
       if (entry != null)
@@ -102,10 +104,11 @@ public class CreateAndUseIT extends MacTest {
     for (int i = 1; i < 257; i++) {
       ranges.add(new Range(new Text(String.format("%08x", (i << 8) - 16))));
     }
-    
-    getConnector().tableOperations().create("t3");
-    getConnector().tableOperations().addSplits("t3", splits);
-    BatchScanner bs = getConnector().createBatchScanner("t3", Authorizations.EMPTY, 3);
+
+    String table3 = makeTableName();
+    getConnector().tableOperations().create(table3);
+    getConnector().tableOperations().addSplits(table3, splits);
+    BatchScanner bs = getConnector().createBatchScanner(table3, Authorizations.EMPTY, 3);
     bs.setRanges(ranges);
     count = 0;
     for (Entry<Key,Value> entry : bs) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
index c41fae1..39b61f4 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
@@ -20,14 +20,15 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.security.Authorizations;
 import org.junit.Test;
 
-public class CreateManyScannersIT extends MacTest {
+public class CreateManyScannersIT extends SimpleMacIT {
   
-  @Test(timeout=10*1000)
+  @Test(timeout=20*1000)
   public void run() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("mscant");
+    String tableName = makeTableName();
+    c.tableOperations().create(tableName);
     for (int i = 0; i < 100000; i++) {
-      c.createScanner("mscant", Authorizations.EMPTY);
+      c.createScanner(tableName, Authorizations.EMPTY);
     }
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
index 08feb8a..2e6bc4f 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
@@ -44,7 +44,7 @@ public class DeleteIT extends MacTest {
     vopts.cols = opts.cols = 1;
     vopts.random = opts.random = 56;
     TestIngest.ingest(c, opts, BWOPTS);
-    assertEquals(0, cluster.exec(TestRandomDeletes.class, "-p", MacTest.PASSWORD, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers()).waitFor());
+    assertEquals(0, cluster.exec(TestRandomDeletes.class, "-u", "root", "-p", MacTest.PASSWORD, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers()).waitFor());
     TestIngest.ingest(c, opts, BWOPTS);
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
index b560b56..d1ab6c4 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
@@ -28,7 +28,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -38,7 +38,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
-public class DeleteRowsIT extends MacTest {
+public class DeleteRowsIT extends SimpleMacIT {
   private static final Logger log = Logger.getLogger(DeleteRowsIT.class);
   
   private static final int ROWS_PER_TABLET = 10;
@@ -62,37 +62,39 @@ public class DeleteRowsIT extends MacTest {
     // Delete ranges of rows, and verify the tablets are removed.
     int i = 0;
     // Eliminate whole tablets
-    testSplit("test" + i++, "f", "h", "abcdefijklmnopqrstuvwxyz", 260);
+    String tableName = makeTableName();
+    testSplit(tableName + i++, "f", "h", "abcdefijklmnopqrstuvwxyz", 260);
     // Eliminate whole tablets, partial first tablet
-    testSplit("test" + i++, "f1", "h", "abcdeff1ijklmnopqrstuvwxyz", 262);
+    testSplit(tableName + i++, "f1", "h", "abcdeff1ijklmnopqrstuvwxyz", 262);
     // Eliminate whole tablets, partial last tablet
-    testSplit("test" + i++, "f", "h1", "abcdefijklmnopqrstuvwxyz", 258);
+    testSplit(tableName + i++, "f", "h1", "abcdefijklmnopqrstuvwxyz", 258);
     // Eliminate whole tablets, partial first and last tablet
-    testSplit("test" + i++, "f1", "h1", "abcdeff1ijklmnopqrstuvwxyz", 260);
+    testSplit(tableName + i++, "f1", "h1", "abcdeff1ijklmnopqrstuvwxyz", 260);
     // Eliminate one tablet
-    testSplit("test" + i++, "f", "g", "abcdefhijklmnopqrstuvwxyz", 270);
+    testSplit(tableName + i++, "f", "g", "abcdefhijklmnopqrstuvwxyz", 270);
     // Eliminate partial tablet, matches start split
-    testSplit("test" + i++, "f", "f1", "abcdefghijklmnopqrstuvwxyz", 278);
+    testSplit(tableName + i++, "f", "f1", "abcdefghijklmnopqrstuvwxyz", 278);
     // Eliminate partial tablet, matches end split
-    testSplit("test" + i++, "f1", "g", "abcdeff1hijklmnopqrstuvwxyz", 272);
+    testSplit(tableName + i++, "f1", "g", "abcdeff1hijklmnopqrstuvwxyz", 272);
     // Eliminate tablets starting at -inf
-    testSplit("test" + i++, null, "h", "ijklmnopqrstuvwxyz", 200);
+    testSplit(tableName + i++, null, "h", "ijklmnopqrstuvwxyz", 200);
     // Eliminate tablets ending at +inf
-    testSplit("test" + i++, "t", null, "abcdefghijklmnopqrst", 200);
+    testSplit(tableName + i++, "t", null, "abcdefghijklmnopqrst", 200);
     // Eliminate some rows inside one tablet
-    testSplit("test" + i++, "t0", "t2", "abcdefghijklmnopqrstt0uvwxyz", 278);
+    testSplit(tableName + i++, "t0", "t2", "abcdefghijklmnopqrstt0uvwxyz", 278);
     // Eliminate some rows in the first tablet
-    testSplit("test" + i++, null, "A1", "abcdefghijklmnopqrstuvwxyz", 278);
+    testSplit(tableName + i++, null, "A1", "abcdefghijklmnopqrstuvwxyz", 278);
     // Eliminate some rows in the last tablet
-    testSplit("test" + i++, "{1", null, "abcdefghijklmnopqrstuvwxyz{1", 272);
+    testSplit(tableName + i++, "{1", null, "abcdefghijklmnopqrstuvwxyz{1", 272);
     // Delete everything
-    testSplit("test" + i++, null, null, "", 0);
+    testSplit(tableName + i++, null, null, "", 0);
   }
   
   private void testSplit(String table, String start, String end, String result, int entries) throws Exception {
     // Put a bunch of rows on each tablet
-    this.getConnector().tableOperations().create(table);
-    BatchWriter bw = this.getConnector().createBatchWriter(table, new BatchWriterConfig());
+    Connector c = getConnector();
+    c.tableOperations().create(table);
+    BatchWriter bw = c.createBatchWriter(table, null);
     for (String row : ROWS) {
       for (int j = 0; j < ROWS_PER_TABLET; j++) {
         Mutation m = new Mutation(row + j);
@@ -103,19 +105,19 @@ public class DeleteRowsIT extends MacTest {
     bw.flush();
     bw.close();
     // Split the table
-    this.getConnector().tableOperations().addSplits(table, SPLITS);
+    c.tableOperations().addSplits(table, SPLITS);
     
     Text startText = start == null ? null : new Text(start);
     Text endText = end == null ? null : new Text(end);
-    this.getConnector().tableOperations().deleteRows(table, startText, endText);
-    Collection<Text> remainingSplits = this.getConnector().tableOperations().listSplits(table);
+    c.tableOperations().deleteRows(table, startText, endText);
+    Collection<Text> remainingSplits = c.tableOperations().listSplits(table);
     StringBuilder sb = new StringBuilder();
     // See that whole tablets are removed
     for (Text split : remainingSplits)
       sb.append(split.toString());
     assertEquals(result, sb.toString());
     // See that the rows are really deleted
-    Scanner scanner = this.getConnector().createScanner(table, Authorizations.EMPTY);
+    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
     int count = 0;
     for (Entry<Key,Value> entry : scanner) {
       Text row = entry.getKey().getRow();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
index a26b196..6b28986 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map.Entry;
-import java.util.Random;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
@@ -39,7 +38,7 @@ import org.apache.log4j.Logger;
 import org.junit.Test;
 
 // attempt to reproduce ACCUMULO-315
-public class DeleteRowsSplitIT extends MacTest {
+public class DeleteRowsSplitIT extends SimpleMacIT {
   
   private static final Logger log = Logger.getLogger(DeleteRowsSplitIT.class);
   
@@ -53,25 +52,20 @@ public class DeleteRowsSplitIT extends MacTest {
     }
   }
   
-  static final String TABLE;
-  static {
-    Random random = new Random();
-    TABLE = "table" + Long.toHexString(random.nextLong());
-  }
-  
   @Test(timeout=200*1000)
   public void run() throws Exception {
     // Delete ranges of rows, and verify the are removed
     // Do this while adding many splits
+    final String tableName = makeTableName();
     
     // Eliminate whole tablets
     for (int test = 0; test < 50; test++) {
       // create a table
       log.info("Test " + test);
-      getConnector().tableOperations().create(TABLE);
+      getConnector().tableOperations().create(tableName);
       
       // put some data in it
-      fillTable(TABLE);
+      fillTable(tableName);
       
       // generate a random delete range
       final Text start = new Text();
@@ -86,7 +80,7 @@ public class DeleteRowsSplitIT extends MacTest {
           try {
             // split the table
             final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
-            getConnector().tableOperations().addSplits(TABLE, afterEnd);
+            getConnector().tableOperations().addSplits(tableName, afterEnd);
           } catch (Exception ex) {
             log.error(ex, ex);
             synchronized (fail) {
@@ -99,7 +93,7 @@ public class DeleteRowsSplitIT extends MacTest {
       
       UtilWaitThread.sleep(test * 2);
       
-      getConnector().tableOperations().deleteRows(TABLE, start, end);
+      getConnector().tableOperations().deleteRows(tableName, start, end);
       
       t.join();
       synchronized (fail) {
@@ -107,14 +101,14 @@ public class DeleteRowsSplitIT extends MacTest {
       }
       
       // scan the table
-      Scanner scanner = getConnector().createScanner(TABLE, Authorizations.EMPTY);
+      Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
       for (Entry<Key,Value> entry : scanner) {
         Text row = entry.getKey().getRow();
         assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
       }
       
       // delete the table
-      getConnector().tableOperations().delete(TABLE);
+      getConnector().tableOperations().delete(tableName);
     }
   }
   
@@ -132,7 +126,7 @@ public class DeleteRowsSplitIT extends MacTest {
   }
   
   private void fillTable(String table) throws Exception {
-    BatchWriter bw = getConnector().createBatchWriter(TABLE, new BatchWriterConfig());
+    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
     for (String row : ROWS) {
       Mutation m = new Mutation(row);
       m.put("cf", "cq", "value");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
index 454062f..aed97b9 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
@@ -29,14 +29,15 @@ import org.junit.Test;
 /**
  * See ACCUMULO-779
  */
-public class FateStarvationIT extends MacTest {
+public class FateStarvationIT extends SimpleMacIT {
   
   @Test(timeout=2 * 60 * 1000)
   public void run() throws Exception {
+    String tableName = makeTableName();
     Connector c = getConnector();
-    c.tableOperations().create("test_ingest");
+    c.tableOperations().create(tableName);
     
-    c.tableOperations().addSplits("test_ingest", TestIngest.getSplitPoints(0, 100000, 50));
+    c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, 100000, 50));
     
     TestIngest.Opts opts = new TestIngest.Opts();
     opts.random = 89;
@@ -44,9 +45,10 @@ public class FateStarvationIT extends MacTest {
     opts.dataSize = 50;
     opts.rows = 100000;
     opts.cols = 1;
+    opts.tableName = tableName;
     TestIngest.ingest(c, opts, new BatchWriterOpts());
     
-    c.tableOperations().flush("test_ingest", null, null, true);
+    c.tableOperations().flush(tableName, null, null, true);
     
     List<Text> splits = new ArrayList<Text>(TestIngest.getSplitPoints(0, 100000, 67));
     Random rand = new Random();
@@ -55,10 +57,10 @@ public class FateStarvationIT extends MacTest {
       int idx1 = rand.nextInt(splits.size() - 1);
       int idx2 = rand.nextInt(splits.size() - (idx1 + 1)) + idx1 + 1;
       
-      c.tableOperations().compact("test_ingest", splits.get(idx1), splits.get(idx2), false, false);
+      c.tableOperations().compact(tableName, splits.get(idx1), splits.get(idx2), false, false);
     }
     
-    c.tableOperations().offline("test_ingest");
+    c.tableOperations().offline(tableName);
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
index ee08012..e058ed3 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
@@ -49,7 +49,8 @@ public class HalfDeadTServerIT extends MacTest {
     Map<String,String> siteConfig = new HashMap<String,String>();
     siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s");
     siteConfig.put(Property.GENERAL_RPC_TIMEOUT.getKey(), "5s");
-    cfg.setSiteConfig(siteConfig );
+    cfg.setSiteConfig(siteConfig);
+    cfg.useMiniDFS(true);
   }
   
   class DumpOutput extends Daemon {
@@ -84,12 +85,12 @@ public class HalfDeadTServerIT extends MacTest {
   }
   
   
-  @Test(timeout=30*1000)
+  @Test(timeout=100*1000)
   public void testRecover() throws Exception {
     test(10);
   }
   
-  @Test(timeout=60*1000)
+  @Test(timeout=120*1000)
   public void testTimeout() throws Exception {
     String results = test(40);
     if (results != null)
@@ -101,8 +102,6 @@ public class HalfDeadTServerIT extends MacTest {
       return null;
     Connector c = getConnector();
     assertEquals(1, c.instanceOperations().getTabletServers().size());
-    // don't need the regular tablet server
-    cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
     
     // create our own tablet server with the special test library
     String javaHome = System.getProperty("java.home");
@@ -126,8 +125,12 @@ public class HalfDeadTServerIT extends MacTest {
     Process tserver = builder.start();
     DumpOutput t = new DumpOutput(tserver.getInputStream());
     t.start();
+    UtilWaitThread.sleep(1000);
+    // don't need the regular tablet server
+    cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
+    UtilWaitThread.sleep(1000);
     c.tableOperations().create("test_ingest");
-    assertTrue(c.instanceOperations().getTabletServers().size() > 1);
+    assertEquals(1, c.instanceOperations().getTabletServers().size());
     int rows = 100*1000;
     Process ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD, "--rows", rows + "");
     UtilWaitThread.sleep(500);
@@ -151,7 +154,9 @@ public class HalfDeadTServerIT extends MacTest {
     assertTrue(results.contains("sleeping\nsleeping\nsleeping\n"));
     assertTrue(results.contains("Zookeeper error, will retry"));
     ingest.destroy();
+    ingest.waitFor();
     tserver.destroy();
+    tserver.waitFor();
     t.join();
     return results;
   }
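
The waitFor() calls added after destroy() in the hunk above keep the test from returning while its child processes are still shutting down. A minimal sketch of the pattern, assuming a Unix-like system with a sleep binary on the PATH:

import java.io.IOException;

public class DestroyThenWait {
  public static void main(String[] args) throws IOException, InterruptedException {
    Process child = new ProcessBuilder("sleep", "60").start();
    child.destroy();            // ask the child process to terminate
    int exit = child.waitFor(); // block until it has actually exited before moving on
    System.out.println("child exited with " + exit);
  }
}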

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
index 4ffef57..fafb57a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
@@ -31,33 +31,32 @@ import org.junit.Test;
 
 public class LogicalTimeIT extends MacTest {
 
-  
-  
   @Test(timeout=120*1000)
   public void run() throws Exception {
     int tc = 0;
+    String tableName = "foo";
     Connector c = getConnector();
-    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
-    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
     
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
     
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
-    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
+    runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
     
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java b/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
index 622702f..5fe60e2 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
@@ -32,8 +32,9 @@ import org.junit.rules.TemporaryFolder;
 public class MacTest {
   public static final Logger log = Logger.getLogger(MacTest.class);
   public static final String PASSWORD = "secret";
-  static final ScannerOpts SOPTS = new ScannerOpts();
-  static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
+  public static final ScannerOpts SOPTS = new ScannerOpts();
+  public static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
+  
   public TemporaryFolder folder = new TemporaryFolder();
   public MiniAccumuloCluster cluster;
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
index b8592d9..839d51d 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.io.Text;
 import org.codehaus.plexus.util.Base64;
 import org.junit.Test;
 
-public class MapReduceIT extends MacTest {
+public class MapReduceIT extends SimpleMacIT {
   
   static final String tablename = "mapredf";
   static final String input_cf = "cf-HASHTYPE";
@@ -55,9 +55,9 @@ public class MapReduceIT extends MacTest {
     }
     bw.close();
     
-    Process hash = cluster.exec(RowHash.class, 
-        "-i", cluster.getInstanceName(),
-        "-z", cluster.getZooKeepers(),
+    Process hash = exec(RowHash.class, 
+        "-i", c.getInstance().getInstanceName(),
+        "-z", c.getInstance().getZooKeepers(),
         "-u", "root",
         "-p", MacTest.PASSWORD,
         "-t", tablename,

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
index 4428277..2d3e78e 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -16,17 +16,17 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
 
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
+import java.util.Map.Entry;
 import java.util.SortedSet;
 import java.util.TreeSet;
-import java.util.Map.Entry;
 
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.admin.TimeType;
@@ -38,7 +38,7 @@ import org.apache.accumulo.core.util.Merge;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class MergeIT extends MacTest {
+public class MergeIT extends SimpleMacIT {
   
   SortedSet<Text> splits(String [] points) {
     SortedSet<Text> result = new TreeSet<Text>();
@@ -50,38 +50,40 @@ public class MergeIT extends MacTest {
   @Test(timeout=30*1000)
   public void merge() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("test");
-    c.tableOperations().addSplits("test", splits("a b c d e f g h i j k".split(" ")));
-    BatchWriter bw = c.createBatchWriter("test", new BatchWriterConfig());
+    String tableName = makeTableName();
+    c.tableOperations().create(tableName);
+    c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
+    BatchWriter bw = c.createBatchWriter(tableName, null);
     for (String row : "a b c d e f g h i j k".split(" ")) {
       Mutation m = new Mutation(row);
       m.put("cf", "cq", "value");
       bw.addMutation(m);
     }
     bw.close();
-    c.tableOperations().flush("test", null, null, true);
-    c.tableOperations().merge("test", new Text("c1"), new Text("f1"));
-    assertEquals(8, c.tableOperations().listSplits("test").size());
+    c.tableOperations().flush(tableName, null, null, true);
+    c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
+    assertEquals(8, c.tableOperations().listSplits(tableName).size());
   }
   
   @Test(timeout=30*1000)
   public void mergeSize() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("merge");
-    c.tableOperations().addSplits("merge", splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
-    BatchWriter bw = c.createBatchWriter("merge", new BatchWriterConfig());
+    String tableName = makeTableName();
+    c.tableOperations().create(tableName);
+    c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
+    BatchWriter bw = c.createBatchWriter(tableName, null);
     for (String row : "c e f y".split(" ")) {
       Mutation m = new Mutation(row);
       m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
       bw.addMutation(m);
     }
     bw.close();
-    c.tableOperations().flush("merge", null, null, true);
+    c.tableOperations().flush(tableName, null, null, true);
     Merge merge = new Merge();
-    merge.mergomatic(c, "merge", null, null, 100, false);
-    assertArrayEquals("b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits("merge")));
-    merge.mergomatic(c, "merge", null, null, 100, true);
-    assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits("merge")));
+    merge.mergomatic(c, tableName, null, null, 100, false);
+    assertArrayEquals("b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
+    merge.mergomatic(c, tableName, null, null, 100, true);
+    assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
   }
 
   private String[] toStrings(Collection<Text> listSplits) {
@@ -101,22 +103,23 @@ public class MergeIT extends MacTest {
   public void mergeTest() throws Exception {
     int tc = 0;
     Connector c = getConnector();
-    runMergeTest(c, "foo" + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+    String tableName = makeTableName();
+    runMergeTest(c, tableName + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
     
-    runMergeTest(c, "foo" + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
-    runMergeTest(c, "foo" + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
-    runMergeTest(c, "foo" + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
+    runMergeTest(c, tableName + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+    runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
+    runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
     
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
-    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
+    runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
     
   }
   
@@ -140,7 +143,7 @@ public class MergeIT extends MacTest {
     }
     conn.tableOperations().addSplits(table, splitSet);
     
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    BatchWriter bw = conn.createBatchWriter(table, null);
     HashSet<String> expected = new HashSet<String>();
     for (String row : inserts) {
       Mutation m = new Mutation(row);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
index f9c8b3c..8ccd516 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
@@ -35,7 +35,7 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-public class MergeMetaIT extends MacTest {
+public class MergeMetaIT extends SimpleMacIT {
   
   @Test(timeout = 60 * 1000)
   public void mergeMeta() throws Exception {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/057b8d6c/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
index ec00fe5..f6e252a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
@@ -20,11 +20,11 @@ import static org.junit.Assert.assertEquals;
 
 import org.junit.Test;
 
-public class NativeMapIT extends MacTest {
+public class NativeMapIT extends SimpleMacIT {
   
   @Test(timeout=15*1000)
   public void test() throws Exception {
-    assertEquals(0, cluster.exec(NativeMapTest.class).waitFor());
+    assertEquals(0, exec(NativeMapTest.class).waitFor());
   }
   
 }


[44/50] git commit: Merge branch 'Merge ctubbsii changes for ACCUMULO-998 and ACCUMULO-1533'

Posted by kt...@apache.org.
Merge branch 'Merge ctubbsii changes for ACCUMULO-998 and ACCUMULO-1533'


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/2ad6a818
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/2ad6a818
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/2ad6a818

Branch: refs/heads/ACCUMULO-1000
Commit: 2ad6a818857ad0f2caabb0fa086ba1666968e413
Parents: 1a48f7c a69a9d6
Author: Christopher Tubbs <ct...@apache.org>
Authored: Mon Jul 22 14:34:29 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Mon Jul 22 14:34:29 2013 -0400

----------------------------------------------------------------------
 .../apache/accumulo/core/cli/ClientOpts.java    |   4 +-
 .../core/client/AccumuloSecurityException.java  |  18 +-
 .../apache/accumulo/core/client/Instance.java   |  15 -
 .../accumulo/core/client/ZooKeeperInstance.java |  61 +-
 .../core/client/impl/ConnectorImpl.java         |   1 -
 .../mapreduce/AccumuloFileOutputFormat.java     |  49 --
 .../client/mapreduce/AccumuloOutputFormat.java  | 164 -----
 .../core/client/mapreduce/InputFormatBase.java  | 385 ------------
 .../accumulo/core/client/mock/MockInstance.java |   6 -
 .../core/conf/AccumuloConfiguration.java        |  18 +-
 .../apache/accumulo/core/data/ColumnUpdate.java |  10 -
 .../accumulo/core/security/thrift/AuthInfo.java | 616 -------------------
 .../core/security/thrift/SecurityErrorCode.java | 112 ----
 .../thrift/ThriftSecurityException.java         | 521 ----------------
 .../apache/accumulo/core/util/shell/Shell.java  |   4 +-
 .../core/util/shell/commands/FateCommand.java   |   7 +-
 .../accumulo/core/zookeeper/ZooCache.java       |  40 --
 .../apache/accumulo/core/zookeeper/ZooUtil.java |  42 ++
 core/src/main/thrift/security.thrift            |  40 --
 .../core/client/impl/TabletLocatorImplTest.java |   6 -
 .../mapreduce/AccumuloInputFormatTest.java      |  40 --
 .../client/security/SecurityErrorCodeTest.java  |   8 +-
 .../accumulo/core/file/rfile/RFileTest.java     | 251 ++++----
 .../accumulo/server/client/HdfsZooInstance.java |  16 +-
 .../accumulo/server/conf/ZooConfiguration.java  |   5 +-
 25 files changed, 183 insertions(+), 2256 deletions(-)
----------------------------------------------------------------------



[04/50] git commit: ACCUMULO-1573

Posted by kt...@apache.org.
ACCUMULO-1573


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/122b1b13
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/122b1b13
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/122b1b13

Branch: refs/heads/ACCUMULO-1000
Commit: 122b1b13f0433c0b69693ad57713de048467dad3
Parents: 4453294
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 11:26:17 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 11:26:17 2013 -0400

----------------------------------------------------------------------
 .../java/org/apache/accumulo/server/util/RestoreZookeeper.java  | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/122b1b13/server/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java b/server/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
index f80c098..bad0d8b 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
@@ -64,7 +64,10 @@ public class RestoreZookeeper {
         cwd.push(path);
       } else if ("dump".equals(name)) {
         String root = attributes.getValue("root");
-        cwd.push(root);
+        if (root.equals("/"))
+          cwd.push("");
+        else
+          cwd.push(root);
         create(root, "", "utf-8");
       }
     }
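
The special case for a root of "/" above keeps the restored child paths from starting with a doubled slash, which ZooKeeper rejects as an invalid znode path. A small illustration of the difference; the join helper is illustrative, not code from the patch:

public class ZnodePathJoin {
  static String child(String parent, String name) {
    return parent + "/" + name;
  }

  public static void main(String[] args) {
    System.out.println(child("/", "accumulo")); // "//accumulo" - doubled slash, invalid znode path
    System.out.println(child("", "accumulo"));  // "/accumulo"  - what RestoreZookeeper needs
  }
}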


[38/50] git commit: ACCUMULO-1596 moved Mutator interface up to IZooReaderWriter

Posted by kt...@apache.org.
ACCUMULO-1596 moved Mutator interface up to IZooReaderWriter


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/734cd505
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/734cd505
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/734cd505

Branch: refs/heads/ACCUMULO-1000
Commit: 734cd505d37f229d9c7204276059f33d3a7707ce
Parents: 93f741e
Author: Eric Newton <ec...@apache.org>
Authored: Mon Jul 22 13:44:48 2013 -0400
Committer: Eric Newton <ec...@apache.org>
Committed: Mon Jul 22 13:44:48 2013 -0400

----------------------------------------------------------------------
 .../org/apache/accumulo/fate/zookeeper/IZooReaderWriter.java    | 5 ++++-
 .../org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java     | 4 ----
 2 files changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/734cd505/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReaderWriter.java
----------------------------------------------------------------------
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReaderWriter.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReaderWriter.java
index 5dcad23..a43ae7c 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReaderWriter.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReaderWriter.java
@@ -18,7 +18,6 @@ package org.apache.accumulo.fate.zookeeper;
 
 import java.util.List;
 
-import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.zookeeper.KeeperException;
@@ -54,6 +53,10 @@ public interface IZooReaderWriter extends IZooReader {
   
   public abstract void delete(String path, int version) throws InterruptedException, KeeperException;
   
+  public interface Mutator {
+    byte[] mutate(byte[] currentValue) throws Exception;
+  }
+  
   public abstract byte[] mutate(String zPath, byte[] createValue, List<ACL> acl, Mutator mutator) throws Exception;
   
   public abstract boolean isLockHeld(ZooUtil.LockID lockID) throws KeeperException, InterruptedException;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/734cd505/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java
----------------------------------------------------------------------
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java
index 7800ec0..13c3c1a 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java
@@ -114,10 +114,6 @@ public class ZooReaderWriter extends ZooReader implements IZooReaderWriter {
     getZooKeeper().delete(path, version);
   }
   
-  public interface Mutator {
-    byte[] mutate(byte[] currentValue) throws Exception;
-  }
-  
   @Override
   public byte[] mutate(String zPath, byte[] createValue, List<ACL> acl, Mutator mutator) throws Exception {
     if (createValue != null) {
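
With the interface declared on IZooReaderWriter, callers can implement Mutator against the interface instead of the concrete ZooReaderWriter. A hedged usage sketch; only the interface signature comes from the hunks above, the counter znode and the retry assumption are illustrative:

import java.nio.charset.StandardCharsets;

import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;

public class IncrementingMutator implements IZooReaderWriter.Mutator {
  @Override
  public byte[] mutate(byte[] currentValue) throws Exception {
    // Keep this a pure function of currentValue: mutate(zPath, ...) presumably calls it
    // again if its versioned write to ZooKeeper loses a race with another writer.
    long next = Long.parseLong(new String(currentValue, StandardCharsets.UTF_8)) + 1;
    return Long.toString(next).getBytes(StandardCharsets.UTF_8);
  }
}

// Then, assuming a configured IZooReaderWriter named zrw and a List<ACL> named acl:
// zrw.mutate("/some/counter", "0".getBytes(StandardCharsets.UTF_8), acl, new IncrementingMutator());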


[47/50] git commit: Merge remote-tracking branch 'origin' into ACCUMULO-1000

Posted by kt...@apache.org.
Merge remote-tracking branch 'origin' into ACCUMULO-1000


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/d0e8b37d
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/d0e8b37d
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/d0e8b37d

Branch: refs/heads/ACCUMULO-1000
Commit: d0e8b37d157aa7032793cc465a063c1c6d2d6cf2
Parents: 96a4815 2ad6a81
Author: Keith Turner <kt...@apache.org>
Authored: Mon Jul 22 17:09:40 2013 -0400
Committer: Keith Turner <kt...@apache.org>
Committed: Mon Jul 22 17:09:40 2013 -0400

----------------------------------------------------------------------
 .../apache/accumulo/core/cli/ClientOpts.java    |   4 +-
 .../core/client/AccumuloSecurityException.java  |  18 +-
 .../apache/accumulo/core/client/Instance.java   |  15 -
 .../accumulo/core/client/ZooKeeperInstance.java |  61 +-
 .../core/client/impl/ConnectorImpl.java         |   1 -
 .../mapreduce/AccumuloFileOutputFormat.java     |  49 --
 .../client/mapreduce/AccumuloOutputFormat.java  | 164 -----
 .../core/client/mapreduce/InputFormatBase.java  | 385 ------------
 .../accumulo/core/client/mock/MockInstance.java |   6 -
 .../core/conf/AccumuloConfiguration.java        |  18 +-
 .../org/apache/accumulo/core/conf/Property.java |   6 -
 .../apache/accumulo/core/data/ColumnUpdate.java |  10 -
 .../accumulo/core/security/thrift/AuthInfo.java | 616 -------------------
 .../core/security/thrift/SecurityErrorCode.java | 112 ----
 .../thrift/ThriftSecurityException.java         | 521 ----------------
 .../apache/accumulo/core/util/shell/Shell.java  |   4 +-
 .../core/util/shell/commands/FateCommand.java   |   7 +-
 .../accumulo/core/zookeeper/ZooCache.java       |  40 --
 .../apache/accumulo/core/zookeeper/ZooUtil.java |  42 ++
 core/src/main/thrift/security.thrift            |  40 --
 .../core/client/impl/TabletLocatorImplTest.java |   6 -
 .../mapreduce/AccumuloInputFormatTest.java      |  40 --
 .../client/security/SecurityErrorCodeTest.java  |   8 +-
 .../accumulo/core/file/rfile/RFileTest.java     | 251 ++++----
 .../fate/zookeeper/IZooReaderWriter.java        |   5 +-
 .../fate/zookeeper/ZooReaderWriter.java         |   4 -
 .../accumulo/server/client/HdfsZooInstance.java |  16 +-
 .../accumulo/server/conf/ZooConfiguration.java  |   5 +-
 .../apache/accumulo/server/master/Master.java   |   2 +-
 .../master/state/tables/TableManager.java       |   2 +-
 .../master/tableOps/CancelCompactions.java      |   2 +-
 .../server/master/tableOps/CompactRange.java    |   2 +-
 .../server/master/tableOps/RenameTable.java     |   2 +-
 .../accumulo/server/master/tableOps/Utils.java  |   2 +-
 .../server/metrics/AbstractMetricsImpl.java     |   1 -
 .../server/tabletserver/TabletServer.java       |  17 -
 .../accumulo/server/util/TServerUtils.java      |  11 +-
 37 files changed, 201 insertions(+), 2294 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/d0e8b37d/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/d0e8b37d/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/d0e8b37d/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------


[10/50] git commit: Merge branch '1.5.1-SNAPSHOT'

Posted by kt...@apache.org.
Merge branch '1.5.1-SNAPSHOT'


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/e6d6fab0
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/e6d6fab0
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/e6d6fab0

Branch: refs/heads/ACCUMULO-1000
Commit: e6d6fab048174c2a6e8d1c769a8f25e6aece98ee
Parents: a131e5d c82c431
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 11:57:38 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 11:57:38 2013 -0400

----------------------------------------------------------------------
 README                                          | 10 ++---
 bin/accumulo                                    |  4 +-
 bin/config.sh                                   | 41 +++++++++++++++-----
 bin/start-all.sh                                |  8 ++--
 bin/start-here.sh                               |  6 +--
 bin/start-server.sh                             |  2 +-
 bin/stop-all.sh                                 |  4 +-
 bin/stop-here.sh                                |  4 +-
 bin/tdown.sh                                    |  2 +-
 bin/tup.sh                                      |  2 +-
 .../1GB/native-standalone/accumulo-env.sh       |  4 +-
 conf/examples/1GB/standalone/accumulo-env.sh    |  4 +-
 .../2GB/native-standalone/accumulo-env.sh       |  4 +-
 conf/examples/2GB/standalone/accumulo-env.sh    |  4 +-
 .../3GB/native-standalone/accumulo-env.sh       |  4 +-
 conf/examples/3GB/standalone/accumulo-env.sh    |  4 +-
 .../512MB/native-standalone/accumulo-env.sh     |  4 +-
 conf/examples/512MB/standalone/accumulo-env.sh  |  4 +-
 .../org/apache/accumulo/server/Accumulo.java    |  4 +-
 .../start/classloader/AccumuloClassLoader.java  |  9 +++--
 test/system/continuous/agitator.pl              |  2 +-
 test/system/continuous/magitator.pl             |  6 +--
 test/system/continuous/mapred-setup.sh          |  2 +-
 test/system/continuous/start-stats.sh           |  2 +-
 test/system/randomwalk/README                   |  2 +-
 test/system/randomwalk/bin/reset-cluster.sh     | 10 ++---
 test/system/randomwalk/bin/start-all.sh         |  4 +-
 test/system/randomwalk/bin/start-local.sh       |  4 +-
 test/system/scalability/run.py                  |  2 +-
 29 files changed, 94 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/e6d6fab0/README
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e6d6fab0/bin/accumulo
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e6d6fab0/bin/config.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e6d6fab0/bin/start-server.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e6d6fab0/server/src/main/java/org/apache/accumulo/server/Accumulo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e6d6fab0/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
----------------------------------------------------------------------
diff --cc start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
index d9ff8e2,594ad8f..ccd85a6
--- a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
@@@ -48,9 -45,8 +48,9 @@@ public class AccumuloClassLoader 
    
    public static final String CLASSPATH_PROPERTY_NAME = "general.classpaths";
    
 +  /* @formatter:off */
    public static final String ACCUMULO_CLASSPATH_VALUE = 
-       "$ACCUMULO_HOME/conf,\n" + 
+       "$ACCUMULO_CONF_DIR,\n" + 
            "$ACCUMULO_HOME/lib/[^.].*.jar,\n" + 
            "$ZOOKEEPER_HOME/zookeeper[^.].*.jar,\n" + 
            "$HADOOP_CONF_DIR,\n" +


[42/50] ACCUMULO-1533 Clean up deprecated mapreduce and thrift code

Posted by kt...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
deleted file mode 100644
index c1e30c6..0000000
--- a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.zookeeper;
-
-import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.zookeeper.Watcher;
-
-/**
- * This class remains here for backwards compatibility.
- * 
- * @deprecated since 1.5, replaced by {@link org.apache.accumulo.fate.zookeeper.ZooCache}
- */
-@Deprecated
-public class ZooCache extends org.apache.accumulo.fate.zookeeper.ZooCache {
-  public ZooCache(String zooKeepers, int sessionTimeout) {
-    super(zooKeepers, sessionTimeout);
-  }
-  
-  public ZooCache(String zooKeepers, int sessionTimeout, Watcher watcher) {
-    super(zooKeepers, sessionTimeout, watcher);
-  }
-  
-  public ZooCache(ZooReader reader, Watcher watcher) {
-    super(reader, watcher);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
index 86dc4d2..17447e5 100644
--- a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
@@ -16,10 +16,23 @@
  */
 package org.apache.accumulo.core.zookeeper;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.file.FileUtil;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.log4j.Logger;
 
 public class ZooUtil extends org.apache.accumulo.fate.zookeeper.ZooUtil {
+  
+  private static final Logger log = Logger.getLogger(ZooUtil.class);
+  
   public static String getRoot(final Instance instance) {
     return getRoot(instance.getInstanceID());
   }
@@ -27,4 +40,33 @@ public class ZooUtil extends org.apache.accumulo.fate.zookeeper.ZooUtil {
   public static String getRoot(final String instanceId) {
     return Constants.ZROOT + "/" + instanceId;
   }
+  
+  /**
+   * Utility to support certain client side utilities to minimize command-line options.
+   */
+  public static String getInstanceIDFromHdfs(Path instanceDirectory) {
+    try {
+      @SuppressWarnings("deprecation")
+      FileSystem fs = FileUtil.getFileSystem(CachedConfiguration.getInstance(), AccumuloConfiguration.getSiteConfiguration());
+      FileStatus[] files = null;
+      try {
+        files = fs.listStatus(instanceDirectory);
+      } catch (FileNotFoundException ex) {
+        // ignored
+      }
+      log.debug("Trying to read instance id from " + instanceDirectory);
+      if (files == null || files.length == 0) {
+        log.error("unable obtain instance id at " + instanceDirectory);
+        throw new RuntimeException("Accumulo not initialized, there is no instance id at " + instanceDirectory);
+      } else if (files.length != 1) {
+        log.error("multiple potential instances in " + instanceDirectory);
+        throw new RuntimeException("Accumulo found multiple possible instance ids in " + instanceDirectory);
+      } else {
+        String result = files[0].getPath().getName();
+        return result;
+      }
+    } catch (IOException e) {
+      throw new RuntimeException("Accumulo not initialized, there is no instance id at " + instanceDirectory, e);
+    }
+  }
 }
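
The new ZooUtil.getInstanceIDFromHdfs resolves the instance id by listing the single entry under the instance-id directory; HdfsZooInstance and ZooConfiguration below switch to it from the deprecated ZooKeeperInstance method. A hedged usage sketch; the HDFS path here is illustrative, the real callers pass ServerConstants.getInstanceIdLocation():

import org.apache.accumulo.core.zookeeper.ZooUtil;
import org.apache.hadoop.fs.Path;

public class PrintInstanceId {
  public static void main(String[] args) {
    // The returned value is the name of the single file under the given directory.
    String id = ZooUtil.getInstanceIDFromHdfs(new Path("/accumulo/instance_id"));
    System.out.println("instance id: " + id);
  }
}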

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/thrift/security.thrift
----------------------------------------------------------------------
diff --git a/core/src/main/thrift/security.thrift b/core/src/main/thrift/security.thrift
index 3f1a371..66235a8 100644
--- a/core/src/main/thrift/security.thrift
+++ b/core/src/main/thrift/security.thrift
@@ -17,39 +17,6 @@
 namespace java org.apache.accumulo.core.security.thrift
 namespace cpp org.apache.accumulo.core.security.thrift
 
-/**
-@deprecated since 1.5, see org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode
-*/
-enum SecurityErrorCode {
-    DEFAULT_SECURITY_ERROR = 0,
-    BAD_CREDENTIALS = 1,
-    PERMISSION_DENIED = 2,
-    USER_DOESNT_EXIST = 3,
-    CONNECTION_ERROR = 4,
-    USER_EXISTS = 5,
-    GRANT_INVALID = 6,
-    BAD_AUTHORIZATIONS = 7,
-    INVALID_INSTANCEID = 8,
-    TABLE_DOESNT_EXIST = 9,
-    UNSUPPORTED_OPERATION = 10,
-    INVALID_TOKEN = 11,
-    AUTHENTICATOR_FAILED = 12,
-    AUTHORIZOR_FAILED = 13,
-    PERMISSIONHANDLER_FAILED = 14,
-    TOKEN_EXPIRED = 15
-    SERIALIZATION_ERROR = 16;
-    INSUFFICIENT_PROPERTIES = 17;
-}
-
-/**
-@deprecated since 1.5
-*/
-struct AuthInfo {
-    1:string user,
-    2:binary password,
-    3:string instanceId
-}
-
 struct TCredentials {
     1:string principal,
     2:string tokenClassName,
@@ -57,10 +24,3 @@ struct TCredentials {
     4:string instanceId
 }
 
-/**
-@deprecated since 1.5, see org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException
-*/
-exception ThriftSecurityException {
-    1:string user,
-    2:SecurityErrorCode code
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java b/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
index f160cb3..0a34575 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
@@ -451,12 +451,6 @@ public class TabletLocatorImplTest extends TestCase {
       throw new UnsupportedOperationException();
     }
     
-    @Deprecated
-    @Override
-    public Connector getConnector(org.apache.accumulo.core.security.thrift.AuthInfo auth) throws AccumuloException, AccumuloSecurityException {
-      return getConnector(auth.user, auth.getPassword());
-    }
-    
     @Override
     public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
       throw new UnsupportedOperationException();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
index 0ee03a2..c9539c4 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
@@ -55,46 +55,6 @@ public class AccumuloInputFormatTest {
   private static final String TEST_TABLE_1 = PREFIX + "_mapreduce_table_1";
   
   /**
-   * Test basic setting & getting of max versions.
-   * 
-   * @throws IOException
-   *           Signals that an I/O exception has occurred.
-   */
-  @Deprecated
-  @Test
-  public void testMaxVersions() throws IOException {
-    Job job = new Job();
-    AccumuloInputFormat.setMaxVersions(job.getConfiguration(), 1);
-    int version = AccumuloInputFormat.getMaxVersions(job.getConfiguration());
-    assertEquals(1, version);
-  }
-  
-  /**
-   * Test max versions with an invalid value.
-   * 
-   * @throws IOException
-   *           Signals that an I/O exception has occurred.
-   */
-  @Deprecated
-  @Test(expected = IOException.class)
-  public void testMaxVersionsLessThan1() throws IOException {
-    Job job = new Job();
-    AccumuloInputFormat.setMaxVersions(job.getConfiguration(), 0);
-  }
-  
-  /**
-   * Test no max version configured.
-   * 
-   * @throws IOException
-   */
-  @Deprecated
-  @Test
-  public void testNoMaxVersion() throws IOException {
-    Job job = new Job();
-    assertEquals(-1, AccumuloInputFormat.getMaxVersions(job.getConfiguration()));
-  }
-  
-  /**
    * Check that the iterator configuration is getting stored in the Job conf correctly.
    * 
    * @throws IOException

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java b/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java
index 2ff881b..7554a49 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java
@@ -25,13 +25,11 @@ import org.junit.Test;
  * 
  */
 public class SecurityErrorCodeTest {
-
-  @SuppressWarnings("deprecation")
+  
   @Test
   public void testEnumsSame() {
     HashSet<String> secNames1 = new HashSet<String>();
     HashSet<String> secNames2 = new HashSet<String>();
-    HashSet<String> secNames3 = new HashSet<String>();
     
     for (SecurityErrorCode sec : SecurityErrorCode.values())
       secNames1.add(sec.name());
@@ -39,10 +37,6 @@ public class SecurityErrorCodeTest {
     for (org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode sec : org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.values())
       secNames2.add(sec.name());
     
-    for (org.apache.accumulo.core.security.thrift.SecurityErrorCode sec : org.apache.accumulo.core.security.thrift.SecurityErrorCode.values())
-      secNames3.add(sec.name());
-    
     Assert.assertEquals(secNames1, secNames2);
-    Assert.assertEquals(secNames1, secNames3);
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
index 37b35a2..a61d4bb 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
@@ -1798,7 +1798,7 @@ public class RFileTest {
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
   
-  @Test
+  // @Test
   public void testEncryptedRFiles() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     @SuppressWarnings("deprecation")

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
index f306b86..2ad81cf 100644
--- a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
@@ -34,7 +34,6 @@ import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.CredentialHelper;
-import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.OpTimer;
 import org.apache.accumulo.core.util.StringUtil;
@@ -123,8 +122,7 @@ public class HdfsZooInstance implements Instance {
   
   private static synchronized void _getInstanceID() {
     if (instanceId == null) {
-      @SuppressWarnings("deprecation")
-      String instanceIdFromFile = ZooKeeperInstance.getInstanceIDFromHdfs(ServerConstants.getInstanceIdLocation());
+      String instanceIdFromFile = ZooUtil.getInstanceIDFromHdfs(ServerConstants.getInstanceIdLocation());
       instanceId = instanceIdFromFile;
     }
   }
@@ -146,12 +144,7 @@ public class HdfsZooInstance implements Instance {
   
   @Override
   public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
-    return getConnector(CredentialHelper.create(principal, token, getInstanceID()));
-  }
-  
-  @SuppressWarnings("deprecation")
-  private Connector getConnector(TCredentials cred) throws AccumuloException, AccumuloSecurityException {
-    return new ConnectorImpl(this, cred);
+    return new ConnectorImpl(this, CredentialHelper.create(principal, token, getInstanceID()));
   }
   
   @Override
@@ -191,9 +184,4 @@ public class HdfsZooInstance implements Instance {
     System.out.println("Masters: " + StringUtil.join(instance.getMasterLocations(), ", "));
   }
   
-  @Deprecated
-  @Override
-  public Connector getConnector(org.apache.accumulo.core.security.thrift.AuthInfo auth) throws AccumuloException, AccumuloSecurityException {
-    return getConnector(auth.user, auth.getPassword());
-  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java b/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
index d006498..18381c7 100644
--- a/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
+++ b/server/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
@@ -26,7 +26,6 @@ import java.util.TreeMap;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
@@ -61,13 +60,13 @@ public class ZooConfiguration extends AccumuloConfiguration {
     if (instance == null) {
       propCache = new ZooCache(parent.get(Property.INSTANCE_ZK_HOST), (int) parent.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT));
       instance = new ZooConfiguration(parent);
-      @SuppressWarnings("deprecation")
-      String deprecatedInstanceIdFromHdfs = ZooKeeperInstance.getInstanceIDFromHdfs(ServerConstants.getInstanceIdLocation());
+      String deprecatedInstanceIdFromHdfs = ZooUtil.getInstanceIDFromHdfs(ServerConstants.getInstanceIdLocation());
       instanceId = deprecatedInstanceIdFromHdfs;
     }
     return instance;
   }
   
+  @Override
   public void invalidateCache() {
     if (propCache != null)
       propCache.clear();


[33/50] git commit: ACCUMULO-1132 Provide AuthenticationToken type for system user

Posted by kt...@apache.org.
ACCUMULO-1132 Provide AuthenticationToken type for system user


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a943f323
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a943f323
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a943f323

Branch: refs/heads/ACCUMULO-1000
Commit: a943f323b6ef9a614edee55c075eb63567b5c80a
Parents: 0793476
Author: Christopher Tubbs <ct...@apache.org>
Authored: Fri Jul 19 19:05:22 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Fri Jul 19 19:05:22 2013 -0400

----------------------------------------------------------------------
 .../client/admin/SecurityOperationsImpl.java    |   2 +-
 .../core/client/impl/ConnectorImpl.java         |   5 +-
 .../client/security/tokens/PasswordToken.java   |   7 +-
 .../core/security/CredentialHelper.java         |   2 +-
 .../accumulo/core/security/Credentials.java     |  18 +-
 server/pom.xml                                  |   6 +
 .../server/client/ClientServiceHandler.java     |  14 +-
 .../accumulo/server/client/HdfsZooInstance.java |   3 -
 .../client/security/token/SystemToken.java      |  30 ---
 .../server/gc/GarbageCollectWriteAheadLogs.java |   8 +-
 .../server/gc/SimpleGarbageCollector.java       |   9 +-
 .../accumulo/server/master/LiveTServerSet.java  |  24 +--
 .../apache/accumulo/server/master/Master.java   |   8 +-
 .../server/master/TabletGroupWatcher.java       |  12 +-
 .../master/balancer/TableLoadBalancer.java      |   4 +-
 .../server/master/balancer/TabletBalancer.java  |   8 +-
 .../server/master/state/MetaDataStateStore.java |   4 +-
 .../server/master/tableOps/BulkImport.java      |   4 +-
 .../server/master/tableOps/CloneTable.java      |  10 +-
 .../server/master/tableOps/CreateTable.java     |  12 +-
 .../server/master/tableOps/DeleteTable.java     |   6 +-
 .../server/master/tableOps/ImportTable.java     |   8 +-
 .../apache/accumulo/server/monitor/Monitor.java |   6 +-
 .../monitor/servlets/TServersServlet.java       |  18 +-
 .../server/monitor/servlets/TablesServlet.java  |   6 +-
 .../accumulo/server/problems/ProblemReport.java |   6 +-
 .../server/problems/ProblemReports.java         |   8 +-
 .../security/AuditedSecurityOperation.java      |   2 +-
 .../server/security/SecurityConstants.java      | 111 ----------
 .../server/security/SecurityOperation.java      | 207 +++++++++----------
 .../server/security/SystemCredentials.java      | 132 ++++++++++++
 .../accumulo/server/tabletserver/Tablet.java    |  34 +--
 .../server/tabletserver/TabletServer.java       |  78 ++++---
 .../org/apache/accumulo/server/util/Admin.java  |   6 +-
 .../server/util/FindOfflineTablets.java         |   6 +-
 .../apache/accumulo/server/util/Initialize.java |   4 +-
 .../accumulo/server/util/MetadataTableUtil.java |  16 +-
 .../server/security/SystemCredentialsTest.java  |  67 ++++++
 server/src/test/resources/accumulo-site.xml     |  32 +++
 .../apache/accumulo/test/GetMasterStats.java    |   6 +-
 .../continuous/ContinuousStatsCollector.java    |   7 +-
 .../test/functional/SplitRecoveryTest.java      |  26 +--
 .../metadata/MetadataBatchScanTest.java         |   6 +-
 .../test/performance/thrift/NullTserver.java    |   4 +-
 .../test/randomwalk/concurrent/Shutdown.java    |  24 +--
 .../test/randomwalk/concurrent/StartAll.java    |   8 +-
 .../randomwalk/security/WalkingSecurity.java    |   3 +-
 47 files changed, 549 insertions(+), 478 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/core/src/main/java/org/apache/accumulo/core/client/admin/SecurityOperationsImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/SecurityOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/admin/SecurityOperationsImpl.java
index 84a1ebd..d5e1d8b 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/SecurityOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/SecurityOperationsImpl.java
@@ -157,7 +157,7 @@ public class SecurityOperationsImpl implements SecurityOperations {
         client.changeLocalUserPassword(Tracer.traceInfo(), credentials, principal, ByteBuffer.wrap(token.getPassword()));
       }
     });
-    if (this.credentials.principal.equals(principal)) {
+    if (this.credentials.getPrincipal().equals(principal)) {
       this.credentials = toChange;
     }
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
index 1702082..3c6e445 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
@@ -58,9 +58,8 @@ public class ConnectorImpl extends Connector {
     
     this.credentials = cred;
     
-    // hardcoded string for SYSTEM user since the definition is
-    // in server code
-    if (!cred.getPrincipal().equals("!SYSTEM")) {
+    // Skip fail fast for system services; string literal for class name, to avoid
+    if (!"org.apache.accumulo.server.security.SystemCredentials$SystemToken".equals(cred.getTokenClassName())) {
       ServerClient.execute(instance, new ClientExec<ClientService.Client>() {
         @Override
         public void execute(ClientService.Client iface) throws Exception {
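
For context on the new check: core code cannot reference the server-side system token class directly (that token now lives under SystemCredentials in the server jar, per the file list above), so the connector identifies the internal token by its fully qualified class name and skips the eager fail-fast authentication only in that case. The sketch below is illustrative only and not part of the commit; the class and method names in it are made up.

    // Illustrative sketch (not from the commit): isolating the class-name check
    // avoids any compile-time dependency on the server jar.
    public class SystemTokenCheckSketch {
      private static final String SYSTEM_TOKEN_CLASS =
          "org.apache.accumulo.server.security.SystemCredentials$SystemToken";

      static boolean shouldFailFast(String tokenClassName) {
        // Ordinary client tokens (e.g. PasswordToken) get an eager authentication
        // round trip so bad credentials fail immediately; the internal system
        // token skips that round trip.
        return !SYSTEM_TOKEN_CLASS.equals(tokenClassName);
      }

      public static void main(String[] args) {
        System.out.println(shouldFailFast("org.apache.accumulo.core.client.security.tokens.PasswordToken")); // true
        System.out.println(shouldFailFast(SYSTEM_TOKEN_CLASS)); // false
      }
    }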

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
index 50d6938..c39fb8d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
@@ -137,15 +137,14 @@ public class PasswordToken implements AuthenticationToken {
       }
     }
   }
-
+  
   @Override
   public void init(Properties properties) {
-    if (properties.containsKey("password")){
+    if (properties.containsKey("password")) {
       setPassword(CharBuffer.wrap(properties.get("password")));
-    }else
+    } else
       throw new IllegalArgumentException("Missing 'password' property");
   }
-
   
   @Override
   public Set<TokenProperty> getProperties() {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/core/src/main/java/org/apache/accumulo/core/security/CredentialHelper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/CredentialHelper.java b/core/src/main/java/org/apache/accumulo/core/security/CredentialHelper.java
index 69e3ba1..15fc47a 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/CredentialHelper.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/CredentialHelper.java
@@ -77,7 +77,7 @@ public class CredentialHelper {
   }
   
   public static AuthenticationToken extractToken(TCredentials toAuth) throws AccumuloSecurityException {
-    return extractToken(toAuth.tokenClassName, toAuth.getToken());
+    return extractToken(toAuth.getTokenClassName(), toAuth.getToken());
   }
   
   public static TCredentials createSquelchError(String principal, AuthenticationToken token, String instanceID) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/Credentials.java b/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
index 31fe18d..2c1dd8b 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
@@ -24,20 +24,30 @@ import org.apache.accumulo.core.security.thrift.TCredentials;
 /**
  * A wrapper for internal use. This class carries the instance, principal, and authentication token for use in the public API, in a non-serialized form. This is
  * important, so that the authentication token carried in a {@link Connector} can be destroyed, invalidating future RPC operations from that {@link Connector}.
+ * <p>
+ * See ACCUMULO-1312
+ * 
+ * @since 1.6.0
  */
 public class Credentials {
   
-  private Instance instance;
   private String principal;
   private AuthenticationToken token;
   
-  public Credentials(Instance instance, String principal, AuthenticationToken token) {
-    this.instance = instance;
+  public Credentials(String principal, AuthenticationToken token) {
     this.principal = principal;
     this.token = token;
   }
   
-  public TCredentials toThrift() {
+  public String getPrincipal() {
+    return principal;
+  }
+  
+  public AuthenticationToken getToken() {
+    return token;
+  }
+  
+  public TCredentials toThrift(Instance instance) {
     return CredentialHelper.createSquelchError(principal, token, instance.getInstanceID());
   }
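
The reworked wrapper no longer captures an Instance at construction time; the Instance is supplied only when serializing for RPC. Below is a hedged usage sketch built from the constructor and methods shown in the diff above; the instance name, ZooKeeper host, principal, and password are placeholder values, and the sketch only runs against a real instance.

    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.security.Credentials;
    import org.apache.accumulo.core.security.thrift.TCredentials;

    public class CredentialsSketch {
      public static void main(String[] args) {
        // Placeholder connection details; substitute a real instance and ZooKeepers.
        Instance instance = new ZooKeeperInstance("myInstance", "zkhost:2181");

        // The wrapper now carries only a principal and its token ...
        Credentials creds = new Credentials("root", new PasswordToken("secret"));
        String principal = creds.getPrincipal();
        AuthenticationToken token = creds.getToken();

        // ... and the Instance is passed in only when converting for the wire.
        TCredentials rpcForm = creds.toThrift(instance);
        System.out.println(principal + " (" + token.getClass().getSimpleName() + ") -> " + rpcForm.getInstanceId());
      }
    }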
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/pom.xml b/server/pom.xml
index 75447be..ff846b4 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -124,6 +124,12 @@
     </dependency>
   </dependencies>
   <build>
+    <testResources>
+      <testResource>
+        <filtering>true</filtering>
+        <directory>src/test/resources</directory>
+      </testResource>
+    </testResources>
     <pluginManagement>
       <plugins>
         <plugin>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java b/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
index 6c3f110..6fd6a65 100644
--- a/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
+++ b/server/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
@@ -135,14 +135,14 @@ public class ClientServiceHandler implements ClientService.Iface {
   @Override
   public void changeLocalUserPassword(TInfo tinfo, TCredentials credentials, String principal, ByteBuffer password) throws ThriftSecurityException {
     PasswordToken token = new PasswordToken(password);
-    TCredentials toChange = CredentialHelper.createSquelchError(principal, token, credentials.instanceId);
+    TCredentials toChange = CredentialHelper.createSquelchError(principal, token, credentials.getInstanceId());
     security.changePassword(credentials, toChange);
   }
   
   @Override
   public void createLocalUser(TInfo tinfo, TCredentials credentials, String principal, ByteBuffer password) throws ThriftSecurityException {
     PasswordToken token = new PasswordToken(password);
-    TCredentials newUser = CredentialHelper.createSquelchError(principal, token, credentials.instanceId);
+    TCredentials newUser = CredentialHelper.createSquelchError(principal, token, credentials.getInstanceId());
     security.createUser(credentials, newUser, new Authorizations());
   }
   
@@ -230,11 +230,10 @@ public class ClientServiceHandler implements ClientService.Iface {
   }
   
   @Override
-  public List<String> bulkImportFiles(TInfo tinfo, final TCredentials tikw, final long tid, final String tableId, final List<String> files,
+  public List<String> bulkImportFiles(TInfo tinfo, final TCredentials credentials, final long tid, final String tableId, final List<String> files,
       final String errorDir, final boolean setTime) throws ThriftSecurityException, ThriftTableOperationException, TException {
     try {
-      final TCredentials credentials = new TCredentials(tikw);
-      if (!security.hasSystemPermission(credentials, credentials.getPrincipal(), SystemPermission.SYSTEM))
+      if (!security.canPerformSystemActions(credentials))
         throw new AccumuloSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
       return transactionWatcher.run(Constants.BULK_ARBITRATOR_TYPE, tid, new Callable<List<String>>() {
         @Override
@@ -281,7 +280,6 @@ public class ClientServiceHandler implements ClientService.Iface {
     }
   }
   
-  @SuppressWarnings({"rawtypes", "unchecked"})
   @Override
   public boolean checkTableClass(TInfo tinfo, TCredentials credentials, String tableName, String className, String interfaceMatch) throws TException,
       ThriftTableOperationException, ThriftSecurityException {
@@ -291,7 +289,7 @@ public class ClientServiceHandler implements ClientService.Iface {
     String tableId = checkTableId(tableName, null);
     
     ClassLoader loader = getClass().getClassLoader();
-    Class shouldMatch;
+    Class<?> shouldMatch;
     try {
       shouldMatch = loader.loadClass(interfaceMatch);
       
@@ -307,7 +305,7 @@ public class ClientServiceHandler implements ClientService.Iface {
         currentLoader = AccumuloVFSClassLoader.getClassLoader();
       }
       
-      Class test = currentLoader.loadClass(className).asSubclass(shouldMatch);
+      Class<?> test = currentLoader.loadClass(className).asSubclass(shouldMatch);
       test.newInstance();
       return true;
     } catch (Exception e) {
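
The checkTableClass hunk above is mostly a generics cleanup (raw Class becomes Class<?>), but the underlying technique is worth spelling out: load the requested interface and the candidate class by name, narrow with asSubclass, and instantiate to prove the class is usable. A self-contained sketch of that pattern follows; the names in it are illustrative, not Accumulo code.

    public class ClassCheckSketch {
      // Returns true when className can be loaded, implements/extends
      // interfaceMatch, and has a usable no-arg constructor.
      static boolean check(String className, String interfaceMatch) {
        try {
          ClassLoader loader = ClassCheckSketch.class.getClassLoader();
          Class<?> shouldMatch = loader.loadClass(interfaceMatch);
          Class<?> test = loader.loadClass(className).asSubclass(shouldMatch);
          test.newInstance();
          return true;
        } catch (Exception e) {
          return false;
        }
      }

      public static void main(String[] args) {
        System.out.println(check("java.util.ArrayList", "java.util.List")); // true
        System.out.println(check("java.lang.String", "java.util.List"));    // false
      }
    }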

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
index db5ece0..f306b86 100644
--- a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
@@ -145,7 +145,6 @@ public class HdfsZooInstance implements Instance {
   }
   
   @Override
-  // Not really deprecated, just not for client use
   public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
     return getConnector(CredentialHelper.create(principal, token, getInstanceID()));
   }
@@ -156,13 +155,11 @@ public class HdfsZooInstance implements Instance {
   }
   
   @Override
-  // Not really deprecated, just not for client use
   public Connector getConnector(String user, byte[] pass) throws AccumuloException, AccumuloSecurityException {
     return getConnector(user, new PasswordToken(pass));
   }
   
   @Override
-  // Not really deprecated, just not for client use
   public Connector getConnector(String user, ByteBuffer pass) throws AccumuloException, AccumuloSecurityException {
     return getConnector(user, ByteBufferUtil.toBytes(pass));
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/client/security/token/SystemToken.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/security/token/SystemToken.java b/server/src/main/java/org/apache/accumulo/server/client/security/token/SystemToken.java
deleted file mode 100644
index 72b2217..0000000
--- a/server/src/main/java/org/apache/accumulo/server/client/security/token/SystemToken.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.client.security.token;
-
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-
-/**
- * @since 1.5.0
- */
-
-public class SystemToken extends PasswordToken {
-  
-  public SystemToken(byte[] systemPassword) {
-    super(systemPassword);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
index d50cff2..9bf7bf6 100644
--- a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
@@ -40,7 +40,7 @@ import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
@@ -165,7 +165,7 @@ public class GarbageCollectWriteAheadLogs {
           Client tserver = null;
           try {
             tserver = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
-            tserver.removeLogs(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), paths2strings(entry.getValue()));
+            tserver.removeLogs(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), paths2strings(entry.getValue()));
             log.debug("deleted " + entry.getValue() + " from " + entry.getKey());
             status.currentLog.deleted += entry.getValue().size();
           } catch (TException e) {
@@ -206,7 +206,7 @@ public class GarbageCollectWriteAheadLogs {
       result.add(path.toString());
     return result;
   }
-
+  
   private static Map<String,ArrayList<Path>> mapServersToFiles(Map<Path,String> fileToServerMap) {
     Map<String,ArrayList<Path>> result = new HashMap<String,ArrayList<Path>>();
     for (Entry<Path,String> fileServer : fileToServerMap.entrySet()) {
@@ -223,7 +223,7 @@ public class GarbageCollectWriteAheadLogs {
   private static int removeMetadataEntries(Map<Path,String> fileToServerMap, Set<Path> sortedWALogs, GCStatus status) throws IOException, KeeperException,
       InterruptedException {
     int count = 0;
-    Iterator<LogEntry> iterator = MetadataTableUtil.getLogEntries(SecurityConstants.getSystemCredentials());
+    Iterator<LogEntry> iterator = MetadataTableUtil.getLogEntries(SystemCredentials.get().getAsThrift());
     while (iterator.hasNext()) {
       for (String filename : iterator.next().logSet) {
         Path path;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
index f18e5bc..de73282 100644
--- a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
+++ b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
@@ -85,7 +85,7 @@ import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.master.state.tables.TableManager;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.util.TabletIterator;
@@ -162,7 +162,7 @@ public class SimpleGarbageCollector implements Iface {
     if (opts.address != null)
       gc.useAddress(address);
     
-    gc.init(fs, instance, SecurityConstants.getSystemCredentials(), serverConf.getConfiguration().getBoolean(Property.GC_TRASH_IGNORE));
+    gc.init(fs, instance, SystemCredentials.get().getAsThrift(), serverConf.getConfiguration().getBoolean(Property.GC_TRASH_IGNORE));
     Accumulo.enableTracing(address, "gc");
     gc.run();
   }
@@ -582,8 +582,7 @@ public class SimpleGarbageCollector implements Iface {
       Map<Key,Value> tabletKeyValues = tabletIterator.next();
       
       for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
-        if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)
-            || entry.getKey().getColumnFamily().equals(ScanFileColumnFamily.NAME)) {
+        if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME) || entry.getKey().getColumnFamily().equals(ScanFileColumnFamily.NAME)) {
           
           String cf = entry.getKey().getColumnQualifier().toString();
           String delete = cf;
@@ -638,7 +637,7 @@ public class SimpleGarbageCollector implements Iface {
     if (!offline) {
       Connector c;
       try {
-        c = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
+        c = instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
         writer = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
         rootWriter = c.createBatchWriter(RootTable.NAME, new BatchWriterConfig());
       } catch (AccumuloException e) {
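
The two call sites above capture the pattern that repeats through the rest of the server diffs in this commit: SystemCredentials.get().getAsThrift() wherever a TCredentials was previously obtained from SecurityConstants, and getPrincipal()/getToken() wherever a Connector is built as the system user. A hedged sketch of the two shapes is below; the class and helper names are made up, and SystemCredentials.get() is assumed to work only inside a configured server process.

    import org.apache.accumulo.core.client.AccumuloException;
    import org.apache.accumulo.core.client.AccumuloSecurityException;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.core.security.thrift.TCredentials;
    import org.apache.accumulo.server.security.SystemCredentials;

    public class SystemCredentialsSketch {
      // Stands in for the removed SecurityConstants.getSystemCredentials() call sites.
      static TCredentials rpcCredentials() {
        return SystemCredentials.get().getAsThrift();
      }

      // Stands in for instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, getSystemToken()).
      static Connector systemConnector(Instance instance) throws AccumuloException, AccumuloSecurityException {
        return instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
      }
    }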

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java b/server/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
index bebff7f..68255b8 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
@@ -37,7 +37,7 @@ import org.apache.accumulo.core.util.ServerServices;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.time.SimpleTimer;
@@ -83,7 +83,7 @@ public class LiveTServerSet implements Watcher {
     public void assignTablet(ZooLock lock, KeyExtent extent) throws TException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.loadTablet(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), lockString(lock), extent.toThrift());
+        client.loadTablet(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), lockString(lock), extent.toThrift());
       } finally {
         ThriftUtil.returnClient(client);
       }
@@ -92,7 +92,7 @@ public class LiveTServerSet implements Watcher {
     public void unloadTablet(ZooLock lock, KeyExtent extent, boolean save) throws TException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.unloadTablet(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), lockString(lock), extent.toThrift(), save);
+        client.unloadTablet(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), lockString(lock), extent.toThrift(), save);
       } finally {
         ThriftUtil.returnClient(client);
       }
@@ -107,7 +107,7 @@ public class LiveTServerSet implements Watcher {
       
       try {
         TabletClientService.Client client = ThriftUtil.createClient(new TabletClientService.Client.Factory(), transport);
-        return client.getTabletServerStatus(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
+        return client.getTabletServerStatus(Tracer.traceInfo(), SystemCredentials.get().getAsThrift());
       } finally {
         if (transport != null)
           transport.close();
@@ -117,7 +117,7 @@ public class LiveTServerSet implements Watcher {
     public void halt(ZooLock lock) throws TException, ThriftSecurityException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.halt(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), lockString(lock));
+        client.halt(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), lockString(lock));
       } finally {
         ThriftUtil.returnClient(client);
       }
@@ -126,7 +126,7 @@ public class LiveTServerSet implements Watcher {
     public void fastHalt(ZooLock lock) throws TException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.fastHalt(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), lockString(lock));
+        client.fastHalt(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), lockString(lock));
       } finally {
         ThriftUtil.returnClient(client);
       }
@@ -135,8 +135,8 @@ public class LiveTServerSet implements Watcher {
     public void flush(ZooLock lock, String tableId, byte[] startRow, byte[] endRow) throws TException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.flush(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), lockString(lock), tableId,
-            startRow == null ? null : ByteBuffer.wrap(startRow), endRow == null ? null : ByteBuffer.wrap(endRow));
+        client.flush(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), lockString(lock), tableId, startRow == null ? null : ByteBuffer.wrap(startRow),
+            endRow == null ? null : ByteBuffer.wrap(endRow));
       } finally {
         ThriftUtil.returnClient(client);
       }
@@ -145,7 +145,7 @@ public class LiveTServerSet implements Watcher {
     public void chop(ZooLock lock, KeyExtent extent) throws TException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.chop(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), lockString(lock), extent.toThrift());
+        client.chop(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), lockString(lock), extent.toThrift());
       } finally {
         ThriftUtil.returnClient(client);
       }
@@ -154,7 +154,7 @@ public class LiveTServerSet implements Watcher {
     public void splitTablet(ZooLock lock, KeyExtent extent, Text splitPoint) throws TException, ThriftSecurityException, NotServingTabletException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.splitTablet(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), extent.toThrift(),
+        client.splitTablet(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), extent.toThrift(),
             ByteBuffer.wrap(splitPoint.getBytes(), 0, splitPoint.getLength()));
       } finally {
         ThriftUtil.returnClient(client);
@@ -164,7 +164,7 @@ public class LiveTServerSet implements Watcher {
     public void flushTablet(ZooLock lock, KeyExtent extent) throws TException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.flushTablet(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), lockString(lock), extent.toThrift());
+        client.flushTablet(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), lockString(lock), extent.toThrift());
       } finally {
         ThriftUtil.returnClient(client);
       }
@@ -173,7 +173,7 @@ public class LiveTServerSet implements Watcher {
     public void compact(ZooLock lock, String tableId, byte[] startRow, byte[] endRow) throws TException {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
       try {
-        client.compact(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), lockString(lock), tableId,
+        client.compact(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), lockString(lock), tableId,
             startRow == null ? null : ByteBuffer.wrap(startRow), endRow == null ? null : ByteBuffer.wrap(endRow));
       } finally {
         ThriftUtil.returnClient(client);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/Master.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/Master.java b/server/src/main/java/org/apache/accumulo/server/master/Master.java
index b5ffd0a..0cb0378 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/Master.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/Master.java
@@ -129,8 +129,8 @@ import org.apache.accumulo.server.master.tableOps.TraceRepo;
 import org.apache.accumulo.server.master.tserverOps.ShutdownTServer;
 import org.apache.accumulo.server.monitor.Monitor;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.security.SecurityOperation;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.accumulo.server.util.DefaultMap;
 import org.apache.accumulo.server.util.Halt;
@@ -291,7 +291,7 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
           @Override
           public void run() {
             try {
-              MetadataTableUtil.moveMetaDeleteMarkers(instance, SecurityConstants.getSystemCredentials());
+              MetadataTableUtil.moveMetaDeleteMarkers(instance, SystemCredentials.get().getAsThrift());
               Accumulo.updateAccumuloVersion(fs);
               
               log.info("Upgrade complete");
@@ -409,7 +409,7 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
   }
   
   public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
-    return instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
+    return instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
   }
   
   private void waitAround(EventCoordinator.Listener listener) {
@@ -1503,7 +1503,7 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
       }
     });
     
-    TCredentials systemAuths = SecurityConstants.getSystemCredentials();
+    TCredentials systemAuths = SystemCredentials.get().getAsThrift();
     watchers.add(new TabletGroupWatcher(this, new MetaDataStateStore(instance, systemAuths, this), null));
     watchers.add(new TabletGroupWatcher(this, new RootTabletStateStore(instance, systemAuths, this), watchers.get(0)));
     watchers.add(new TabletGroupWatcher(this, new ZooTabletStateStore(new ZooStore(zroot)), watchers.get(1)));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java b/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
index c0479dd..fb905c9 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
@@ -70,7 +70,7 @@ import org.apache.accumulo.server.master.state.TabletLocationState;
 import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.master.state.TabletStateStore;
 import org.apache.accumulo.server.master.state.tables.TableManager;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.TabletTime;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.io.Text;
@@ -410,7 +410,7 @@ class TabletGroupWatcher extends Daemon {
         if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
           datafiles.add(new FileRef(this.master.fs, key));
           if (datafiles.size() > 1000) {
-            MetadataTableUtil.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
+            MetadataTableUtil.addDeleteEntries(extent, datafiles, SystemCredentials.get().getAsThrift());
             datafiles.clear();
           }
         } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
@@ -420,12 +420,12 @@ class TabletGroupWatcher extends Daemon {
         } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
           datafiles.add(new FileRef(this.master.fs, key));
           if (datafiles.size() > 1000) {
-            MetadataTableUtil.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
+            MetadataTableUtil.addDeleteEntries(extent, datafiles, SystemCredentials.get().getAsThrift());
             datafiles.clear();
           }
         }
       }
-      MetadataTableUtil.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
+      MetadataTableUtil.addDeleteEntries(extent, datafiles, SystemCredentials.get().getAsThrift());
       BatchWriter bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
       try {
         deleteTablets(info, deleteRange, bw, conn);
@@ -448,8 +448,8 @@ class TabletGroupWatcher extends Daemon {
       } else {
         // Recreate the default tablet to hold the end of the table
         Master.log.debug("Recreating the last tablet to point to " + extent.getPrevEndRow());
-        MetadataTableUtil.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), Constants.DEFAULT_TABLET_LOCATION,
-            SecurityConstants.getSystemCredentials(), timeType, this.master.masterLock);
+        MetadataTableUtil.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), Constants.DEFAULT_TABLET_LOCATION, SystemCredentials
+            .get().getAsThrift(), timeType, this.master.masterLock);
       }
     } catch (Exception ex) {
       throw new AccumuloException(ex);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java b/server/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
index b9cecbf..3e0a2bf 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
@@ -33,7 +33,7 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.log4j.Logger;
 
@@ -119,7 +119,7 @@ public class TableLoadBalancer extends TabletBalancer {
   protected TableOperations getTableOperations() {
     if (tops == null)
       try {
-        tops = configuration.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken()).tableOperations();
+        tops = configuration.getInstance().getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken()).tableOperations();
       } catch (AccumuloException e) {
         log.error("Unable to access table operations from within table balancer", e);
       } catch (AccumuloSecurityException e) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java b/server/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
index d6dce2f..625fa40 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
@@ -22,7 +22,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
 
-import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
@@ -33,7 +32,8 @@ import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 import org.apache.thrift.transport.TTransportException;
@@ -43,7 +43,7 @@ public abstract class TabletBalancer {
   private static final Logger log = Logger.getLogger(TabletBalancer.class);
   
   protected ServerConfiguration configuration;
-
+  
   /**
    * Initialize the TabletBalancer. This gives the balancer the opportunity to read the configuration.
    */
@@ -98,7 +98,7 @@ public abstract class TabletBalancer {
     log.debug("Scanning tablet server " + tserver + " for table " + tableId);
     Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), tserver.getLocation(), configuration.getConfiguration());
     try {
-      List<TabletStats> onlineTabletsForTable = client.getTabletStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), tableId);
+      List<TabletStats> onlineTabletsForTable = client.getTabletStats(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), tableId);
       return onlineTabletsForTable;
     } catch (TTransportException e) {
       log.error("Unable to connect to " + tserver + ": " + e);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java b/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
index b58e618..5cb7b0c 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
@@ -32,7 +32,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.hadoop.io.Text;
 
 public class MetaDataStateStore extends TabletStateStore {
@@ -59,7 +59,7 @@ public class MetaDataStateStore extends TabletStateStore {
   }
   
   protected MetaDataStateStore(String tableName) {
-    this(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), null, tableName);
+    this(HdfsZooInstance.getInstance(), SystemCredentials.get().getAsThrift(), null, tableName);
   }
   
   public MetaDataStateStore() {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
index 4f44d79..cfbdc97 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
@@ -68,7 +68,7 @@ import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
 import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
@@ -557,7 +557,7 @@ class LoadFiles extends MasterRepo {
               server = pair.getFirst();
               List<String> attempt = Collections.singletonList(file);
               log.debug("Asking " + pair.getFirst() + " to bulk import " + file);
-              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), tid, tableId, attempt, errorDir, setTime);
+              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), tid, tableId, attempt, errorDir, setTime);
               if (fail.isEmpty()) {
                 loaded.add(file);
               } else {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
index 8bf437d..3534a78 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
@@ -32,7 +32,7 @@ import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.log4j.Logger;
 
@@ -108,14 +108,14 @@ class CloneMetadata extends MasterRepo {
     Instance instance = HdfsZooInstance.getInstance();
     // need to clear out any metadata entries for tableId just in case this
     // died before and is executing again
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SystemCredentials.get().getAsThrift(), environment.getMasterLock());
     MetadataTableUtil.cloneTable(instance, cloneInfo.srcTableId, cloneInfo.tableId);
     return new FinishCloneTable(cloneInfo);
   }
   
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SystemCredentials.get().getAsThrift(), environment.getMasterLock());
   }
   
 }
@@ -183,7 +183,7 @@ class ClonePermissions extends MasterRepo {
     // give all table permissions to the creator
     for (TablePermission permission : TablePermission.values()) {
       try {
-        AuditedSecurityOperation.getInstance().grantTablePermission(SecurityConstants.getSystemCredentials(), cloneInfo.user, cloneInfo.tableId, permission);
+        AuditedSecurityOperation.getInstance().grantTablePermission(SystemCredentials.get().getAsThrift(), cloneInfo.user, cloneInfo.tableId, permission);
       } catch (ThriftSecurityException e) {
         Logger.getLogger(FinishCloneTable.class).error(e.getMessage(), e);
         throw e;
@@ -198,7 +198,7 @@ class ClonePermissions extends MasterRepo {
   
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    AuditedSecurityOperation.getInstance().deleteTable(SecurityConstants.getSystemCredentials(), cloneInfo.tableId);
+    AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().getAsThrift(), cloneInfo.tableId);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
index d9acd8d..2f35f97 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
@@ -36,8 +36,8 @@ import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.security.SecurityOperation;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.TabletTime;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.TablePropUtil;
@@ -115,7 +115,7 @@ class PopulateMetadata extends MasterRepo {
   public Repo<Master> call(long tid, Master environment) throws Exception {
     
     KeyExtent extent = new KeyExtent(new Text(tableInfo.tableId), null, null);
-    MetadataTableUtil.addTablet(extent, Constants.DEFAULT_TABLET_LOCATION, SecurityConstants.getSystemCredentials(), tableInfo.timeType,
+    MetadataTableUtil.addTablet(extent, Constants.DEFAULT_TABLET_LOCATION, SystemCredentials.get().getAsThrift(), tableInfo.timeType,
         environment.getMasterLock());
     
     return new FinishCreateTable(tableInfo);
@@ -124,7 +124,7 @@ class PopulateMetadata extends MasterRepo {
   
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SystemCredentials.get().getAsThrift(), environment.getMasterLock());
   }
   
 }
@@ -153,7 +153,7 @@ class CreateDir extends MasterRepo {
   @Override
   public void undo(long tid, Master master) throws Exception {
     VolumeManager fs = master.getFileSystem();
-    for(String dir : ServerConstants.getTablesDirs()) {
+    for (String dir : ServerConstants.getTablesDirs()) {
       fs.deleteRecursively(new Path(dir + "/" + tableInfo.tableId));
     }
     
@@ -225,7 +225,7 @@ class SetupPermissions extends MasterRepo {
     SecurityOperation security = AuditedSecurityOperation.getInstance();
     for (TablePermission permission : TablePermission.values()) {
       try {
-        security.grantTablePermission(SecurityConstants.getSystemCredentials(), tableInfo.user, tableInfo.tableId, permission);
+        security.grantTablePermission(SystemCredentials.get().getAsThrift(), tableInfo.user, tableInfo.tableId, permission);
       } catch (ThriftSecurityException e) {
         Logger.getLogger(FinishCreateTable.class).error(e.getMessage(), e);
         throw e;
@@ -240,7 +240,7 @@ class SetupPermissions extends MasterRepo {
   
   @Override
   public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance().deleteTable(SecurityConstants.getSystemCredentials(), tableInfo.tableId);
+    AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().getAsThrift(), tableInfo.tableId);
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
index 7d6186e..3786d27 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
@@ -47,7 +47,7 @@ import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
@@ -155,7 +155,7 @@ class CleanUp extends MasterRepo {
       // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
       // If the master lock passed to deleteTable, it is possible that the delete mutations will be dropped. If the delete operations
       // are dropped and the operation completes, then the deletes will not be repeated.
-      MetadataTableUtil.deleteTable(tableId, refCount != 0, SecurityConstants.getSystemCredentials(), null);
+      MetadataTableUtil.deleteTable(tableId, refCount != 0, SystemCredentials.get().getAsThrift(), null);
     } catch (Exception e) {
       log.error("error deleting " + tableId + " from metadata table", e);
     }
@@ -189,7 +189,7 @@ class CleanUp extends MasterRepo {
     
     // remove any permissions associated with this table
     try {
-      AuditedSecurityOperation.getInstance().deleteTable(SecurityConstants.getSystemCredentials(), tableId);
+      AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().getAsThrift(), tableId);
     } catch (ThriftSecurityException e) {
       log.error(e.getMessage(), e);
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
index ae6930b..364c267 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
@@ -59,8 +59,8 @@ import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.security.SecurityOperation;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.TablePropUtil;
@@ -293,7 +293,7 @@ class PopulateMetadataTable extends MasterRepo {
   
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SystemCredentials.get().getAsThrift(), environment.getMasterLock());
   }
 }
 
@@ -484,7 +484,7 @@ class ImportSetupPermissions extends MasterRepo {
     SecurityOperation security = AuditedSecurityOperation.getInstance();
     for (TablePermission permission : TablePermission.values()) {
       try {
-        security.grantTablePermission(SecurityConstants.getSystemCredentials(), tableInfo.user, tableInfo.tableId, permission);
+        security.grantTablePermission(SystemCredentials.get().getAsThrift(), tableInfo.user, tableInfo.tableId, permission);
       } catch (ThriftSecurityException e) {
         Logger.getLogger(ImportSetupPermissions.class).error(e.getMessage(), e);
         throw e;
@@ -499,7 +499,7 @@ class ImportSetupPermissions extends MasterRepo {
   
   @Override
   public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance().deleteTable(SecurityConstants.getSystemCredentials(), tableInfo.tableId);
+    AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().getAsThrift(), tableInfo.tableId);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java b/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
index 56e473a..5957f26 100644
--- a/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
+++ b/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
@@ -70,7 +70,7 @@ import org.apache.accumulo.server.monitor.servlets.trace.ShowTrace;
 import org.apache.accumulo.server.monitor.servlets.trace.Summary;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.EmbeddedWebServer;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.log4j.Logger;
@@ -292,7 +292,7 @@ public class Monitor {
         try {
           client = MasterClient.getConnection(HdfsZooInstance.getInstance());
           if (client != null) {
-            mmi = client.getMasterStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
+            mmi = client.getMasterStats(Tracer.traceInfo(), SystemCredentials.get().getAsThrift());
             retry = false;
           } else {
             mmi = null;
@@ -432,7 +432,7 @@ public class Monitor {
           address = new ServerServices(new String(zk.getData(path + "/" + locks.get(0), null, null))).getAddress(Service.GC_CLIENT);
           GCMonitorService.Client client = ThriftUtil.getClient(new GCMonitorService.Client.Factory(), address, config.getConfiguration());
           try {
-            result = client.getStatus(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
+            result = client.getStatus(Tracer.traceInfo(), SystemCredentials.get().getAsThrift());
           } finally {
             ThriftUtil.returnClient(client);
           }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java b/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java
index 095725e..8484608 100644
--- a/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java
+++ b/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TServersServlet.java
@@ -27,7 +27,6 @@ import java.util.Map.Entry;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
-import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.DeadServer;
 import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
@@ -51,8 +50,9 @@ import org.apache.accumulo.server.monitor.util.celltypes.PercentageType;
 import org.apache.accumulo.server.monitor.util.celltypes.ProgressChartType;
 import org.apache.accumulo.server.monitor.util.celltypes.TServerLinkType;
 import org.apache.accumulo.server.monitor.util.celltypes.TableLinkType;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.TabletStatsKeeper;
+import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.commons.codec.binary.Base64;
 
 public class TServersServlet extends BasicServlet {
@@ -126,9 +126,9 @@ public class TServersServlet extends BasicServlet {
       TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, Monitor.getSystemConfiguration());
       try {
         for (String tableId : Monitor.getMmi().tableMap.keySet()) {
-          tsStats.addAll(client.getTabletStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), tableId));
+          tsStats.addAll(client.getTabletStats(Tracer.traceInfo(), SystemCredentials.get().getAsThrift(), tableId));
         }
-        historical = client.getHistoricalStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
+        historical = client.getHistoricalStats(Tracer.traceInfo(), SystemCredentials.get().getAsThrift());
       } finally {
         ThriftUtil.returnClient(client);
       }
@@ -239,12 +239,10 @@ public class TServersServlet extends BasicServlet {
     
     opHistoryDetails.addRow("Split", historical.splits.num, historical.splits.fail, null, null,
         historical.splits.num != 0 ? (historical.splits.elapsed / historical.splits.num) : null, splitStdDev, historical.splits.elapsed);
-    opHistoryDetails.addRow("Major&nbsp;Compaction", total.majors.num, total.majors.fail,
-        total.majors.num != 0 ? (total.majors.queueTime / total.majors.num) : null, majorQueueStdDev,
-        total.majors.num != 0 ? (total.majors.elapsed / total.majors.num) : null, majorStdDev, total.majors.elapsed);
-    opHistoryDetails.addRow("Minor&nbsp;Compaction", total.minors.num, total.minors.fail,
-        total.minors.num != 0 ? (total.minors.queueTime / total.minors.num) : null, minorQueueStdDev,
-        total.minors.num != 0 ? (total.minors.elapsed / total.minors.num) : null, minorStdDev, total.minors.elapsed);
+    opHistoryDetails.addRow("Major&nbsp;Compaction", total.majors.num, total.majors.fail, total.majors.num != 0 ? (total.majors.queueTime / total.majors.num)
+        : null, majorQueueStdDev, total.majors.num != 0 ? (total.majors.elapsed / total.majors.num) : null, majorStdDev, total.majors.elapsed);
+    opHistoryDetails.addRow("Minor&nbsp;Compaction", total.minors.num, total.minors.fail, total.minors.num != 0 ? (total.minors.queueTime / total.minors.num)
+        : null, minorQueueStdDev, total.minors.num != 0 ? (total.minors.elapsed / total.minors.num) : null, minorStdDev, total.minors.elapsed);
     opHistoryDetails.generate(req, sb);
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java b/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
index 127989c..85d17ff 100644
--- a/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
+++ b/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
@@ -47,7 +47,7 @@ import org.apache.accumulo.server.monitor.util.celltypes.DurationType;
 import org.apache.accumulo.server.monitor.util.celltypes.NumberType;
 import org.apache.accumulo.server.monitor.util.celltypes.TableLinkType;
 import org.apache.accumulo.server.monitor.util.celltypes.TableStateType;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.hadoop.io.Text;
 
 public class TablesServlet extends BasicServlet {
@@ -151,8 +151,8 @@ public class TablesServlet extends BasicServlet {
       locs.add(instance.getRootTabletLocation());
     } else {
       String systemTableName = MetadataTable.ID.equals(tableId) ? RootTable.NAME : MetadataTable.NAME;
-      MetaDataTableScanner scanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), new Range(KeyExtent.getMetadataEntry(
-          new Text(tableId), new Text()), KeyExtent.getMetadataEntry(new Text(tableId), null)), systemTableName);
+      MetaDataTableScanner scanner = new MetaDataTableScanner(instance, SystemCredentials.get().getAsThrift(), new Range(KeyExtent.getMetadataEntry(new Text(
+          tableId), new Text()), KeyExtent.getMetadataEntry(new Text(tableId), null)), systemTableName);
       
       while (scanner.hasNext()) {
         TabletLocationState state = scanner.next();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java b/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
index a34de9f..530ef76 100644
--- a/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
+++ b/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
@@ -34,7 +34,7 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.io.Text;
@@ -125,13 +125,13 @@ public class ProblemReport {
   void removeFromMetadataTable() throws Exception {
     Mutation m = new Mutation(new Text("~err_" + tableName));
     m.putDelete(new Text(problemType.name()), new Text(resource));
-    MetadataTableUtil.getMetadataTable(SecurityConstants.getSystemCredentials()).update(m);
+    MetadataTableUtil.getMetadataTable(SystemCredentials.get().getAsThrift()).update(m);
   }
   
   void saveToMetadataTable() throws Exception {
     Mutation m = new Mutation(new Text("~err_" + tableName));
     m.put(new Text(problemType.name()), new Text(resource), new Value(encode()));
-    MetadataTableUtil.getMetadataTable(SecurityConstants.getSystemCredentials()).update(m);
+    MetadataTableUtil.getMetadataTable(SystemCredentials.get().getAsThrift()).update(m);
   }
   
   void removeFromZooKeeper() throws Exception {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java b/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
index 5b82621..5422e90 100644
--- a/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
+++ b/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
@@ -47,7 +47,7 @@ import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.commons.collections.map.LRUMap;
@@ -155,7 +155,7 @@ public class ProblemReports implements Iterable<ProblemReport> {
       return;
     }
     
-    Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
+    Connector connector = HdfsZooInstance.getInstance().getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
     Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.addScanIterator(new IteratorSetting(1, "keys-only", SortedKeyIterator.class));
     
@@ -174,7 +174,7 @@ public class ProblemReports implements Iterable<ProblemReport> {
     }
     
     if (hasProblems)
-      MetadataTableUtil.getMetadataTable(SecurityConstants.getSystemCredentials()).update(delMut);
+      MetadataTableUtil.getMetadataTable(SystemCredentials.get().getAsThrift()).update(delMut);
   }
   
   public Iterator<ProblemReport> iterator(final String table) {
@@ -210,7 +210,7 @@ public class ProblemReports implements Iterable<ProblemReport> {
           if (iter2 == null) {
             try {
               if ((table == null || !table.equals(MetadataTable.ID)) && iter1Count == 0) {
-                Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
+                Connector connector = HdfsZooInstance.getInstance().getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
                 Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
                 
                 scanner.setTimeout(3, TimeUnit.SECONDS);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java b/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
index 125915b..a74f584 100644
--- a/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
@@ -92,7 +92,7 @@ public class AuditedSecurityOperation extends SecurityOperation {
   
   // Is INFO the right level to check? Do we even need that check?
   private static boolean shouldAudit(TCredentials credentials) {
-    return !credentials.getPrincipal().equals(SecurityConstants.SYSTEM_PRINCIPAL);
+    return !SystemCredentials.get().getToken().getClass().getName().equals(credentials.getTokenClassName());
   }
   
   /*

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java b/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java
deleted file mode 100644
index 5c42a69..0000000
--- a/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.security;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.security.SecurityPermission;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.security.CredentialHelper;
-import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.log4j.Logger;
-
-public class SecurityConstants {
-  private static SecurityPermission SYSTEM_CREDENTIALS_PERMISSION = new SecurityPermission("systemCredentialsPermission");
-  static Logger log = Logger.getLogger(SecurityConstants.class);
-  
-  public static final String SYSTEM_PRINCIPAL = "!SYSTEM";
-  private static final AuthenticationToken SYSTEM_TOKEN = makeSystemPassword();
-  private static final TCredentials systemCredentials = CredentialHelper.createSquelchError(SYSTEM_PRINCIPAL, SYSTEM_TOKEN, HdfsZooInstance.getInstance()
-      .getInstanceID());
-  public static byte[] confChecksum = null;
-  
-  public static AuthenticationToken getSystemToken() {
-    return SYSTEM_TOKEN;
-  }
-  
-  public static TCredentials getSystemCredentials() {
-    SecurityManager sm = System.getSecurityManager();
-    if (sm != null) {
-      sm.checkPermission(SYSTEM_CREDENTIALS_PERMISSION);
-    }
-    return systemCredentials;
-  }
-  
-  public static String getSystemPrincipal() {
-    return SYSTEM_PRINCIPAL;
-  }
-  
-  private static AuthenticationToken makeSystemPassword() {
-    int wireVersion = ServerConstants.WIRE_VERSION;
-    byte[] inst = HdfsZooInstance.getInstance().getInstanceID().getBytes(Constants.UTF8);
-    try {
-      confChecksum = getSystemConfigChecksum();
-    } catch (NoSuchAlgorithmException e) {
-      throw new RuntimeException("Failed to compute configuration checksum", e);
-    }
-    
-    ByteArrayOutputStream bytes = new ByteArrayOutputStream(3 * (Integer.SIZE / Byte.SIZE) + inst.length + confChecksum.length);
-    DataOutputStream out = new DataOutputStream(bytes);
-    try {
-      out.write(wireVersion * -1);
-      out.write(inst.length);
-      out.write(inst);
-      out.write(confChecksum.length);
-      out.write(confChecksum);
-    } catch (IOException e) {
-      throw new RuntimeException(e); // this is impossible with
-      // ByteArrayOutputStream; crash hard
-      // if this happens
-    }
-    return new PasswordToken(Base64.encodeBase64(bytes.toByteArray()));
-  }
-  
-  private static byte[] getSystemConfigChecksum() throws NoSuchAlgorithmException {
-    if (confChecksum == null) {
-      MessageDigest md = MessageDigest.getInstance(Constants.PW_HASH_ALGORITHM);
-      
-      // seed the config with the version and instance id, so at least
-      // it's not empty
-      md.update(ServerConstants.WIRE_VERSION.toString().getBytes(Constants.UTF8));
-      md.update(HdfsZooInstance.getInstance().getInstanceID().getBytes(Constants.UTF8));
-      
-      for (Entry<String,String> entry : ServerConfiguration.getSiteConfiguration()) {
-        // only include instance properties
-        if (entry.getKey().startsWith(Property.INSTANCE_PREFIX.toString())) {
-          md.update(entry.getKey().getBytes(Constants.UTF8));
-          md.update(entry.getValue().getBytes(Constants.UTF8));
-        }
-      }
-      
-      confChecksum = md.digest();
-    }
-    return confChecksum;
-  }
-}
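
For context, a minimal sketch of how a call site migrates off the removed SecurityConstants helpers. It uses only names visible in the hunks above (SystemCredentials, HdfsZooInstance, Connector); it is an illustration of the pattern, not code from this commit, and it assumes the Accumulo server module is on the classpath.

  import org.apache.accumulo.core.client.Connector;
  import org.apache.accumulo.core.security.thrift.TCredentials;
  import org.apache.accumulo.server.client.HdfsZooInstance;
  import org.apache.accumulo.server.security.SystemCredentials;

  class SystemCredentialsMigrationSketch {
    // Was: SecurityConstants.getSystemCredentials()
    static TCredentials systemThriftCredentials() {
      return SystemCredentials.get().getAsThrift();
    }

    // Was: getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken())
    static Connector systemConnector() throws Exception {
      return HdfsZooInstance.getInstance().getConnector(
          SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
    }
  }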


[19/50] git commit: ACCUMULO-1572 integration test

Posted by kt...@apache.org.
ACCUMULO-1572 integration test


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/388d58c6
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/388d58c6
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/388d58c6

Branch: refs/heads/ACCUMULO-1000
Commit: 388d58c6d02224e76fab77db852258eccc2dab7a
Parents: 5cfb88b
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 14:12:28 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 14:12:28 2013 -0400

----------------------------------------------------------------------
 .../test/functional/ZookeeperRestartIT.java     | 81 ++++++++++++++++++++
 1 file changed, 81 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/388d58c6/test/src/test/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
new file mode 100644
index 0000000..f718a63
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.minicluster.ProcessReference;
+import org.apache.accumulo.minicluster.ServerType;
+import org.junit.Test;
+
+public class ZookeeperRestartIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String,String> siteConfig = new HashMap<String, String>();
+    siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "3s");
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Test(timeout = 20 * 1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    BatchWriter bw = c.createBatchWriter("test_ingest", null);
+    Mutation m = new Mutation("row");
+    m.put("cf", "cq", "value");
+    bw.addMutation(m);
+    bw.close();
+    
+    // kill zookeeper
+    for (ProcessReference proc : cluster.getProcesses().get(ServerType.ZOOKEEPER))
+      cluster.killProcess(ServerType.ZOOKEEPER, proc);
+    
+    // give the servers time to react
+    UtilWaitThread.sleep(1000);
+    
+    // start zookeeper back up
+    cluster.start();
+    
+    // use the tservers
+    Scanner s = c.createScanner("test_ingest", Authorizations.EMPTY);
+    Iterator<Entry<Key,Value>> i = s.iterator();
+    assertTrue(i.hasNext());
+    assertEquals("row", i.next().getKey().getRow().toString());
+    assertFalse(i.hasNext());
+    // use the master
+    c.tableOperations().delete("test_ingest");
+  }
+  
+}
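
Assuming the test module's usual Maven Failsafe wiring for *IT classes (not shown in this commit), the new test can be run on its own with something like:

  mvn verify -Dit.test=ZookeeperRestartIT

The 20 second @Test timeout above has to cover killing ZooKeeper, the 3s zookeeper session timeout configured in configure(), and the cluster restart.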


[06/50] git commit: ACCUMULO-1575

Posted by kt...@apache.org.
ACCUMULO-1575


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/8ef04012
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/8ef04012
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/8ef04012

Branch: refs/heads/ACCUMULO-1000
Commit: 8ef040122228a459a16243e6456ebb03f27d0047
Parents: a1fda97
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 11:27:23 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 11:27:23 2013 -0400

----------------------------------------------------------------------
 .../apache/accumulo/fate/zookeeper/ZooLock.java | 12 ++-----
 .../accumulo/server/util/ListInstances.java     | 35 ++++++++++----------
 2 files changed, 19 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/8ef04012/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
----------------------------------------------------------------------
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
index cb0d902..961539a 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
@@ -431,16 +431,6 @@ public class ZooLock implements Watcher {
     return zc.get(path + "/" + lockNode, stat);
   }
   
-  private static ZooCache getLockDataZooCache;
-  
-  public static byte[] getLockData(String path) {
-    return getLockData(path, null);
-  }
-  
-  public static byte[] getLockData(String path, Stat stat) {
-    return getLockData(getLockDataZooCache, path, stat);
-  }
-  
   public static long getSessionId(ZooCache zc, String path) throws KeeperException, InterruptedException {
     List<String> children = zc.getChildren(path);
     
@@ -459,6 +449,8 @@ public class ZooLock implements Watcher {
     return 0;
   }
   
+  private static ZooCache getLockDataZooCache;
+  
   public long getSessionId() throws KeeperException, InterruptedException {
     return getSessionId(getLockDataZooCache, path);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8ef04012/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java b/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java
index 64c5dd3..daab268 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/ListInstances.java
@@ -27,10 +27,10 @@ import java.util.UUID;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.log4j.Logger;
 
 import com.beust.jcommander.Parameter;
@@ -43,6 +43,8 @@ public class ListInstances {
   private static final int UUID_WIDTH = 37;
   private static final int MASTER_WIDTH = 30;
   
+  private static final int ZOOKEEPER_TIMER_MILLIS = 30 * 1000;
+  
   static class Opts extends Help {
     @Parameter(names="--print-errors", description="display errors while listing instances")
     boolean printErrors = false;
@@ -62,22 +64,24 @@ public class ListInstances {
     }
     
     System.out.println("INFO : Using ZooKeepers " + opts.keepers);
-    
-    TreeMap<String,UUID> instanceNames = getInstanceNames();
+    ZooReader rdr = new ZooReader(opts.keepers, ZOOKEEPER_TIMER_MILLIS); 
+    ZooCache cache = new ZooCache(opts.keepers, ZOOKEEPER_TIMER_MILLIS);
+
+    TreeMap<String,UUID> instanceNames = getInstanceNames(rdr);
     
     System.out.println();
     printHeader();
     
     for (Entry<String,UUID> entry : instanceNames.entrySet()) {
-      printInstanceInfo(entry.getKey(), entry.getValue());
+      printInstanceInfo(cache, entry.getKey(), entry.getValue());
     }
     
-    TreeSet<UUID> instancedIds = getInstanceIDs();
+    TreeSet<UUID> instancedIds = getInstanceIDs(rdr);
     instancedIds.removeAll(instanceNames.values());
     
     if (opts.printAll) {
       for (UUID uuid : instancedIds) {
-        printInstanceInfo(null, uuid);
+        printInstanceInfo(cache, null, uuid);
       }
     } else if (instancedIds.size() > 0) {
       System.out.println();
@@ -118,8 +122,8 @@ public class ListInstances {
     
   }
   
-  private static void printInstanceInfo(String instanceName, UUID iid) {
-    String master = getMaster(iid);
+  private static void printInstanceInfo(ZooCache cache, String instanceName, UUID iid) {
+    String master = getMaster(cache, iid);
     if (instanceName == null) {
       instanceName = "";
     }
@@ -130,8 +134,7 @@ public class ListInstances {
     
     System.out.printf("%" + NAME_WIDTH + "s |%" + UUID_WIDTH + "s |%" + MASTER_WIDTH + "s%n", "\"" + instanceName + "\"", iid, master);
   }
-  
-  private static String getMaster(UUID iid) {
+  private static String getMaster(ZooCache cache, UUID iid) {
     
     if (iid == null) {
       return null;
@@ -139,8 +142,7 @@ public class ListInstances {
     
     try {
       String masterLocPath = Constants.ZROOT + "/" + iid + Constants.ZMASTER_LOCK;
-      
-      byte[] master = ZooLock.getLockData(masterLocPath);
+      byte[] master = ZooLock.getLockData(cache, masterLocPath, null);
       if (master == null) {
         return null;
       }
@@ -151,9 +153,8 @@ public class ListInstances {
     }
   }
   
-  private static TreeMap<String,UUID> getInstanceNames() {
+  private static TreeMap<String,UUID> getInstanceNames(ZooReader zk) {
     
-    IZooReaderWriter zk = ZooReaderWriter.getInstance();
     String instancesPath = Constants.ZROOT + Constants.ZINSTANCES;
     
     TreeMap<String,UUID> tm = new TreeMap<String,UUID>();
@@ -181,11 +182,9 @@ public class ListInstances {
     return tm;
   }
   
-  private static TreeSet<UUID> getInstanceIDs() {
+  private static TreeSet<UUID> getInstanceIDs(ZooReader zk) {
     TreeSet<UUID> ts = new TreeSet<UUID>();
     
-    IZooReaderWriter zk = ZooReaderWriter.getInstance();
-    
     try {
       List<String> children = zk.getChildren(Constants.ZROOT);
       

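A consolidated sketch of the master lookup path after this change, assembled from the hunks above; ListInstances itself takes the keepers from its Opts and formats the output, and the instance id handling here is an illustrative assumption rather than code from the commit.

  import org.apache.accumulo.core.Constants;
  import org.apache.accumulo.fate.zookeeper.ZooCache;
  import org.apache.accumulo.server.zookeeper.ZooLock;

  class MasterLockSketch {
    static String masterLocation(String keepers, String instanceId) {
      // Explicitly constructed ZooCache replaces the ZooReaderWriter singleton.
      ZooCache cache = new ZooCache(keepers, 30 * 1000);
      String masterLockPath = Constants.ZROOT + "/" + instanceId + Constants.ZMASTER_LOCK;
      byte[] data = ZooLock.getLockData(cache, masterLockPath, null);
      return data == null ? null : new String(data, Constants.UTF8);
    }
  }
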

[03/50] git commit: ACCUMULO-1573

Posted by kt...@apache.org.
ACCUMULO-1573


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/44532948
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/44532948
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/44532948

Branch: refs/heads/ACCUMULO-1000
Commit: 44532948ed4ddca0034a17d993106f0a8a42f274
Parents: 1fc73a9
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 11:25:58 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 11:25:58 2013 -0400

----------------------------------------------------------------------
 .../main/java/org/apache/accumulo/server/util/DumpZookeeper.java | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/44532948/server/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java b/server/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
index 9be6f61..d1858a1 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
@@ -60,10 +60,8 @@ public class DumpZookeeper {
     
     Logger.getRootLogger().setLevel(Level.WARN);
     PrintStream out = System.out;
-    // int timeout = 30 * 1000;
-    // String server = args[0];
     if (args.length > 0)
-      opts.root = args[0];
+      opts.root = opts.root;
     try {
       zk = ZooReaderWriter.getInstance();
       


[25/50] added ability to invalidate server side conditional update sessions

Posted by kt...@apache.org.
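
The generated Thrift code below only shows the new RPC signatures, so here is a hedged sketch of the call sequence they imply: a caller opens a session with startConditionalUpdate, sends one or more batches with conditionalUpdate against the returned session id, and can ask the server to invalidate the session (for example, when the outcome of a batch is unknown and the client does not want it applied later). The helper below is illustrative only; its method and variable names are assumptions, not part of this commit.

  import java.nio.ByteBuffer;
  import java.util.List;
  import java.util.Map;

  import org.apache.accumulo.core.data.thrift.TCMResult;
  import org.apache.accumulo.core.data.thrift.TConditionalMutation;
  import org.apache.accumulo.core.data.thrift.TKeyExtent;
  import org.apache.accumulo.core.security.thrift.TCredentials;
  import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
  import org.apache.accumulo.trace.thrift.TInfo;

  class ConditionalSessionSketch {
    static List<TCMResult> updateWithSession(TabletClientService.Iface client, TInfo tinfo,
        TCredentials credentials, List<ByteBuffer> authorizations, String tableID,
        Map<TKeyExtent,List<TConditionalMutation>> mutations, List<String> symbols) throws Exception {
      // Open a server-side conditional update session for the table.
      long sessID = client.startConditionalUpdate(tinfo, credentials, authorizations, tableID);
      try {
        // Apply a batch of conditional mutations against the session.
        return client.conditionalUpdate(tinfo, sessID, mutations, symbols);
      } catch (Exception e) {
        // On failure, explicitly invalidate the session on the server.
        client.invalidateConditionalUpdate(tinfo, sessID);
        throw e;
      }
    }
  }
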
http://git-wip-us.apache.org/repos/asf/accumulo/blob/ec537137/core/src/main/java/org/apache/accumulo/core/tabletserver/thrift/TabletClientService.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/tabletserver/thrift/TabletClientService.java b/core/src/main/java/org/apache/accumulo/core/tabletserver/thrift/TabletClientService.java
index 94744b5..395cc01 100644
--- a/core/src/main/java/org/apache/accumulo/core/tabletserver/thrift/TabletClientService.java
+++ b/core/src/main/java/org/apache/accumulo/core/tabletserver/thrift/TabletClientService.java
@@ -70,7 +70,11 @@ import org.slf4j.LoggerFactory;
 
     public void update(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, org.apache.accumulo.core.data.thrift.TKeyExtent keyExtent, org.apache.accumulo.core.data.thrift.TMutation mutation) throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, NotServingTabletException, ConstraintViolationException, org.apache.thrift.TException;
 
-    public List<org.apache.accumulo.core.data.thrift.TCMResult> conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols) throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException;
+    public long startConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, String tableID) throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException;
+
+    public List<org.apache.accumulo.core.data.thrift.TCMResult> conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols) throws NoSuchScanIDException, org.apache.thrift.TException;
+
+    public void invalidateConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID) throws org.apache.thrift.TException;
 
     public List<org.apache.accumulo.core.data.thrift.TKeyExtent> bulkImport(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, long tid, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>> files, boolean setTime) throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException;
 
@@ -128,7 +132,11 @@ import org.slf4j.LoggerFactory;
 
     public void update(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, org.apache.accumulo.core.data.thrift.TKeyExtent keyExtent, org.apache.accumulo.core.data.thrift.TMutation mutation, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.update_call> resultHandler) throws org.apache.thrift.TException;
 
-    public void conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.conditionalUpdate_call> resultHandler) throws org.apache.thrift.TException;
+    public void startConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, String tableID, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.startConditionalUpdate_call> resultHandler) throws org.apache.thrift.TException;
+
+    public void conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.conditionalUpdate_call> resultHandler) throws org.apache.thrift.TException;
+
+    public void invalidateConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.invalidateConditionalUpdate_call> resultHandler) throws org.apache.thrift.TException;
 
     public void bulkImport(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, long tid, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>> files, boolean setTime, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.bulkImport_call> resultHandler) throws org.apache.thrift.TException;
 
@@ -457,36 +465,85 @@ import org.slf4j.LoggerFactory;
       return;
     }
 
-    public List<org.apache.accumulo.core.data.thrift.TCMResult> conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols) throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException
+    public long startConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, String tableID) throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException
     {
-      send_conditionalUpdate(tinfo, credentials, authorizations, mutations, symbols);
-      return recv_conditionalUpdate();
+      send_startConditionalUpdate(tinfo, credentials, authorizations, tableID);
+      return recv_startConditionalUpdate();
     }
 
-    public void send_conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols) throws org.apache.thrift.TException
+    public void send_startConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, String tableID) throws org.apache.thrift.TException
     {
-      conditionalUpdate_args args = new conditionalUpdate_args();
+      startConditionalUpdate_args args = new startConditionalUpdate_args();
       args.setTinfo(tinfo);
       args.setCredentials(credentials);
       args.setAuthorizations(authorizations);
+      args.setTableID(tableID);
+      sendBase("startConditionalUpdate", args);
+    }
+
+    public long recv_startConditionalUpdate() throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException
+    {
+      startConditionalUpdate_result result = new startConditionalUpdate_result();
+      receiveBase(result, "startConditionalUpdate");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.sec != null) {
+        throw result.sec;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "startConditionalUpdate failed: unknown result");
+    }
+
+    public List<org.apache.accumulo.core.data.thrift.TCMResult> conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols) throws NoSuchScanIDException, org.apache.thrift.TException
+    {
+      send_conditionalUpdate(tinfo, sessID, mutations, symbols);
+      return recv_conditionalUpdate();
+    }
+
+    public void send_conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols) throws org.apache.thrift.TException
+    {
+      conditionalUpdate_args args = new conditionalUpdate_args();
+      args.setTinfo(tinfo);
+      args.setSessID(sessID);
       args.setMutations(mutations);
       args.setSymbols(symbols);
       sendBase("conditionalUpdate", args);
     }
 
-    public List<org.apache.accumulo.core.data.thrift.TCMResult> recv_conditionalUpdate() throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException
+    public List<org.apache.accumulo.core.data.thrift.TCMResult> recv_conditionalUpdate() throws NoSuchScanIDException, org.apache.thrift.TException
     {
       conditionalUpdate_result result = new conditionalUpdate_result();
       receiveBase(result, "conditionalUpdate");
       if (result.isSetSuccess()) {
         return result.success;
       }
-      if (result.sec != null) {
-        throw result.sec;
+      if (result.nssi != null) {
+        throw result.nssi;
       }
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "conditionalUpdate failed: unknown result");
     }
 
+    public void invalidateConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID) throws org.apache.thrift.TException
+    {
+      send_invalidateConditionalUpdate(tinfo, sessID);
+      recv_invalidateConditionalUpdate();
+    }
+
+    public void send_invalidateConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID) throws org.apache.thrift.TException
+    {
+      invalidateConditionalUpdate_args args = new invalidateConditionalUpdate_args();
+      args.setTinfo(tinfo);
+      args.setSessID(sessID);
+      sendBase("invalidateConditionalUpdate", args);
+    }
+
+    public void recv_invalidateConditionalUpdate() throws org.apache.thrift.TException
+    {
+      invalidateConditionalUpdate_result result = new invalidateConditionalUpdate_result();
+      receiveBase(result, "invalidateConditionalUpdate");
+      return;
+    }
+
     public List<org.apache.accumulo.core.data.thrift.TKeyExtent> bulkImport(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, long tid, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>> files, boolean setTime) throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException
     {
       send_bulkImport(tinfo, credentials, tid, files, setTime);
@@ -1253,24 +1310,63 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    public void conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols, org.apache.thrift.async.AsyncMethodCallback<conditionalUpdate_call> resultHandler) throws org.apache.thrift.TException {
+    public void startConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, String tableID, org.apache.thrift.async.AsyncMethodCallback<startConditionalUpdate_call> resultHandler) throws org.apache.thrift.TException {
       checkReady();
-      conditionalUpdate_call method_call = new conditionalUpdate_call(tinfo, credentials, authorizations, mutations, symbols, resultHandler, this, ___protocolFactory, ___transport);
+      startConditionalUpdate_call method_call = new startConditionalUpdate_call(tinfo, credentials, authorizations, tableID, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
       ___manager.call(method_call);
     }
 
-    public static class conditionalUpdate_call extends org.apache.thrift.async.TAsyncMethodCall {
+    public static class startConditionalUpdate_call extends org.apache.thrift.async.TAsyncMethodCall {
       private org.apache.accumulo.trace.thrift.TInfo tinfo;
       private org.apache.accumulo.core.security.thrift.TCredentials credentials;
       private List<ByteBuffer> authorizations;
-      private Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations;
-      private List<String> symbols;
-      public conditionalUpdate_call(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols, org.apache.thrift.async.AsyncMethodCallback<conditionalUpdate_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      private String tableID;
+      public startConditionalUpdate_call(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, List<ByteBuffer> authorizations, String tableID, org.apache.thrift.async.AsyncMethodCallback<startConditionalUpdate_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.tinfo = tinfo;
         this.credentials = credentials;
         this.authorizations = authorizations;
+        this.tableID = tableID;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("startConditionalUpdate", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        startConditionalUpdate_args args = new startConditionalUpdate_args();
+        args.setTinfo(tinfo);
+        args.setCredentials(credentials);
+        args.setAuthorizations(authorizations);
+        args.setTableID(tableID);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public long getResult() throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_startConditionalUpdate();
+      }
+    }
+
+    public void conditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols, org.apache.thrift.async.AsyncMethodCallback<conditionalUpdate_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      conditionalUpdate_call method_call = new conditionalUpdate_call(tinfo, sessID, mutations, symbols, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class conditionalUpdate_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private org.apache.accumulo.trace.thrift.TInfo tinfo;
+      private long sessID;
+      private Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations;
+      private List<String> symbols;
+      public conditionalUpdate_call(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations, List<String> symbols, org.apache.thrift.async.AsyncMethodCallback<conditionalUpdate_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.tinfo = tinfo;
+        this.sessID = sessID;
         this.mutations = mutations;
         this.symbols = symbols;
       }
@@ -1279,15 +1375,14 @@ import org.slf4j.LoggerFactory;
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("conditionalUpdate", org.apache.thrift.protocol.TMessageType.CALL, 0));
         conditionalUpdate_args args = new conditionalUpdate_args();
         args.setTinfo(tinfo);
-        args.setCredentials(credentials);
-        args.setAuthorizations(authorizations);
+        args.setSessID(sessID);
         args.setMutations(mutations);
         args.setSymbols(symbols);
         args.write(prot);
         prot.writeMessageEnd();
       }
 
-      public List<org.apache.accumulo.core.data.thrift.TCMResult> getResult() throws org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException, org.apache.thrift.TException {
+      public List<org.apache.accumulo.core.data.thrift.TCMResult> getResult() throws NoSuchScanIDException, org.apache.thrift.TException {
         if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
@@ -1297,6 +1392,41 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    public void invalidateConditionalUpdate(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, org.apache.thrift.async.AsyncMethodCallback<invalidateConditionalUpdate_call> resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      invalidateConditionalUpdate_call method_call = new invalidateConditionalUpdate_call(tinfo, sessID, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class invalidateConditionalUpdate_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private org.apache.accumulo.trace.thrift.TInfo tinfo;
+      private long sessID;
+      public invalidateConditionalUpdate_call(org.apache.accumulo.trace.thrift.TInfo tinfo, long sessID, org.apache.thrift.async.AsyncMethodCallback<invalidateConditionalUpdate_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.tinfo = tinfo;
+        this.sessID = sessID;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("invalidateConditionalUpdate", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        invalidateConditionalUpdate_args args = new invalidateConditionalUpdate_args();
+        args.setTinfo(tinfo);
+        args.setSessID(sessID);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_invalidateConditionalUpdate();
+      }
+    }
+
     public void bulkImport(org.apache.accumulo.trace.thrift.TInfo tinfo, org.apache.accumulo.core.security.thrift.TCredentials credentials, long tid, Map<org.apache.accumulo.core.data.thrift.TKeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>> files, boolean setTime, org.apache.thrift.async.AsyncMethodCallback<bulkImport_call> resultHandler) throws org.apache.thrift.TException {
       checkReady();
       bulkImport_call method_call = new bulkImport_call(tinfo, credentials, tid, files, setTime, resultHandler, this, ___protocolFactory, ___transport);
@@ -1950,7 +2080,9 @@ import org.slf4j.LoggerFactory;
       processMap.put("applyUpdates", new applyUpdates());
       processMap.put("closeUpdate", new closeUpdate());
       processMap.put("update", new update());
+      processMap.put("startConditionalUpdate", new startConditionalUpdate());
       processMap.put("conditionalUpdate", new conditionalUpdate());
+      processMap.put("invalidateConditionalUpdate", new invalidateConditionalUpdate());
       processMap.put("bulkImport", new bulkImport());
       processMap.put("splitTablet", new splitTablet());
       processMap.put("loadTablet", new loadTablet());
@@ -2213,6 +2345,31 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    public static class startConditionalUpdate<I extends Iface> extends org.apache.thrift.ProcessFunction<I, startConditionalUpdate_args> {
+      public startConditionalUpdate() {
+        super("startConditionalUpdate");
+      }
+
+      public startConditionalUpdate_args getEmptyArgsInstance() {
+        return new startConditionalUpdate_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public startConditionalUpdate_result getResult(I iface, startConditionalUpdate_args args) throws org.apache.thrift.TException {
+        startConditionalUpdate_result result = new startConditionalUpdate_result();
+        try {
+          result.success = iface.startConditionalUpdate(args.tinfo, args.credentials, args.authorizations, args.tableID);
+          result.setSuccessIsSet(true);
+        } catch (org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException sec) {
+          result.sec = sec;
+        }
+        return result;
+      }
+    }
+
     public static class conditionalUpdate<I extends Iface> extends org.apache.thrift.ProcessFunction<I, conditionalUpdate_args> {
       public conditionalUpdate() {
         super("conditionalUpdate");
@@ -2229,14 +2386,34 @@ import org.slf4j.LoggerFactory;
       public conditionalUpdate_result getResult(I iface, conditionalUpdate_args args) throws org.apache.thrift.TException {
         conditionalUpdate_result result = new conditionalUpdate_result();
         try {
-          result.success = iface.conditionalUpdate(args.tinfo, args.credentials, args.authorizations, args.mutations, args.symbols);
-        } catch (org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException sec) {
-          result.sec = sec;
+          result.success = iface.conditionalUpdate(args.tinfo, args.sessID, args.mutations, args.symbols);
+        } catch (NoSuchScanIDException nssi) {
+          result.nssi = nssi;
         }
         return result;
       }
     }
 
+    public static class invalidateConditionalUpdate<I extends Iface> extends org.apache.thrift.ProcessFunction<I, invalidateConditionalUpdate_args> {
+      public invalidateConditionalUpdate() {
+        super("invalidateConditionalUpdate");
+      }
+
+      public invalidateConditionalUpdate_args getEmptyArgsInstance() {
+        return new invalidateConditionalUpdate_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public invalidateConditionalUpdate_result getResult(I iface, invalidateConditionalUpdate_args args) throws org.apache.thrift.TException {
+        invalidateConditionalUpdate_result result = new invalidateConditionalUpdate_result();
+        iface.invalidateConditionalUpdate(args.tinfo, args.sessID);
+        return result;
+      }
+    }
+
     public static class bulkImport<I extends Iface> extends org.apache.thrift.ProcessFunction<I, bulkImport_args> {
       public bulkImport() {
         super("bulkImport");
@@ -13868,34 +14045,31 @@ import org.slf4j.LoggerFactory;
 
   }
 
-  public static class conditionalUpdate_args implements org.apache.thrift.TBase<conditionalUpdate_args, conditionalUpdate_args._Fields>, java.io.Serializable, Cloneable   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("conditionalUpdate_args");
+  public static class startConditionalUpdate_args implements org.apache.thrift.TBase<startConditionalUpdate_args, startConditionalUpdate_args._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("startConditionalUpdate_args");
 
     private static final org.apache.thrift.protocol.TField TINFO_FIELD_DESC = new org.apache.thrift.protocol.TField("tinfo", org.apache.thrift.protocol.TType.STRUCT, (short)1);
     private static final org.apache.thrift.protocol.TField CREDENTIALS_FIELD_DESC = new org.apache.thrift.protocol.TField("credentials", org.apache.thrift.protocol.TType.STRUCT, (short)2);
     private static final org.apache.thrift.protocol.TField AUTHORIZATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("authorizations", org.apache.thrift.protocol.TType.LIST, (short)3);
-    private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.MAP, (short)4);
-    private static final org.apache.thrift.protocol.TField SYMBOLS_FIELD_DESC = new org.apache.thrift.protocol.TField("symbols", org.apache.thrift.protocol.TType.LIST, (short)5);
+    private static final org.apache.thrift.protocol.TField TABLE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("tableID", org.apache.thrift.protocol.TType.STRING, (short)4);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new conditionalUpdate_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new conditionalUpdate_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new startConditionalUpdate_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new startConditionalUpdate_argsTupleSchemeFactory());
     }
 
     public org.apache.accumulo.trace.thrift.TInfo tinfo; // required
     public org.apache.accumulo.core.security.thrift.TCredentials credentials; // required
     public List<ByteBuffer> authorizations; // required
-    public Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations; // required
-    public List<String> symbols; // required
+    public String tableID; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
       TINFO((short)1, "tinfo"),
       CREDENTIALS((short)2, "credentials"),
       AUTHORIZATIONS((short)3, "authorizations"),
-      MUTATIONS((short)4, "mutations"),
-      SYMBOLS((short)5, "symbols");
+      TABLE_ID((short)4, "tableID");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -13916,10 +14090,8 @@ import org.slf4j.LoggerFactory;
             return CREDENTIALS;
           case 3: // AUTHORIZATIONS
             return AUTHORIZATIONS;
-          case 4: // MUTATIONS
-            return MUTATIONS;
-          case 5: // SYMBOLS
-            return SYMBOLS;
+          case 4: // TABLE_ID
+            return TABLE_ID;
           default:
             return null;
         }
@@ -13970,37 +14142,32 @@ import org.slf4j.LoggerFactory;
       tmpMap.put(_Fields.AUTHORIZATIONS, new org.apache.thrift.meta_data.FieldMetaData("authorizations", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
               new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , true))));
-      tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.MAP          , "CMBatch")));
-      tmpMap.put(_Fields.SYMBOLS, new org.apache.thrift.meta_data.FieldMetaData("symbols", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+      tmpMap.put(_Fields.TABLE_ID, new org.apache.thrift.meta_data.FieldMetaData("tableID", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(conditionalUpdate_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(startConditionalUpdate_args.class, metaDataMap);
     }
 
-    public conditionalUpdate_args() {
+    public startConditionalUpdate_args() {
     }
 
-    public conditionalUpdate_args(
+    public startConditionalUpdate_args(
       org.apache.accumulo.trace.thrift.TInfo tinfo,
       org.apache.accumulo.core.security.thrift.TCredentials credentials,
       List<ByteBuffer> authorizations,
-      Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations,
-      List<String> symbols)
+      String tableID)
     {
       this();
       this.tinfo = tinfo;
       this.credentials = credentials;
       this.authorizations = authorizations;
-      this.mutations = mutations;
-      this.symbols = symbols;
+      this.tableID = tableID;
     }
 
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public conditionalUpdate_args(conditionalUpdate_args other) {
+    public startConditionalUpdate_args(startConditionalUpdate_args other) {
       if (other.isSetTinfo()) {
         this.tinfo = new org.apache.accumulo.trace.thrift.TInfo(other.tinfo);
       }
@@ -14016,20 +14183,13 @@ import org.slf4j.LoggerFactory;
         }
         this.authorizations = __this__authorizations;
       }
-      if (other.isSetMutations()) {
-        this.mutations = other.mutations;
-      }
-      if (other.isSetSymbols()) {
-        List<String> __this__symbols = new ArrayList<String>();
-        for (String other_element : other.symbols) {
-          __this__symbols.add(other_element);
-        }
-        this.symbols = __this__symbols;
+      if (other.isSetTableID()) {
+        this.tableID = other.tableID;
       }
     }
 
-    public conditionalUpdate_args deepCopy() {
-      return new conditionalUpdate_args(this);
+    public startConditionalUpdate_args deepCopy() {
+      return new startConditionalUpdate_args(this);
     }
 
     @Override
@@ -14037,15 +14197,14 @@ import org.slf4j.LoggerFactory;
       this.tinfo = null;
       this.credentials = null;
       this.authorizations = null;
-      this.mutations = null;
-      this.symbols = null;
+      this.tableID = null;
     }
 
     public org.apache.accumulo.trace.thrift.TInfo getTinfo() {
       return this.tinfo;
     }
 
-    public conditionalUpdate_args setTinfo(org.apache.accumulo.trace.thrift.TInfo tinfo) {
+    public startConditionalUpdate_args setTinfo(org.apache.accumulo.trace.thrift.TInfo tinfo) {
       this.tinfo = tinfo;
       return this;
     }
@@ -14069,7 +14228,7 @@ import org.slf4j.LoggerFactory;
       return this.credentials;
     }
 
-    public conditionalUpdate_args setCredentials(org.apache.accumulo.core.security.thrift.TCredentials credentials) {
+    public startConditionalUpdate_args setCredentials(org.apache.accumulo.core.security.thrift.TCredentials credentials) {
       this.credentials = credentials;
       return this;
     }
@@ -14108,7 +14267,7 @@ import org.slf4j.LoggerFactory;
       return this.authorizations;
     }
 
-    public conditionalUpdate_args setAuthorizations(List<ByteBuffer> authorizations) {
+    public startConditionalUpdate_args setAuthorizations(List<ByteBuffer> authorizations) {
       this.authorizations = authorizations;
       return this;
     }
@@ -14128,77 +14287,27 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    public int getMutationsSize() {
-      return (this.mutations == null) ? 0 : this.mutations.size();
-    }
-
-    public void putToMutations(org.apache.accumulo.core.data.thrift.TKeyExtent key, List<org.apache.accumulo.core.data.thrift.TConditionalMutation> val) {
-      if (this.mutations == null) {
-        this.mutations = new HashMap<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>>();
-      }
-      this.mutations.put(key, val);
-    }
-
-    public Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> getMutations() {
-      return this.mutations;
-    }
-
-    public conditionalUpdate_args setMutations(Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations) {
-      this.mutations = mutations;
-      return this;
-    }
-
-    public void unsetMutations() {
-      this.mutations = null;
-    }
-
-    /** Returns true if field mutations is set (has been assigned a value) and false otherwise */
-    public boolean isSetMutations() {
-      return this.mutations != null;
-    }
-
-    public void setMutationsIsSet(boolean value) {
-      if (!value) {
-        this.mutations = null;
-      }
-    }
-
-    public int getSymbolsSize() {
-      return (this.symbols == null) ? 0 : this.symbols.size();
-    }
-
-    public java.util.Iterator<String> getSymbolsIterator() {
-      return (this.symbols == null) ? null : this.symbols.iterator();
-    }
-
-    public void addToSymbols(String elem) {
-      if (this.symbols == null) {
-        this.symbols = new ArrayList<String>();
-      }
-      this.symbols.add(elem);
-    }
-
-    public List<String> getSymbols() {
-      return this.symbols;
+    public String getTableID() {
+      return this.tableID;
     }
 
-    public conditionalUpdate_args setSymbols(List<String> symbols) {
-      this.symbols = symbols;
+    public startConditionalUpdate_args setTableID(String tableID) {
+      this.tableID = tableID;
       return this;
     }
 
-    public void unsetSymbols() {
-      this.symbols = null;
+    public void unsetTableID() {
+      this.tableID = null;
     }
 
-    /** Returns true if field symbols is set (has been assigned a value) and false otherwise */
-    public boolean isSetSymbols() {
-      return this.symbols != null;
+    /** Returns true if field tableID is set (has been assigned a value) and false otherwise */
+    public boolean isSetTableID() {
+      return this.tableID != null;
     }
 
-    public void setSymbolsIsSet(boolean value) {
+    public void setTableIDIsSet(boolean value) {
       if (!value) {
-        this.symbols = null;
+        this.tableID = null;
       }
     }
 
@@ -14228,19 +14337,11 @@ import org.slf4j.LoggerFactory;
         }
         break;
 
-      case MUTATIONS:
-        if (value == null) {
-          unsetMutations();
-        } else {
-          setMutations((Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>>)value);
-        }
-        break;
-
-      case SYMBOLS:
+      case TABLE_ID:
         if (value == null) {
-          unsetSymbols();
+          unsetTableID();
         } else {
-          setSymbols((List<String>)value);
+          setTableID((String)value);
         }
         break;
 
@@ -14258,11 +14359,8 @@ import org.slf4j.LoggerFactory;
       case AUTHORIZATIONS:
         return getAuthorizations();
 
-      case MUTATIONS:
-        return getMutations();
-
-      case SYMBOLS:
-        return getSymbols();
+      case TABLE_ID:
+        return getTableID();
 
       }
       throw new IllegalStateException();
@@ -14281,10 +14379,8 @@ import org.slf4j.LoggerFactory;
         return isSetCredentials();
       case AUTHORIZATIONS:
         return isSetAuthorizations();
-      case MUTATIONS:
-        return isSetMutations();
-      case SYMBOLS:
-        return isSetSymbols();
+      case TABLE_ID:
+        return isSetTableID();
       }
       throw new IllegalStateException();
     }
@@ -14293,12 +14389,12 @@ import org.slf4j.LoggerFactory;
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof conditionalUpdate_args)
-        return this.equals((conditionalUpdate_args)that);
+      if (that instanceof startConditionalUpdate_args)
+        return this.equals((startConditionalUpdate_args)that);
       return false;
     }
 
-    public boolean equals(conditionalUpdate_args that) {
+    public boolean equals(startConditionalUpdate_args that) {
       if (that == null)
         return false;
 
@@ -14329,21 +14425,12 @@ import org.slf4j.LoggerFactory;
           return false;
       }
 
-      boolean this_present_mutations = true && this.isSetMutations();
-      boolean that_present_mutations = true && that.isSetMutations();
-      if (this_present_mutations || that_present_mutations) {
-        if (!(this_present_mutations && that_present_mutations))
-          return false;
-        if (!this.mutations.equals(that.mutations))
-          return false;
-      }
-
-      boolean this_present_symbols = true && this.isSetSymbols();
-      boolean that_present_symbols = true && that.isSetSymbols();
-      if (this_present_symbols || that_present_symbols) {
-        if (!(this_present_symbols && that_present_symbols))
+      boolean this_present_tableID = true && this.isSetTableID();
+      boolean that_present_tableID = true && that.isSetTableID();
+      if (this_present_tableID || that_present_tableID) {
+        if (!(this_present_tableID && that_present_tableID))
           return false;
-        if (!this.symbols.equals(that.symbols))
+        if (!this.tableID.equals(that.tableID))
           return false;
       }
 
@@ -14355,13 +14442,13 @@ import org.slf4j.LoggerFactory;
       return 0;
     }
 
-    public int compareTo(conditionalUpdate_args other) {
+    public int compareTo(startConditionalUpdate_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
 
       int lastComparison = 0;
-      conditionalUpdate_args typedOther = (conditionalUpdate_args)other;
+      startConditionalUpdate_args typedOther = (startConditionalUpdate_args)other;
 
       lastComparison = Boolean.valueOf(isSetTinfo()).compareTo(typedOther.isSetTinfo());
       if (lastComparison != 0) {
@@ -14393,22 +14480,12 @@ import org.slf4j.LoggerFactory;
           return lastComparison;
         }
       }
-      lastComparison = Boolean.valueOf(isSetMutations()).compareTo(typedOther.isSetMutations());
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-      if (isSetMutations()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mutations, typedOther.mutations);
-        if (lastComparison != 0) {
-          return lastComparison;
-        }
-      }
-      lastComparison = Boolean.valueOf(isSetSymbols()).compareTo(typedOther.isSetSymbols());
+      lastComparison = Boolean.valueOf(isSetTableID()).compareTo(typedOther.isSetTableID());
       if (lastComparison != 0) {
         return lastComparison;
       }
-      if (isSetSymbols()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.symbols, typedOther.symbols);
+      if (isSetTableID()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableID, typedOther.tableID);
         if (lastComparison != 0) {
           return lastComparison;
         }
@@ -14430,7 +14507,7 @@ import org.slf4j.LoggerFactory;
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("conditionalUpdate_args(");
+      StringBuilder sb = new StringBuilder("startConditionalUpdate_args(");
       boolean first = true;
 
       sb.append("tinfo:");
@@ -14457,19 +14534,11 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
       if (!first) sb.append(", ");
-      sb.append("mutations:");
-      if (this.mutations == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.mutations);
-      }
-      first = false;
-      if (!first) sb.append(", ");
-      sb.append("symbols:");
-      if (this.symbols == null) {
+      sb.append("tableID:");
+      if (this.tableID == null) {
         sb.append("null");
       } else {
-        sb.append(this.symbols);
+        sb.append(this.tableID);
       }
       first = false;
       sb.append(")");
@@ -14503,15 +14572,15 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    private static class conditionalUpdate_argsStandardSchemeFactory implements SchemeFactory {
-      public conditionalUpdate_argsStandardScheme getScheme() {
-        return new conditionalUpdate_argsStandardScheme();
+    private static class startConditionalUpdate_argsStandardSchemeFactory implements SchemeFactory {
+      public startConditionalUpdate_argsStandardScheme getScheme() {
+        return new startConditionalUpdate_argsStandardScheme();
       }
     }
 
-    private static class conditionalUpdate_argsStandardScheme extends StandardScheme<conditionalUpdate_args> {
+    private static class startConditionalUpdate_argsStandardScheme extends StandardScheme<startConditionalUpdate_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, conditionalUpdate_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, startConditionalUpdate_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -14557,52 +14626,10 @@ import org.slf4j.LoggerFactory;
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
               break;
-            case 4: // MUTATIONS
-              if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-                {
-                  org.apache.thrift.protocol.TMap _map223 = iprot.readMapBegin();
-                  struct.mutations = new HashMap<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>>(2*_map223.size);
-                  for (int _i224 = 0; _i224 < _map223.size; ++_i224)
-                  {
-                    org.apache.accumulo.core.data.thrift.TKeyExtent _key225; // required
-                    List<org.apache.accumulo.core.data.thrift.TConditionalMutation> _val226; // required
-                    _key225 = new org.apache.accumulo.core.data.thrift.TKeyExtent();
-                    _key225.read(iprot);
-                    {
-                      org.apache.thrift.protocol.TList _list227 = iprot.readListBegin();
-                      _val226 = new ArrayList<org.apache.accumulo.core.data.thrift.TConditionalMutation>(_list227.size);
-                      for (int _i228 = 0; _i228 < _list227.size; ++_i228)
-                      {
-                        org.apache.accumulo.core.data.thrift.TConditionalMutation _elem229; // required
-                        _elem229 = new org.apache.accumulo.core.data.thrift.TConditionalMutation();
-                        _elem229.read(iprot);
-                        _val226.add(_elem229);
-                      }
-                      iprot.readListEnd();
-                    }
-                    struct.mutations.put(_key225, _val226);
-                  }
-                  iprot.readMapEnd();
-                }
-                struct.setMutationsIsSet(true);
-              } else { 
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
-            case 5: // SYMBOLS
-              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-                {
-                  org.apache.thrift.protocol.TList _list230 = iprot.readListBegin();
-                  struct.symbols = new ArrayList<String>(_list230.size);
-                  for (int _i231 = 0; _i231 < _list230.size; ++_i231)
-                  {
-                    String _elem232; // required
-                    _elem232 = iprot.readString();
-                    struct.symbols.add(_elem232);
-                  }
-                  iprot.readListEnd();
-                }
-                struct.setSymbolsIsSet(true);
+            case 4: // TABLE_ID
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.tableID = iprot.readString();
+                struct.setTableIDIsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
@@ -14618,7 +14645,7 @@ import org.slf4j.LoggerFactory;
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, conditionalUpdate_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, startConditionalUpdate_args struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -14636,44 +14663,17 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(AUTHORIZATIONS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.authorizations.size()));
-            for (ByteBuffer _iter233 : struct.authorizations)
+            for (ByteBuffer _iter223 : struct.authorizations)
             {
-              oprot.writeBinary(_iter233);
+              oprot.writeBinary(_iter223);
             }
             oprot.writeListEnd();
           }
           oprot.writeFieldEnd();
         }
-        if (struct.mutations != null) {
-          oprot.writeFieldBegin(MUTATIONS_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.LIST, struct.mutations.size()));
-            for (Map.Entry<org.apache.accumulo.core.data.thrift.TKeyExtent, List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> _iter234 : struct.mutations.entrySet())
-            {
-              _iter234.getKey().write(oprot);
-              {
-                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter234.getValue().size()));
-                for (org.apache.accumulo.core.data.thrift.TConditionalMutation _iter235 : _iter234.getValue())
-                {
-                  _iter235.write(oprot);
-                }
-                oprot.writeListEnd();
-              }
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-        if (struct.symbols != null) {
-          oprot.writeFieldBegin(SYMBOLS_FIELD_DESC);
-          {
-            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.symbols.size()));
-            for (String _iter236 : struct.symbols)
-            {
-              oprot.writeString(_iter236);
-            }
-            oprot.writeListEnd();
-          }
+        if (struct.tableID != null) {
+          oprot.writeFieldBegin(TABLE_ID_FIELD_DESC);
+          oprot.writeString(struct.tableID);
           oprot.writeFieldEnd();
         }
         oprot.writeFieldStop();
@@ -14682,16 +14682,16 @@ import org.slf4j.LoggerFactory;
 
     }
 
-    private static class conditionalUpdate_argsTupleSchemeFactory implements SchemeFactory {
-      public conditionalUpdate_argsTupleScheme getScheme() {
-        return new conditionalUpdate_argsTupleScheme();
+    private static class startConditionalUpdate_argsTupleSchemeFactory implements SchemeFactory {
+      public startConditionalUpdate_argsTupleScheme getScheme() {
+        return new startConditionalUpdate_argsTupleScheme();
       }
     }
 
-    private static class conditionalUpdate_argsTupleScheme extends TupleScheme<conditionalUpdate_args> {
+    private static class startConditionalUpdate_argsTupleScheme extends TupleScheme<startConditionalUpdate_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, conditionalUpdate_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, startConditionalUpdate_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetTinfo()) {
@@ -14703,13 +14703,10 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetAuthorizations()) {
           optionals.set(2);
         }
-        if (struct.isSetMutations()) {
+        if (struct.isSetTableID()) {
           optionals.set(3);
         }
-        if (struct.isSetSymbols()) {
-          optionals.set(4);
-        }
-        oprot.writeBitSet(optionals, 5);
+        oprot.writeBitSet(optionals, 4);
         if (struct.isSetTinfo()) {
           struct.tinfo.write(oprot);
         }
@@ -14719,43 +14716,21 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetAuthorizations()) {
           {
             oprot.writeI32(struct.authorizations.size());
-            for (ByteBuffer _iter237 : struct.authorizations)
-            {
-              oprot.writeBinary(_iter237);
-            }
-          }
-        }
-        if (struct.isSetMutations()) {
-          {
-            oprot.writeI32(struct.mutations.size());
-            for (Map.Entry<org.apache.accumulo.core.data.thrift.TKeyExtent, List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> _iter238 : struct.mutations.entrySet())
+            for (ByteBuffer _iter224 : struct.authorizations)
             {
-              _iter238.getKey().write(oprot);
-              {
-                oprot.writeI32(_iter238.getValue().size());
-                for (org.apache.accumulo.core.data.thrift.TConditionalMutation _iter239 : _iter238.getValue())
-                {
-                  _iter239.write(oprot);
-                }
-              }
+              oprot.writeBinary(_iter224);
             }
           }
         }
-        if (struct.isSetSymbols()) {
-          {
-            oprot.writeI32(struct.symbols.size());
-            for (String _iter240 : struct.symbols)
-            {
-              oprot.writeString(_iter240);
-            }
-          }
+        if (struct.isSetTableID()) {
+          oprot.writeString(struct.tableID);
         }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, conditionalUpdate_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, startConditionalUpdate_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(5);
+        BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           struct.tinfo = new org.apache.accumulo.trace.thrift.TInfo();
           struct.tinfo.read(iprot);
@@ -14768,74 +14743,39 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list241 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.authorizations = new ArrayList<ByteBuffer>(_list241.size);
-            for (int _i242 = 0; _i242 < _list241.size; ++_i242)
+            org.apache.thrift.protocol.TList _list225 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.authorizations = new ArrayList<ByteBuffer>(_list225.size);
+            for (int _i226 = 0; _i226 < _list225.size; ++_i226)
             {
-              ByteBuffer _elem243; // required
-              _elem243 = iprot.readBinary();
-              struct.authorizations.add(_elem243);
+              ByteBuffer _elem227; // required
+              _elem227 = iprot.readBinary();
+              struct.authorizations.add(_elem227);
             }
           }
           struct.setAuthorizationsIsSet(true);
         }
         if (incoming.get(3)) {
-          {
-            org.apache.thrift.protocol.TMap _map244 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
-            struct.mutations = new HashMap<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>>(2*_map244.size);
-            for (int _i245 = 0; _i245 < _map244.size; ++_i245)
-            {
-              org.apache.accumulo.core.data.thrift.TKeyExtent _key246; // required
-              List<org.apache.accumulo.core.data.thrift.TConditionalMutation> _val247; // required
-              _key246 = new org.apache.accumulo.core.data.thrift.TKeyExtent();
-              _key246.read(iprot);
-              {
-                org.apache.thrift.protocol.TList _list248 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-                _val247 = new ArrayList<org.apache.accumulo.core.data.thrift.TConditionalMutation>(_list248.size);
-                for (int _i249 = 0; _i249 < _list248.size; ++_i249)
-                {
-                  org.apache.accumulo.core.data.thrift.TConditionalMutation _elem250; // required
-                  _elem250 = new org.apache.accumulo.core.data.thrift.TConditionalMutation();
-                  _elem250.read(iprot);
-                  _val247.add(_elem250);
-                }
-              }
-              struct.mutations.put(_key246, _val247);
-            }
-          }
-          struct.setMutationsIsSet(true);
-        }
-        if (incoming.get(4)) {
-          {
-            org.apache.thrift.protocol.TList _list251 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.symbols = new ArrayList<String>(_list251.size);
-            for (int _i252 = 0; _i252 < _list251.size; ++_i252)
-            {
-              String _elem253; // required
-              _elem253 = iprot.readString();
-              struct.symbols.add(_elem253);
-            }
-          }
-          struct.setSymbolsIsSet(true);
+          struct.tableID = iprot.readString();
+          struct.setTableIDIsSet(true);
         }
       }
     }
 
   }
 
-  public static class conditionalUpdate_result implements org.apache.thrift.TBase<conditionalUpdate_result, conditionalUpdate_result._Fields>, java.io.Serializable, Cloneable   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("conditionalUpdate_result");
+  public static class startConditionalUpdate_result implements org.apache.thrift.TBase<startConditionalUpdate_result, startConditionalUpdate_result._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("startConditionalUpdate_result");
 
-    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64, (short)0);
     private static final org.apache.thrift.protocol.TField SEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sec", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new conditionalUpdate_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new conditionalUpdate_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new startConditionalUpdate_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new startConditionalUpdate_resultTupleSchemeFactory());
     }
 
-    public List<org.apache.accumulo.core.data.thrift.TCMResult> success; // required
+    public long success; // required
     public org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException sec; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -14900,100 +14840,82 @@ import org.slf4j.LoggerFactory;
     }
 
     // isset id assignments
+    private static final int __SUCCESS_ISSET_ID = 0;
+    private byte __isset_bitfield = 0;
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.data.thrift.TCMResult.class))));
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64          , "UpdateID")));
       tmpMap.put(_Fields.SEC, new org.apache.thrift.meta_data.FieldMetaData("sec", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(conditionalUpdate_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(startConditionalUpdate_result.class, metaDataMap);
     }
 
-    public conditionalUpdate_result() {
+    public startConditionalUpdate_result() {
     }
 
-    public conditionalUpdate_result(
-      List<org.apache.accumulo.core.data.thrift.TCMResult> success,
+    public startConditionalUpdate_result(
+      long success,
       org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException sec)
     {
       this();
       this.success = success;
+      setSuccessIsSet(true);
       this.sec = sec;
     }
 
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public conditionalUpdate_result(conditionalUpdate_result other) {
-      if (other.isSetSuccess()) {
-        List<org.apache.accumulo.core.data.thrift.TCMResult> __this__success = new ArrayList<org.apache.accumulo.core.data.thrift.TCMResult>();
-        for (org.apache.accumulo.core.data.thrift.TCMResult other_element : other.success) {
-          __this__success.add(new org.apache.accumulo.core.data.thrift.TCMResult(other_element));
-        }
-        this.success = __this__success;
-      }
+    public startConditionalUpdate_result(startConditionalUpdate_result other) {
+      __isset_bitfield = other.__isset_bitfield;
+      this.success = other.success;
       if (other.isSetSec()) {
         this.sec = new org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException(other.sec);
       }
     }
 
-    public conditionalUpdate_result deepCopy() {
-      return new conditionalUpdate_result(this);
+    public startConditionalUpdate_result deepCopy() {
+      return new startConditionalUpdate_result(this);
     }
 
     @Override
     public void clear() {
-      this.success = null;
+      setSuccessIsSet(false);
+      this.success = 0;
       this.sec = null;
     }
 
-    public int getSuccessSize() {
-      return (this.success == null) ? 0 : this.success.size();
-    }
-
-    public java.util.Iterator<org.apache.accumulo.core.data.thrift.TCMResult> getSuccessIterator() {
-      return (this.success == null) ? null : this.success.iterator();
-    }
-
-    public void addToSuccess(org.apache.accumulo.core.data.thrift.TCMResult elem) {
-      if (this.success == null) {
-        this.success = new ArrayList<org.apache.accumulo.core.data.thrift.TCMResult>();
-      }
-      this.success.add(elem);
-    }
-
-    public List<org.apache.accumulo.core.data.thrift.TCMResult> getSuccess() {
+    public long getSuccess() {
       return this.success;
     }
 
-    public conditionalUpdate_result setSuccess(List<org.apache.accumulo.core.data.thrift.TCMResult> success) {
+    public startConditionalUpdate_result setSuccess(long success) {
       this.success = success;
+      setSuccessIsSet(true);
       return this;
     }
 
     public void unsetSuccess() {
-      this.success = null;
+      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
     }
 
     /** Returns true if field success is set (has been assigned a value) and false otherwise */
     public boolean isSetSuccess() {
-      return this.success != null;
+      return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
     }
 
     public void setSuccessIsSet(boolean value) {
-      if (!value) {
-        this.success = null;
-      }
+      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
     }
 
     public org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException getSec() {
       return this.sec;
     }
 
-    public conditionalUpdate_result setSec(org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException sec) {
+    public startConditionalUpdate_result setSec(org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException sec) {
       this.sec = sec;
       return this;
     }
@@ -15019,7 +14941,7 @@ import org.slf4j.LoggerFactory;
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((List<org.apache.accumulo.core.data.thrift.TCMResult>)value);
+          setSuccess((Long)value);
         }
         break;
 
@@ -15037,7 +14959,7 @@ import org.slf4j.LoggerFactory;
     public Object getFieldValue(_Fields field) {
       switch (field) {
       case SUCCESS:
-        return getSuccess();
+        return Long.valueOf(getSuccess());
 
       case SEC:
         return getSec();
@@ -15065,21 +14987,21 @@ import org.slf4j.LoggerFactory;
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof conditionalUpdate_result)
-        return this.equals((conditionalUpdate_result)that);
+      if (that instanceof startConditionalUpdate_result)
+        return this.equals((startConditionalUpdate_result)that);
       return false;
     }
 
-    public boolean equals(conditionalUpdate_result that) {
+    public boolean equals(startConditionalUpdate_result that) {
       if (that == null)
         return false;
 
-      boolean this_present_success = true && this.isSetSuccess();
-      boolean that_present_success = true && that.isSetSuccess();
+      boolean this_present_success = true;
+      boolean that_present_success = true;
       if (this_present_success || that_present_success) {
         if (!(this_present_success && that_present_success))
           return false;
-        if (!this.success.equals(that.success))
+        if (this.success != that.success)
           return false;
       }
 
@@ -15100,13 +15022,13 @@ import org.slf4j.LoggerFactory;
       return 0;
     }
 
-    public int compareTo(conditionalUpdate_result other) {
+    public int compareTo(startConditionalUpdate_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
 
       int lastComparison = 0;
-      conditionalUpdate_result typedOther = (conditionalUpdate_result)other;
+      startConditionalUpdate_result typedOther = (startConditionalUpdate_result)other;
 
       lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess());
       if (lastComparison != 0) {
@@ -15145,15 +15067,11 @@ import org.slf4j.LoggerFactory;
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("conditionalUpdate_result(");
+      StringBuilder sb = new StringBuilder("startConditionalUpdate_result(");
       boolean first = true;
 
       sb.append("success:");
-      if (this.success == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.success);
-      }
+      sb.append(this.success);
       first = false;
       if (!first) sb.append(", ");
       sb.append("sec:");
@@ -15182,21 +15100,23 @@ import org.slf4j.LoggerFactory;
 
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
+        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+        __isset_bitfield = 0;
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
       } catch (org.apache.thrift.TException te) {
         throw new java.io.IOException(te);
       }
     }
 
-    private static class conditionalUpdate_resultStandardSchemeFactory implements SchemeFactory {
-      public conditionalUpdate_resultStandardScheme getScheme() {
-        return new conditionalUpdate_resultStandardScheme();
+    private static class startConditionalUpdate_resultStandardSchemeFactory implements SchemeFactory {
+      public startConditionalUpdate_resultStandardScheme getScheme() {
+        return new startConditionalUpdate_resultStandardScheme();
       }
     }
 
-    private static class conditionalUpdate_resultStandardScheme extends StandardScheme<conditionalUpdate_result> {
+    private static class startConditionalUpdate_resultStandardScheme extends StandardScheme<startConditionalUpdate_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, conditionalUpdate_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, startConditionalUpdate_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -15207,19 +15127,8 @@ import org.slf4j.LoggerFactory;
           }
           switch (schemeField.id) {
             case 0: // SUCCESS
-              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-                {
-                  org.apache.thrift.protocol.TList _list254 = iprot.readListBegin();
-                  struct.success = new ArrayList<org.apache.accumulo.core.data.thrift.TCMResult>(_list254.size);
-                  for (int _i255 = 0; _i255 < _list254.size; ++_i255)
-                  {
-                    org.apache.accumulo.core.data.thrift.TCMResult _elem256; // required
-                    _elem256 = new org.apache.accumulo.core.data.thrift.TCMResult();
-                    _elem256.read(iprot);
-                    struct.success.add(_elem256);
-                  }
-                  iprot.readListEnd();
-                }
+              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+                struct.success = iprot.readI64();
                 struct.setSuccessIsSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
@@ -15245,20 +15154,13 @@ import org.slf4j.LoggerFactory;
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, conditionalUpdate_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, startConditionalUpdate_result struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.success != null) {
+        if (struct.isSetSuccess()) {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
-          {
-            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (org.apache.accumulo.core.data.thrift.TCMResult _iter257 : struct.success)
-            {
-              _iter257.write(oprot);
-            }
-            oprot.writeListEnd();
-          }
+          oprot.writeI64(struct.success);
           oprot.writeFieldEnd();
         }
         if (struct.sec != null) {
@@ -15272,16 +15174,16 @@ import org.slf4j.LoggerFactory;
 
     }
 
-    private static class conditionalUpdate_resultTupleSchemeFactory implements SchemeFactory {
-      public conditionalUpdate_resultTupleScheme getScheme() {
-        return new conditionalUpdate_resultTupleScheme();
+    private static class startConditionalUpdate_resultTupleSchemeFactory implements SchemeFactory {
+      public startConditionalUpdate_resultTupleScheme getScheme() {
+        return new startConditionalUpdate_resultTupleScheme();
       }
     }
 
-    private static class conditionalUpdate_resultTupleScheme extends TupleScheme<conditionalUpdate_result> {
+    private static class startConditionalUpdate_resultTupleScheme extends TupleScheme<startConditionalUpdate_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, conditionalUpdate_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, startConditionalUpdate_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetSuccess()) {
@@ -15292,13 +15194,7 @@ import org.slf4j.LoggerFactory;
         }
         oprot.writeBitSet(optionals, 2);
         if (struct.isSetSuccess()) {
-          {
-            oprot.writeI32(struct.success.size());
-            for (org.apache.accumulo.core.data.thrift.TCMResult _iter258 : struct.success)
-            {
-              _iter258.write(oprot);
-            }
-          }
+          oprot.writeI64(struct.success);
         }
         if (struct.isSetSec()) {
           struct.sec.write(oprot);
@@ -15306,21 +15202,11 @@ import org.slf4j.LoggerFactory;
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, conditionalUpdate_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, startConditionalUpdate_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
-          {
-            org.apache.thrift.protocol.TList _list259 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<org.apache.accumulo.core.data.thrift.TCMResult>(_list259.size);
-            for (int _i260 = 0; _i260 < _list259.size; ++_i260)
-            {
-              org.apache.accumulo.core.data.thrift.TCMResult _elem261; // required
-              _elem261 = new org.apache.accumulo.core.data.thrift.TCMResult();
-              _elem261.read(iprot);
-              struct.success.add(_elem261);
-            }
-          }
+          struct.success = iprot.readI64();
           struct.setSuccessIsSet(true);
         }
         if (incoming.get(1)) {
@@ -15333,6 +15219,2013 @@ import org.slf4j.LoggerFactory;
 
   }
 
+  public static class conditionalUpdate_args implements org.apache.thrift.TBase<conditionalUpdate_args, conditionalUpdate_args._Fields>, java.io.Serializable, Cloneable   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("conditionalUpdate_args");
+
+    private static final org.apache.thrift.protocol.TField TINFO_FIELD_DESC = new org.apache.thrift.protocol.TField("tinfo", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+    private static final org.apache.thrift.protocol.TField SESS_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("sessID", org.apache.thrift.protocol.TType.I64, (short)2);
+    private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.MAP, (short)3);
+    private static final org.apache.thrift.protocol.TField SYMBOLS_FIELD_DESC = new org.apache.thrift.protocol.TField("symbols", org.apache.thrift.protocol.TType.LIST, (short)4);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new conditionalUpdate_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new conditionalUpdate_argsTupleSchemeFactory());
+    }
+
+    public org.apache.accumulo.trace.thrift.TInfo tinfo; // required
+    public long sessID; // required
+    public Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations; // required
+    public List<String> symbols; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      TINFO((short)1, "tinfo"),
+      SESS_ID((short)2, "sessID"),
+      MUTATIONS((short)3, "mutations"),
+      SYMBOLS((short)4, "symbols");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // TINFO
+            return TINFO;
+          case 2: // SESS_ID
+            return SESS_ID;
+          case 3: // MUTATIONS
+            return MUTATIONS;
+          case 4: // SYMBOLS
+            return SYMBOLS;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    private static final int __SESSID_ISSET_ID = 0;
+    private byte __isset_bitfield = 0;
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.TINFO, new org.apache.thrift.meta_data.FieldMetaData("tinfo", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.trace.thrift.TInfo.class)));
+      tmpMap.put(_Fields.SESS_ID, new org.apache.thrift.meta_data.FieldMetaData("sessID", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64          , "UpdateID")));
+      tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.MAP          , "CMBatch")));
+      tmpMap.put(_Fields.SYMBOLS, new org.apache.thrift.meta_data.FieldMetaData("symbols", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(conditionalUpdate_args.class, metaDataMap);
+    }
+
+    public conditionalUpdate_args() {
+    }
+
+    public conditionalUpdate_args(
+      org.apache.accumulo.trace.thrift.TInfo tinfo,
+      long sessID,
+      Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations,
+      List<String> symbols)
+    {
+      this();
+      this.tinfo = tinfo;
+      this.sessID = sessID;
+      setSessIDIsSet(true);
+      this.mutations = mutations;
+      this.symbols = symbols;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public conditionalUpdate_args(conditionalUpdate_args other) {
+      __isset_bitfield = other.__isset_bitfield;
+      if (other.isSetTinfo()) {
+        this.tinfo = new org.apache.accumulo.trace.thrift.TInfo(other.tinfo);
+      }
+      this.sessID = other.sessID;
+      if (other.isSetMutations()) {
+        this.mutations = other.mutations;
+      }
+      if (other.isSetSymbols()) {
+        List<String> __this__symbols = new ArrayList<String>();
+        for (String other_element : other.symbols) {
+          __this__symbols.add(other_element);
+        }
+        this.symbols = __this__symbols;
+      }
+    }
+
+    public conditionalUpdate_args deepCopy() {
+      return new conditionalUpdate_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.tinfo = null;
+      setSessIDIsSet(false);
+      this.sessID = 0;
+      this.mutations = null;
+      this.symbols = null;
+    }
+
+    public org.apache.accumulo.trace.thrift.TInfo getTinfo() {
+      return this.tinfo;
+    }
+
+    public conditionalUpdate_args setTinfo(org.apache.accumulo.trace.thrift.TInfo tinfo) {
+      this.tinfo = tinfo;
+      return this;
+    }
+
+    public void unsetTinfo() {
+      this.tinfo = null;
+    }
+
+    /** Returns true if field tinfo is set (has been assigned a value) and false otherwise */
+    public boolean isSetTinfo() {
+      return this.tinfo != null;
+    }
+
+    public void setTinfoIsSet(boolean value) {
+      if (!value) {
+        this.tinfo = null;
+      }
+    }
+
+    public long getSessID() {
+      return this.sessID;
+    }
+
+    public conditionalUpdate_args setSessID(long sessID) {
+      this.sessID = sessID;
+      setSessIDIsSet(true);
+      return this;
+    }
+
+    public void unsetSessID() {
+      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SESSID_ISSET_ID);
+    }
+
+    /** Returns true if field sessID is set (has been assigned a value) and false otherwise */
+    public boolean isSetSessID() {
+      return EncodingUtils.testBit(__isset_bitfield, __SESSID_ISSET_ID);
+    }
+
+    public void setSessIDIsSet(boolean value) {
+      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SESSID_ISSET_ID, value);
+    }
+
+    public int getMutationsSize() {
+      return (this.mutations == null) ? 0 : this.mutations.size();
+    }
+
+    public void putToMutations(org.apache.accumulo.core.data.thrift.TKeyExtent key, List<org.apache.accumulo.core.data.thrift.TConditionalMutation> val) {
+      if (this.mutations == null) {
+        this.mutations = new HashMap<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>>();
+      }
+      this.mutations.put(key, val);
+    }
+
+    public Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> getMutations() {
+      return this.mutations;
+    }
+
+    public conditionalUpdate_args setMutations(Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>> mutations) {
+      this.mutations = mutations;
+      return this;
+    }
+
+    public void unsetMutations() {
+      this.mutations = null;
+    }
+
+    /** Returns true if field mutations is set (has been assigned a value) and false otherwise */
+    public boolean isSetMutations() {
+      return this.mutations != null;
+    }
+
+    public void setMutationsIsSet(boolean value) {
+      if (!value) {
+        this.mutations = null;
+      }
+    }
+
+    public int getSymbolsSize() {
+      return (this.symbols == null) ? 0 : this.symbols.size();
+    }
+
+    public java.util.Iterator<String> getSymbolsIterator() {
+      return (this.symbols == null) ? null : this.symbols.iterator();
+    }
+
+    public void addToSymbols(String elem) {
+      if (this.symbols == null) {
+        this.symbols = new ArrayList<String>();
+      }
+      this.symbols.add(elem);
+    }
+
+    public List<String> getSymbols() {
+      return this.symbols;
+    }
+
+    public conditionalUpdate_args setSymbols(List<String> symbols) {
+      this.symbols = symbols;
+      return this;
+    }
+
+    public void unsetSymbols() {
+      this.symbols = null;
+    }
+
+    /** Returns true if field symbols is set (has been assigned a value) and false otherwise */
+    public boolean isSetSymbols() {
+      return this.symbols != null;
+    }
+
+    public void setSymbolsIsSet(boolean value) {
+      if (!value) {
+        this.symbols = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case TINFO:
+        if (value == null) {
+          unsetTinfo();
+        } else {
+          setTinfo((org.apache.accumulo.trace.thrift.TInfo)value);
+        }
+        break;
+
+      case SESS_ID:
+        if (value == null) {
+          unsetSessID();
+        } else {
+          setSessID((Long)value);
+        }
+        break;
+
+      case MUTATIONS:
+        if (value == null) {
+          unsetMutations();
+        } else {
+          setMutations((Map<org.apache.accumulo.core.data.thrift.TKeyExtent,List<org.apache.accumulo.core.data.thrift.TConditionalMutation>>)value);
+        }
+        break;
+
+      case SYMBOLS:
+        if (value == null) {
+          unsetSymbols();
+        } else {
+          setSymbols((List<String>)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case TINFO:
+        return getTinfo();
+
+      case SESS_ID:
+        return Long.valueOf(getSessID());
+
+      case MUTATIONS:
+        return getMutations();
+
+      case SYMBOLS:
+        return getSymbols();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case TINFO:
+        return isSetTinfo();
+      case SESS_ID:
+        return isSetSessID();
+      case MUTATIONS:
+        return isSetMutations();
+      case SYMBOLS:
+        return isSetSymbols();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof conditionalUpdate_args)
+        return this.equals((conditionalUpdate_args)that);
+      return false;
+    }
+
+    public boolean equals(conditionalUpdate_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_tinfo = true && this.isSetTinfo();
+      boolean that_present_tinfo = true && that.isSetTinfo();
+      if (this_present_tinfo || that_present_tinfo) {
+        if (!(this_present_tinfo && that_present_tinfo))
+          return false;
+        if (!this.tinfo.equals(that.tinfo))
+          return false;
+      }
+
+      boolean this_present_sessID = true;
+      boolean that_present_sessID = true;
+      if (this_present_sessID || that_present_sessID) {
+        if (!(this_present_sessID && that_present_sessID))
+          return false;
+        if (this.sessID != that.sessID)
+          return false;
+      }
+
+      boolean this_present_mutations = true && this.isSetMutations();
+      boolean that_present_mutations = true && that.isSetMutations();
+      if (this_present_mutations || that_present_mutations) {
+        if (!(this_present_mutations && that_present_mutations))
+          return false;
+        if (!this.mutations.equals(that.mutations))
+          return false;
+      }
+
+      boolean this_present_symbols = true && this.isSetSymbols();
+      boolean that_present_symbols = true && that.isSetSymbols();
+      if (this_present_symbols || that_present_symbols) {
+        if (!(this_present_symbols && that_present_symbols))
+          return false;
+        if (!this.symbols.equals(that.symbols))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      return 0;
+    }
+
+    public int compareTo(conditionalUpdate_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+      conditionalUpdate_args typedOther = (conditionalUpdate_args)other;
+
+      lastComparison = Boolean.valueOf(isSetTinfo()).compareTo(typedOther.isSetTinfo());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetTinfo()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tinfo, typedOther.tinfo);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetSessID()).compareTo(typedOther.isSetSessID());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetSessID()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessID, typedOther.sessID);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetMutations()).compareTo(typedOther.isSetMutations());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetMutations()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mutations, typedOther.mutations);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastC

<TRUNCATED>

[20/50] git commit: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/accumulo

Posted by kt...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/accumulo


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/048b308d
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/048b308d
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/048b308d

Branch: refs/heads/ACCUMULO-1000
Commit: 048b308d911b63542fd7ae11b7c68b60ff296a27
Parents: 88c11e6 388d58c
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 14:13:23 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 14:13:23 2013 -0400

----------------------------------------------------------------------
 .../apache/accumulo/fate/zookeeper/ZooLock.java |  5 +-
 .../test/functional/ZookeeperRestartIT.java     | 81 ++++++++++++++++++++
 2 files changed, 85 insertions(+), 1 deletion(-)
----------------------------------------------------------------------



[45/50] git commit: ACCUMULO-1000 fixed a lot of odds and ends

Posted by kt...@apache.org.
ACCUMULO-1000 fixed a lot of odds and ends


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/5e908585
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/5e908585
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/5e908585

Branch: refs/heads/ACCUMULO-1000
Commit: 5e908585aa76b605840c87e2f769e2aff642b3a6
Parents: 7bb5f8f
Author: Keith Turner <kt...@apache.org>
Authored: Mon Jul 22 16:37:03 2013 -0400
Committer: Keith Turner <kt...@apache.org>
Committed: Mon Jul 22 16:37:03 2013 -0400

----------------------------------------------------------------------
 .../core/client/impl/ConditionalWriterImpl.java |  43 +++--
 .../iterators/system/ColumnQualifierFilter.java |   5 +-
 .../accumulo/server/tabletserver/Tablet.java    |   8 +-
 .../server/tabletserver/TabletServer.java       | 173 ++++++++++---------
 4 files changed, 123 insertions(+), 106 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/5e908585/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
index c87c865..ed20054 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
@@ -65,6 +65,7 @@ import org.apache.accumulo.core.util.BadArgumentException;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.ThriftUtil;
+import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.trace.thrift.TInfo;
 import org.apache.commons.collections.map.LRUMap;
@@ -81,6 +82,10 @@ class ConditionalWriterImpl implements ConditionalWriter {
   
   private static final Logger log = Logger.getLogger(ConditionalWriterImpl.class);
 
+  private static final int MAX_SLEEP = 5000;
+
+  private static final long SESSION_CACHE_TIME = 60000;
+
   private Authorizations auths;
   private VisibilityEvaluator ve;
   @SuppressWarnings("unchecked")
@@ -167,7 +172,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
     
     void resetDelay() {
       // TODO eventually timeout a mutation
-      delay = Math.min(delay * 2, 5000);
+      delay = Math.min(delay * 2, MAX_SLEEP);
       resetTime = System.currentTimeMillis();
     }
   }
@@ -231,7 +236,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
     
     synchronized (serverQueue) {
       serverQueue.queue.add(mutations);
-      //never execute more that one task per server
+      // never execute more than one task per server
       if(!serverQueue.taskQueued){
         threadPool.execute(new LoggingRunnable(log, new SendTask(location)));
         serverQueue.taskQueued = true;
@@ -357,12 +362,13 @@ class ConditionalWriterImpl implements ConditionalWriter {
     
     @Override
     public void run() {
-      TabletServerMutations<QCMutation> mutations = dequeue(location);
-      if (mutations != null)
-        sendToServer(location, mutations);
-      
-      //TODO if exception is thrown, will not reschedule
-      reschedule(this);
+      try {
+        TabletServerMutations<QCMutation> mutations = dequeue(location);
+        if (mutations != null)
+          sendToServer(location, mutations);
+      } finally {
+        reschedule(this);
+      }
     }
   }
   
@@ -380,6 +386,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
   private static class SessionID {
     long sessionID;
     boolean reserved;
+    long lastAccessTime;
   }
   
   private HashMap<String, SessionID> cachedSessionIDs = new HashMap<String, SessionID>();
@@ -392,8 +399,12 @@ class ConditionalWriterImpl implements ConditionalWriter {
         if (sid.reserved)
           throw new IllegalStateException();
         
-        sid.reserved = true;
-        return sid.sessionID;
+        if (System.currentTimeMillis() - sid.lastAccessTime > SESSION_CACHE_TIME) {
+          cachedSessionIDs.remove(location);
+        } else {
+          sid.reserved = true;
+          return sid.sessionID;
+        }
       }
     }
     
@@ -423,6 +434,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
       if(!sid.reserved)
         throw new IllegalStateException();
       sid.reserved = false;
+      sid.lastAccessTime = System.currentTimeMillis();
     }
   }
   
@@ -470,9 +482,6 @@ class ConditionalWriterImpl implements ConditionalWriter {
         }
       }
 
-
-      // TODO maybe have thrift call return bad extents
-
       for (KeyExtent ke : extentsToInvalidate) {
         locator.invalidateCache(ke);
       }
@@ -533,14 +542,14 @@ class ConditionalWriterImpl implements ConditionalWriter {
    */
   private void invalidateSession(long sessionId, String location, TabletServerMutations<QCMutation> mutations) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     
-    // TODO could assume tserver will invalidate sessions after a given time period
-    
     ArrayList<QCMutation> mutList = new ArrayList<QCMutation>();
     
     for (List<QCMutation> tml : mutations.getMutations().values()) {
       mutList.addAll(tml);
     }
     
+    long sleepTime = 50;
+
     while (true) {
       Map<String,TabletServerMutations<QCMutation>> binnedMutations = new HashMap<String,TabletLocator.TabletServerMutations<QCMutation>>();
       List<QCMutation> failures = new ArrayList<QCMutation>();
@@ -565,7 +574,9 @@ class ConditionalWriterImpl implements ConditionalWriter {
         locator.invalidateCache(location);
       }
       
-      //TODO sleep
+      UtilWaitThread.sleep(sleepTime);
+      sleepTime = Math.min(2 * sleepTime, MAX_SLEEP);
+
     }
 	
   }
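
Both changes above converge on the same capped exponential backoff: double the delay after each failed pass, but never sleep longer than MAX_SLEEP. A minimal standalone sketch of that pattern, using an illustrative Callable-based retry condition rather than the writer's real one:

import java.util.concurrent.Callable;

public class BackoffSketch {
  private static final long MAX_SLEEP = 5000; // same cap as above, in milliseconds

  // Repeatedly invoke attempt until it reports success, sleeping between tries.
  static void retryWithBackoff(Callable<Boolean> attempt) throws Exception {
    long sleepTime = 50;                               // initial delay, as in invalidateSession
    while (!attempt.call()) {
      Thread.sleep(sleepTime);                         // back off before the next try
      sleepTime = Math.min(2 * sleepTime, MAX_SLEEP);  // double the delay, but cap it
    }
  }
}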

http://git-wip-us.apache.org/repos/asf/accumulo/blob/5e908585/core/src/main/java/org/apache/accumulo/core/iterators/system/ColumnQualifierFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/ColumnQualifierFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/ColumnQualifierFilter.java
index 1595f5a..d5ca3b4 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/ColumnQualifierFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/ColumnQualifierFilter.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.core.iterators.system;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.Set;
 
 import org.apache.accumulo.core.data.ArrayByteSequence;
 import org.apache.accumulo.core.data.ByteSequence;
@@ -36,7 +37,7 @@ public class ColumnQualifierFilter extends Filter {
   
   public ColumnQualifierFilter() {}
   
-  public ColumnQualifierFilter(SortedKeyValueIterator<Key,Value> iterator, HashSet<Column> columns) {
+  public ColumnQualifierFilter(SortedKeyValueIterator<Key,Value> iterator, Set<Column> columns) {
     setSource(iterator);
     init(columns);
   }
@@ -63,7 +64,7 @@ public class ColumnQualifierFilter extends Filter {
     return cfset != null && cfset.contains(key.getColumnFamilyData());
   }
   
-  public void init(HashSet<Column> columns) {
+  public void init(Set<Column> columns) {
     this.columnFamilies = new HashSet<ByteSequence>();
     this.columnsQualifiers = new HashMap<ByteSequence,HashSet<ByteSequence>>();
     

http://git-wip-us.apache.org/repos/asf/accumulo/blob/5e908585/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
index 1305be6..035d9b0 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
@@ -1651,7 +1651,7 @@ public class Tablet {
     }
   }
   
-  private Batch nextBatch(SortedKeyValueIterator<Key,Value> iter, Range range, int num, HashSet<Column> columns) throws IOException {
+  private Batch nextBatch(SortedKeyValueIterator<Key,Value> iter, Range range, int num, Set<Column> columns) throws IOException {
     
     // log.info("In nextBatch..");
     
@@ -1739,7 +1739,7 @@ public class Tablet {
     public long numBytes;
   }
   
-  Scanner createScanner(Range range, int num, HashSet<Column> columns, Authorizations authorizations, List<IterInfo> ssiList,
+  Scanner createScanner(Range range, int num, Set<Column> columns, Authorizations authorizations, List<IterInfo> ssiList,
       Map<String,Map<String,String>> ssio, boolean isolated, AtomicBoolean interruptFlag) {
     // do a test to see if this range falls within the tablet, if it does not
     // then clip will throw an exception
@@ -1873,14 +1873,14 @@ public class Tablet {
     // scan options
     Authorizations authorizations;
     byte[] defaultLabels;
-    HashSet<Column> columnSet;
+    Set<Column> columnSet;
     List<IterInfo> ssiList;
     Map<String,Map<String,String>> ssio;
     AtomicBoolean interruptFlag;
     int num;
     boolean isolated;
     
-    ScanOptions(int num, Authorizations authorizations, byte[] defaultLabels, HashSet<Column> columnSet, List<IterInfo> ssiList,
+    ScanOptions(int num, Authorizations authorizations, byte[] defaultLabels, Set<Column> columnSet, List<IterInfo> ssiList,
         Map<String,Map<String,String>> ssio, AtomicBoolean interruptFlag, boolean isolated) {
       this.num = num;
       this.authorizations = authorizations;
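
Loosening these signatures from HashSet<Column> to Set<Column> is what lets the new checkCondition code in TabletServer (below) pass Collections.emptySet() instead of allocating a fresh HashSet per condition check. A tiny illustration of the difference, using String in place of Column:

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SetParameterSketch {
  // Declaring the parameter as the Set interface accepts any implementation.
  static boolean isEmpty(Set<String> columns) {
    return columns.isEmpty();
  }

  public static void main(String[] args) {
    isEmpty(new HashSet<String>());           // works with either signature
    isEmpty(Collections.<String>emptySet());  // only compiles against Set<String>
  }
}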

http://git-wip-us.apache.org/repos/asf/accumulo/blob/5e908585/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 013639e..8f33488 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -750,6 +750,12 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     public TCredentials credentials;
     public Authorizations auths;
     public String tableId;
+    public AtomicBoolean interruptFlag;
+    
+    @Override
+    public void cleanup() {
+      interruptFlag.set(true);
+    }
   }
   
   private static class UpdateSession extends Session {
@@ -901,6 +907,8 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     
     WriteTracker writeTracker = new WriteTracker();
     
+    private RowLocks rowLocks = new RowLocks();
+
     ThriftClientHandler() {
       super(instance, watcher);
       log.debug(ThriftClientHandler.class.getName() + " created");
@@ -1730,16 +1738,11 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
         writeTracker.finishWrite(opid);
       }
     }
-    
-    private RowLocks rowLocks = new RowLocks();
 
-    private void checkConditions(Map<KeyExtent,List<ServerConditionalMutation>> updates, ArrayList<TCMResult> results, Authorizations authorizations,
-        List<String> symbols) {
+    private void checkConditions(Map<KeyExtent,List<ServerConditionalMutation>> updates, ArrayList<TCMResult> results, ConditionalSession cs,
+        List<String> symbols) throws IOException {
       Iterator<Entry<KeyExtent,List<ServerConditionalMutation>>> iter = updates.entrySet().iterator();
       
-      // TODO use constant
-      HashSet<Column> columns = new HashSet<Column>();
-
       CompressedIterators compressedIters = new CompressedIterators(symbols);
 
       while (iter.hasNext()) {
@@ -1752,97 +1755,91 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
           iter.remove();
         } else {
           List<ServerConditionalMutation> okMutations = new ArrayList<ServerConditionalMutation>(entry.getValue().size());
-          
-          // TODO extract to method
-          for (ServerConditionalMutation scm : entry.getValue()) {
-            boolean add = true;
-            for(TCondition tc : scm.getConditions()){
-            
-              Range range;
-              if (tc.hasTimestamp)
-                range = Range.exact(new Text(scm.getRow()), new Text(tc.getCf()), new Text(tc.getCq()), new Text(tc.getCv()), tc.getTs());
-              else
-                range = Range.exact(new Text(scm.getRow()), new Text(tc.getCf()), new Text(tc.getCq()), new Text(tc.getCv()));
-              
-              AtomicBoolean interruptFlag = new AtomicBoolean();
-
-              IterConfig ic = compressedIters.decompress(tc.iterators);
 
-              //TODO use one iterator per tablet, push checks into tablet?
-              Scanner scanner = tablet.createScanner(range, 1, columns, authorizations, ic.ssiList, ic.ssio, false, interruptFlag);
-              
-              try {
-                ScanBatch batch = scanner.read();
-                
-                Value val = null;
-                
-                for (KVEntry entry2 : batch.results) {
-                  val = entry2.getValue();
-                  break;
-                }
-                
-                if ((val == null ^ tc.getVal() == null) || (val != null && !Arrays.equals(tc.getVal(), val.get()))) {
-                  results.add(new TCMResult(scm.getID(), TCMStatus.REJECTED));
-                  add = false;
-                  break;
-                }
-                
-              } catch (TabletClosedException e) {
-                // TODO ignore rest of tablets mutations
-                results.add(new TCMResult(scm.getID(), TCMStatus.IGNORED));
-                add = false;
-                break;
-              } catch (IterationInterruptedException iie) {
-                // TODO determine why this happened, ignore rest of tablets mutations?
-                results.add(new TCMResult(scm.getID(), TCMStatus.IGNORED));
-                add = false;
-                break;
-              } catch (TooManyFilesException tmfe) {
-                // TODO handle differently?
-                results.add(new TCMResult(scm.getID(), TCMStatus.IGNORED));
-                add = false;
-                break;
-              } catch (IOException e) {
-                // TODO Auto-generated catch block
-                e.printStackTrace();
-              } finally {
-                scanner.close();
-              }
-            }
-            
-            if (add)
+          for (ServerConditionalMutation scm : entry.getValue()) {
+            if (checkCondition(results, cs, compressedIters, tablet, scm))
               okMutations.add(scm);
           }
           
-          // TODO just rebuild map
-          entry.getValue().clear();
-          entry.getValue().addAll(okMutations);
+          entry.setValue(okMutations);
         }
         
       }
     }
 
-    private void writeConditionalMutations(Map<KeyExtent,List<ServerConditionalMutation>> updates, ArrayList<TCMResult> results, TCredentials credentials) {
+    boolean checkCondition(ArrayList<TCMResult> results, ConditionalSession cs, CompressedIterators compressedIters,
+        Tablet tablet, ServerConditionalMutation scm) throws IOException {
+      boolean add = true;
+      
+      Set<Column> emptyCols = Collections.emptySet();
+
+      for(TCondition tc : scm.getConditions()){
+      
+        Range range;
+        if (tc.hasTimestamp)
+          range = Range.exact(new Text(scm.getRow()), new Text(tc.getCf()), new Text(tc.getCq()), new Text(tc.getCv()), tc.getTs());
+        else
+          range = Range.exact(new Text(scm.getRow()), new Text(tc.getCf()), new Text(tc.getCq()), new Text(tc.getCv()));
+        
+        IterConfig ic = compressedIters.decompress(tc.iterators);
+
+        //TODO use one iterator per tablet, push checks into tablet?
+        Scanner scanner = tablet.createScanner(range, 1, emptyCols, cs.auths, ic.ssiList, ic.ssio, false, cs.interruptFlag);
+        
+        try {
+          ScanBatch batch = scanner.read();
+          
+          Value val = null;
+          
+          for (KVEntry entry2 : batch.results) {
+            val = entry2.getValue();
+            break;
+          }
+          
+          if ((val == null ^ tc.getVal() == null) || (val != null && !Arrays.equals(tc.getVal(), val.get()))) {
+            results.add(new TCMResult(scm.getID(), TCMStatus.REJECTED));
+            add = false;
+            break;
+          }
+          
+        } catch (TabletClosedException e) {
+          results.add(new TCMResult(scm.getID(), TCMStatus.IGNORED));
+          add = false;
+          break;
+        } catch (IterationInterruptedException iie) {
+          results.add(new TCMResult(scm.getID(), TCMStatus.IGNORED));
+          add = false;
+          break;
+        } catch (TooManyFilesException tmfe) {
+          results.add(new TCMResult(scm.getID(), TCMStatus.IGNORED));
+          add = false;
+          break;
+        }
+      }
+      return add;
+    }
+
+    private void writeConditionalMutations(Map<KeyExtent,List<ServerConditionalMutation>> updates, ArrayList<TCMResult> results, ConditionalSession sess) {
       Set<Entry<KeyExtent,List<ServerConditionalMutation>>> es = updates.entrySet();
       
       Map<CommitSession,List<Mutation>> sendables = new HashMap<CommitSession,List<Mutation>>();
 
       // TODO stats
 
+      boolean sessionCanceled = sess.interruptFlag.get();
+
       for (Entry<KeyExtent,List<ServerConditionalMutation>> entry : es) {
         Tablet tablet = onlineTablets.get(entry.getKey());
-        if (tablet == null || tablet.isClosed()) {
+        if (tablet == null || tablet.isClosed() || sessionCanceled) {
           for (ServerConditionalMutation scm : entry.getValue())
             results.add(new TCMResult(scm.getID(), TCMStatus.IGNORED));
         } else {
-          // TODO write tracker
-          
           try {
             
             List<Mutation> mutations = (List<Mutation>) (List<? extends Mutation>) entry.getValue();
             if (mutations.size() > 0) {
 
-              CommitSession cs = tablet.prepareMutationsForCommit(new TservConstraintEnv(security, credentials), mutations);
+              CommitSession cs = tablet.prepareMutationsForCommit(new TservConstraintEnv(security, sess.credentials), mutations);
               
               if (cs == null) {
                 for (ServerConditionalMutation scm : entry.getValue())
@@ -1889,8 +1886,8 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
 
     }
 
-    private Map<KeyExtent,List<ServerConditionalMutation>> conditionalUpdate(TCredentials credentials, Authorizations authorizations,
-        Map<KeyExtent,List<ServerConditionalMutation>> updates, ArrayList<TCMResult> results, List<String> symbols) {
+    private Map<KeyExtent,List<ServerConditionalMutation>> conditionalUpdate(ConditionalSession cs, Map<KeyExtent,List<ServerConditionalMutation>> updates,
+        ArrayList<TCMResult> results, List<String> symbols) throws IOException {
       // sort each list of mutations, this is done to avoid deadlock and doing seeks in order is more efficient and detect duplicate rows.
       ConditionalMutationSet.sortConditionalMutations(updates);
       
@@ -1902,8 +1899,8 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       // get as many locks as possible w/o blocking... defer any rows that are locked
       List<RowLock> locks = rowLocks.acquireRowlocks(updates, deferred);
       try {
-        checkConditions(updates, results, authorizations, symbols);
-        writeConditionalMutations(updates, results, credentials);
+        checkConditions(updates, results, cs, symbols);
+        writeConditionalMutations(updates, results, cs);
       } finally {
         rowLocks.releaseRowLocks(locks);
       }
@@ -1926,6 +1923,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       cs.auths = new Authorizations(authorizations);
       cs.credentials = credentials;
       cs.tableId = tableID;
+      cs.interruptFlag = new AtomicBoolean();
       
       return sessionManager.createSession(cs, false);
     }
@@ -1934,34 +1932,36 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     public List<TCMResult> conditionalUpdate(TInfo tinfo, long sessID, Map<TKeyExtent,List<TConditionalMutation>> mutations, List<String> symbols)
         throws NoSuchScanIDException, TException {
       // TODO sessions, should show up in list scans
-      // TODO timeout like scans do
       
       ConditionalSession cs = (ConditionalSession) sessionManager.reserveSession(sessID);
       
       if(cs == null)
         throw new NoSuchScanIDException();
       
-      
+      Text tid = new Text(cs.tableId);
+      long opid = writeTracker.startWrite(TabletType.type(new KeyExtent(tid, null, null)));
       
       try{
         Map<KeyExtent,List<ServerConditionalMutation>> updates = Translator.translate(mutations, Translator.TKET,
             new Translator.ListTranslator<TConditionalMutation,ServerConditionalMutation>(ServerConditionalMutation.TCMT));
-        
-        Text tid = new Text(cs.tableId);
+
         for(KeyExtent ke : updates.keySet())
           if(!ke.getTableId().equals(tid))
             throw new IllegalArgumentException("Unexpected table id "+tid+" != "+ke.getTableId());
         
         ArrayList<TCMResult> results = new ArrayList<TCMResult>();
         
-        Map<KeyExtent,List<ServerConditionalMutation>> deferred = conditionalUpdate(cs.credentials, cs.auths, updates, results, symbols);
+        Map<KeyExtent,List<ServerConditionalMutation>> deferred = conditionalUpdate(cs, updates, results, symbols);
   
         while (deferred.size() > 0) {
-          deferred = conditionalUpdate(cs.credentials, cs.auths, deferred, results, symbols);
+          deferred = conditionalUpdate(cs, deferred, results, symbols);
         }
   
         return results;
+      } catch (IOException ioe) {
+        throw new TException(ioe);
       }finally{
+        writeTracker.finishWrite(opid);
         sessionManager.unreserveSession(sessID);
       }
     }
@@ -1970,7 +1970,12 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     public void invalidateConditionalUpdate(TInfo tinfo, long sessID) throws TException {
       //this method should wait for any running conditional update to complete
       //after this method returns a conditional update should not be able to start
-      ConditionalSession cs = (ConditionalSession) sessionManager.reserveSession(sessID, true);
+      
+      ConditionalSession cs = (ConditionalSession) sessionManager.getSession(sessID);
+      if (cs != null)
+        cs.interruptFlag.set(true);
+      
+      cs = (ConditionalSession) sessionManager.reserveSession(sessID, true);
       if(cs != null)
         sessionManager.removeSession(sessID, true);
     }
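
The new interruptFlag on ConditionalSession turns cancellation into a cooperative check: invalidateConditionalUpdate flips the flag first, and writeConditionalMutations reports anything still pending as IGNORED once it sees the flag. A rough sketch of that shape, with illustrative names rather than the tserver's actual classes:

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class CancellableWorkSketch {
  private final AtomicBoolean interruptFlag = new AtomicBoolean(false);

  // Called by the cancel path: set the flag first, then let in-flight work drain.
  public void cancel() {
    interruptFlag.set(true);
  }

  // Called by the worker: skip (and count) anything left once cancelled.
  public int process(List<Runnable> units) {
    int ignored = 0;
    for (Runnable unit : units) {
      if (interruptFlag.get()) {
        ignored++;        // a real server would mark these results IGNORED
        continue;
      }
      unit.run();
    }
    return ignored;
  }
}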


[22/50] git commit: ACCUMULO-1579 Fix classpath issues and add plugin test with iterator

Posted by kt...@apache.org.
ACCUMULO-1579 Fix classpath issues and add plugin test with iterator


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/3d7a6e71
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/3d7a6e71
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/3d7a6e71

Branch: refs/heads/ACCUMULO-1000
Commit: 3d7a6e71abea53485ef37b9e6e27134a722c6088
Parents: f8b9145
Author: Christopher Tubbs <ct...@apache.org>
Authored: Thu Jul 18 15:01:49 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Thu Jul 18 15:01:49 2013 -0400

----------------------------------------------------------------------
 maven-plugin/pom.xml                            |  4 ++
 .../src/it/plugin-test/postbuild.groovy         |  3 ++
 .../apache/accumulo/plugin/CustomFilter.java    | 33 +++++++++++++++
 .../org/apache/accumulo/plugin/PluginIT.java    | 44 ++++++++++++++++++--
 .../maven/plugin/AbstractAccumuloMojo.java      | 31 +++++++-------
 .../apache/accumulo/maven/plugin/StartMojo.java |  2 +
 .../apache/accumulo/maven/plugin/StopMojo.java  |  2 +
 pom.xml                                         |  7 +++-
 8 files changed, 108 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/3d7a6e71/maven-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/maven-plugin/pom.xml b/maven-plugin/pom.xml
index 9060498..9a073c6 100644
--- a/maven-plugin/pom.xml
+++ b/maven-plugin/pom.xml
@@ -72,6 +72,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.maven</groupId>
+      <artifactId>maven-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.maven</groupId>
       <artifactId>maven-plugin-api</artifactId>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3d7a6e71/maven-plugin/src/it/plugin-test/postbuild.groovy
----------------------------------------------------------------------
diff --git a/maven-plugin/src/it/plugin-test/postbuild.groovy b/maven-plugin/src/it/plugin-test/postbuild.groovy
index 404961a..3fbfcab 100644
--- a/maven-plugin/src/it/plugin-test/postbuild.groovy
+++ b/maven-plugin/src/it/plugin-test/postbuild.groovy
@@ -22,3 +22,6 @@ assert testCreateTable.isFile()
 
 File testWriteToTable = new File(basedir, "target/accumulo-maven-plugin/plugin-it-instance/testWriteToTablePassed");
 assert testWriteToTable.isFile()
+
+File testCheckIterator = new File(basedir, "target/accumulo-maven-plugin/plugin-it-instance/testCheckIteratorPassed");
+assert testCheckIterator.isFile()

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3d7a6e71/maven-plugin/src/it/plugin-test/src/main/java/org/apache/accumulo/plugin/CustomFilter.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/it/plugin-test/src/main/java/org/apache/accumulo/plugin/CustomFilter.java b/maven-plugin/src/it/plugin-test/src/main/java/org/apache/accumulo/plugin/CustomFilter.java
new file mode 100644
index 0000000..9a0497a
--- /dev/null
+++ b/maven-plugin/src/it/plugin-test/src/main/java/org/apache/accumulo/plugin/CustomFilter.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.plugin;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filter;
+
+/**
+ * A simple example Filter used by the plugin integration test; it accepts only entries whose column family is "allowed".
+ * A simple example Filter used by the plugin integration test; it accepts only entries whose column family is "allowed".
+ */
+public class CustomFilter extends Filter {
+  
+  @Override
+  public boolean accept(Key k, Value v) {
+    return k.getColumnFamily().toString().equals("allowed");
+  }
+  
+}
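
CustomFilter accepts only entries whose column family is literally "allowed". For context, a sketch of how such a filter is attached to a scan, assuming an existing Connector and a table that already holds data (this mirrors what PluginIT does below rather than adding anything new):

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.plugin.CustomFilter;

public class CustomFilterUsageSketch {
  static void scanAllowedOnly(Connector connector, String tableName) throws TableNotFoundException {
    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
    scanner.addScanIterator(new IteratorSetting(5, CustomFilter.class)); // priority 5, as in the IT
    for (Entry<Key,Value> entry : scanner) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());    // only "allowed" families remain
    }
  }
}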

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3d7a6e71/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java b/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java
index e4dee28..1e3fe37 100644
--- a/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java
+++ b/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java
@@ -29,6 +29,7 @@ import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -38,7 +39,6 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.minicluster.MiniAccumuloInstance;
-import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -92,7 +92,45 @@ public class PluginIT {
     assertTrue(new File("target/accumulo-maven-plugin/" + instance.getInstanceName() + "/testWriteToTablePassed").createNewFile());
   }
   
-  @AfterClass
-  public static void tearDown() throws Exception {}
+  @Test
+  public void checkIterator() throws IOException, AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
+    String tableName = "checkIterator";
+    connector.tableOperations().create(tableName);
+    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+    Mutation m = new Mutation("ROW1");
+    m.put("allowed", "CQ1", "V1");
+    m.put("denied", "CQ2", "V2");
+    m.put("allowed", "CQ3", "V3");
+    bw.addMutation(m);
+    m = new Mutation("ROW2");
+    m.put("allowed", "CQ1", "V1");
+    m.put("denied", "CQ2", "V2");
+    m.put("allowed", "CQ3", "V3");
+    bw.addMutation(m);
+    bw.close();
+    
+    // check filter
+    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+    IteratorSetting is = new IteratorSetting(5, CustomFilter.class);
+    scanner.addScanIterator(is);
+    int count = 0;
+    for (Entry<Key,Value> entry : scanner) {
+      count++;
+      assertEquals("allowed", entry.getKey().getColumnFamily().toString());
+    }
+    assertEquals(4, count);
+    
+    // check filter negated
+    scanner.clearScanIterators();
+    CustomFilter.setNegate(is, true);
+    scanner.addScanIterator(is);
+    count = 0;
+    for (Entry<Key,Value> entry : scanner) {
+      count++;
+      assertEquals("denied", entry.getKey().getColumnFamily().toString());
+    }
+    assertEquals(2, count);
+    assertTrue(new File("target/accumulo-maven-plugin/" + instance.getInstanceName() + "/testCheckIteratorPassed").createNewFile());
+  }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3d7a6e71/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java
index 7eb4acb..65174ed 100644
--- a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java
+++ b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java
@@ -18,32 +18,35 @@ package org.apache.accumulo.maven.plugin;
 
 import java.io.File;
 import java.net.MalformedURLException;
-import java.util.List;
 
 import org.apache.maven.artifact.Artifact;
 import org.apache.maven.plugin.AbstractMojo;
-import org.apache.maven.plugins.annotations.Parameter;
+import org.apache.maven.plugins.annotations.Component;
+import org.apache.maven.project.MavenProject;
 
 public abstract class AbstractAccumuloMojo extends AbstractMojo {
   
-  @Parameter(defaultValue = "${plugin.artifacts}", readonly = true, required = true)
-  private List<Artifact> pluginArtifacts;
+  @Component
+  private MavenProject project;
   
-  void configureMiniClasspath(String miniClasspath) {
+  void configureMiniClasspath(String miniClasspath) throws MalformedURLException {
     String classpath = "";
-    if (miniClasspath == null && pluginArtifacts != null) {
-      String sep = "";
-      for (Artifact artifact : pluginArtifacts) {
-        try {
-          classpath += sep + artifact.getFile().toURI().toURL();
-          sep = File.pathSeparator;
-        } catch (MalformedURLException e) {
-          e.printStackTrace();
-        }
+    StringBuilder sb = new StringBuilder();
+    if (miniClasspath == null && project != null) {
+      sb.append(project.getBuild().getOutputDirectory());
+      String sep = File.pathSeparator;
+      sb.append(sep).append(project.getBuild().getTestOutputDirectory());
+      for (Artifact artifact : project.getArtifacts()) {
+        addArtifact(sb, sep, artifact);
       }
+      classpath = sb.toString();
     } else if (miniClasspath != null && !miniClasspath.isEmpty()) {
       classpath = miniClasspath;
     }
     System.setProperty("java.class.path", System.getProperty("java.class.path", "") + File.pathSeparator + classpath);
   }
+  
+  private void addArtifact(StringBuilder classpath, String separator, Artifact artifact) throws MalformedURLException {
+    classpath.append(separator).append(artifact.getFile().toURI().toURL());
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3d7a6e71/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java
index b3efd81..9579952 100644
--- a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java
+++ b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java
@@ -23,6 +23,7 @@ import java.util.Set;
 
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.http.annotation.ThreadSafe;
 import org.apache.maven.plugin.MojoExecutionException;
 import org.apache.maven.plugins.annotations.LifecyclePhase;
 import org.apache.maven.plugins.annotations.Mojo;
@@ -33,6 +34,7 @@ import org.codehaus.plexus.util.FileUtils;
 /**
  * Goal which starts an instance of {@link MiniAccumuloCluster}.
  */
+@ThreadSafe
 @Mojo(name = "start", defaultPhase = LifecyclePhase.PRE_INTEGRATION_TEST, requiresDependencyResolution = ResolutionScope.TEST)
 public class StartMojo extends AbstractAccumuloMojo {
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3d7a6e71/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StopMojo.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StopMojo.java b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StopMojo.java
index ae1b7c0..70150ea 100644
--- a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StopMojo.java
+++ b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StopMojo.java
@@ -18,6 +18,7 @@ package org.apache.accumulo.maven.plugin;
 
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster.LogWriter;
+import org.apache.http.annotation.ThreadSafe;
 import org.apache.maven.plugin.MojoExecutionException;
 import org.apache.maven.plugins.annotations.LifecyclePhase;
 import org.apache.maven.plugins.annotations.Mojo;
@@ -26,6 +27,7 @@ import org.apache.maven.plugins.annotations.ResolutionScope;
 /**
  * Goal which stops all instances of {@link MiniAccumuloCluster} started with the start mojo.
  */
+@ThreadSafe
 @Mojo(name = "stop", defaultPhase = LifecyclePhase.POST_INTEGRATION_TEST, requiresDependencyResolution = ResolutionScope.TEST)
 public class StopMojo extends AbstractAccumuloMojo {
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3d7a6e71/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index edcaa16..6454ff3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -305,6 +305,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.maven</groupId>
+        <artifactId>maven-core</artifactId>
+        <version>${maven.min-version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.maven</groupId>
         <artifactId>maven-plugin-api</artifactId>
         <version>${maven.min-version}</version>
       </dependency>
@@ -670,7 +675,7 @@
             <configuration>
               <!--parallel>classes</parallel-->
               <perCoreThreadCount>false</perCoreThreadCount>
-              <threadCount>${accumulo.it.threads}</threadCount> 
+              <threadCount>${accumulo.it.threads}</threadCount>
               <redirectTestOutputToFile>true</redirectTestOutputToFile>
             </configuration>
           </execution>


[34/50] git commit: ACCUMULO-1000 modified conditional map to not process tservers concurrently and reuse sessions

Posted by kt...@apache.org.
ACCUMULO-1000 modified conditional map to not process tservers concurrently and reuse sessions


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/fdb95b40
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/fdb95b40
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/fdb95b40

Branch: refs/heads/ACCUMULO-1000
Commit: fdb95b40513094786b646dd682bb9d58ae06365d
Parents: ec53713
Author: keith@deenlo.com <ke...@deenlo.com>
Authored: Sat Jul 20 12:07:23 2013 -0400
Committer: keith@deenlo.com <ke...@deenlo.com>
Committed: Sat Jul 20 12:07:23 2013 -0400

----------------------------------------------------------------------
 .../core/client/impl/ConditionalWriterImpl.java | 128 ++++++++++++++++---
 .../server/tabletserver/TabletServer.java       |   2 +-
 2 files changed, 108 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/fdb95b40/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
index 31403fb..0e86ec7 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
@@ -90,8 +90,12 @@ class ConditionalWriterImpl implements ConditionalWriter {
   private TabletLocator locator;
   private String tableId;
 
-
-  private Map<String,BlockingQueue<TabletServerMutations<QCMutation>>> serverQueues;
+  private static class ServerQueue {
+    BlockingQueue<TabletServerMutations<QCMutation>> queue = new LinkedBlockingQueue<TabletServerMutations<QCMutation>>();
+    boolean taskQueued = false;
+  }
+  
+  private Map<String,ServerQueue> serverQueues;
   private DelayQueue<QCMutation> failedMutations = new DelayQueue<QCMutation>();
   private ScheduledThreadPoolExecutor threadPool;
   
@@ -168,16 +172,17 @@ class ConditionalWriterImpl implements ConditionalWriter {
     }
   }
   
-  private BlockingQueue<TabletServerMutations<QCMutation>> getServerQueue(String location) {
-    BlockingQueue<TabletServerMutations<QCMutation>> queue;
+  private ServerQueue getServerQueue(String location) {
+    ServerQueue serverQueue;
     synchronized (serverQueues) {
-      queue = serverQueues.get(location);
-      if (queue == null) {
-        queue = new LinkedBlockingQueue<TabletServerMutations<QCMutation>>();
-        serverQueues.put(location, queue);
+       serverQueue = serverQueues.get(location);
+      if (serverQueue == null) {
+        
+        serverQueue = new ServerQueue();
+        serverQueues.put(location, serverQueue);
       }
     }
-    return queue;
+    return serverQueue;
   }
   
   private void queueRetry(List<QCMutation> mutations) {
@@ -222,14 +227,38 @@ class ConditionalWriterImpl implements ConditionalWriter {
 
   private void queue(String location, TabletServerMutations<QCMutation> mutations) {
     
-    BlockingQueue<TabletServerMutations<QCMutation>> queue = getServerQueue(location);
+    ServerQueue serverQueue = getServerQueue(location);
     
-    queue.add(mutations);
-    threadPool.execute(new LoggingRunnable(log, new SendTask(location)));
+    synchronized (serverQueue) {
+      serverQueue.queue.add(mutations);
+      //never execute more that one task per server
+      if(!serverQueue.taskQueued){
+        threadPool.execute(new LoggingRunnable(log, new SendTask(location)));
+        serverQueue.taskQueued = true;
+      }
+    }
+   
   }
 
+  private void reschedule(SendTask task){
+    ServerQueue serverQueue = getServerQueue(task.location);
+    // just finished processing work for this server, could reschedule if it has more work or immediately process the work
+    // this code reschedules the server for processing later... there may be other queues with
+    // more data that need to be processed... also it will give the current server time to build
+    // up more data... the thinking is that rescheduling instead of processing immediately will result
+    // in bigger batches and less RPC overhead
+    
+    synchronized (serverQueue) {
+      if(serverQueue.queue.size() > 0)
+        threadPool.execute(new LoggingRunnable(log, task));
+      else
+        serverQueue.taskQueued = false;
+    }
+    
+  }
+  
   private TabletServerMutations<QCMutation> dequeue(String location) {
-    BlockingQueue<TabletServerMutations<QCMutation>> queue = getServerQueue(location);
+    BlockingQueue<TabletServerMutations<QCMutation>> queue = getServerQueue(location).queue;
     
     ArrayList<TabletServerMutations<QCMutation>> mutations = new ArrayList<TabletLocator.TabletServerMutations<QCMutation>>();
     queue.drainTo(mutations);
@@ -268,7 +297,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
     this.threadPool = new ScheduledThreadPoolExecutor(3);
     this.threadPool.setMaximumPoolSize(3);
     this.locator = TabletLocator.getLocator(instance, new Text(tableId));
-    this.serverQueues = new HashMap<String,BlockingQueue<TabletServerMutations<QCMutation>>>();
+    this.serverQueues = new HashMap<String,ServerQueue>();
     this.tableId = tableId;
 
     Runnable failureHandler = new Runnable() {
@@ -319,7 +348,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
   private class SendTask implements Runnable {
     
 
-    private String location;
+    String location;
     
     public SendTask(String location) {
       this.location = location;
@@ -331,6 +360,8 @@ class ConditionalWriterImpl implements ConditionalWriter {
       TabletServerMutations<QCMutation> mutations = dequeue(location);
       if (mutations != null)
         sendToServer(location, mutations);
+      
+      reschedule(this);
     }
   }
   
@@ -345,6 +376,55 @@ class ConditionalWriterImpl implements ConditionalWriter {
     }
   }
 
+  private static class SessionID {
+    long sessionID;
+    boolean reserved;
+  }
+  
+  private HashMap<String, SessionID> cachedSessionIDs = new HashMap<String, SessionID>();
+  
+  private Long reserveSessionID(String location, TabletClientService.Iface client, TInfo tinfo) throws ThriftSecurityException, TException {
+    //avoid cost of repeatedly making RPC to create sessions, reuse sessions
+    synchronized (cachedSessionIDs) {
+      SessionID sid = cachedSessionIDs.get(location);
+      if (sid != null) {
+        if (sid.reserved)
+          throw new IllegalStateException();
+        
+        sid.reserved = true;
+        return sid.sessionID;
+      }
+    }
+    
+    Long sessionId = client.startConditionalUpdate(tinfo, credentials, ByteBufferUtil.toByteBuffers(auths.getAuthorizations()), tableId);
+    
+    synchronized (cachedSessionIDs) {
+      SessionID sid = new SessionID();
+      sid.reserved = true;
+      sid.sessionID = sessionId;
+      if(cachedSessionIDs.put(location, sid) != null)
+        throw new IllegalStateException();
+    }
+    
+    return sessionId;
+  }
+  
+  private void invalidateSessionID(String location) {
+    synchronized (cachedSessionIDs) {
+      cachedSessionIDs.remove(location);
+    }
+    
+  }
+  
+  private void unreserveSessionID(String location){
+    synchronized (cachedSessionIDs) {
+      SessionID sid = cachedSessionIDs.get(location);
+      if(!sid.reserved)
+        throw new IllegalStateException();
+      sid.reserved = false;
+    }
+  }
+  
   private void sendToServer(String location, TabletServerMutations<QCMutation> mutations) {
     TabletClientService.Iface client = null;
     
@@ -363,11 +443,17 @@ class ConditionalWriterImpl implements ConditionalWriter {
       CompressedIterators compressedIters = new CompressedIterators();
       convertMutations(mutations, cmidToCm, cmid, tmutations, compressedIters);
       
-      //TODO create a session per tserver and keep reusing it
-      sessionId = client.startConditionalUpdate(tinfo, credentials, ByteBufferUtil.toByteBuffers(auths.getAuthorizations()), tableId);
+      List<TCMResult> tresults = null;
+      while (tresults == null) {
+        try {
+          sessionId = reserveSessionID(location, client, tinfo);
+          tresults = client.conditionalUpdate(tinfo, sessionId, tmutations, compressedIters.getSymbolTable());
+        } catch (NoSuchScanIDException nssie) {
+          sessionId = null;
+          invalidateSessionID(location);
+        }
+      }
       
-      List<TCMResult> tresults = client.conditionalUpdate(tinfo, sessionId, tmutations, compressedIters.getSymbolTable());
-
       HashSet<KeyExtent> extentsToInvalidate = new HashSet<KeyExtent>();
 
       ArrayList<QCMutation> ignored = new ArrayList<QCMutation>();
@@ -392,8 +478,6 @@ class ConditionalWriterImpl implements ConditionalWriter {
 
       queueRetry(ignored);
 
-    } catch (NoSuchScanIDException nssie){
-    	queueRetry(cmidToCm);
     } catch (ThriftSecurityException tse) {
       AccumuloSecurityException ase = new AccumuloSecurityException(credentials.getPrincipal(), tse.getCode(), Tables.getPrintableTableInfoFromId(instance,
           tableId), tse);
@@ -409,6 +493,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
     } catch (Exception e) {
       queueException(location, cmidToCm, e);
     } finally {
+      unreserveSessionID(location);
       ThriftUtil.returnClient((TServiceClient) client);
     }
   }
@@ -591,6 +676,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
   
   @Override
   public void close() {
+    //TODO could possibly close cached sessions using an async method to clean up sessions on server side
     threadPool.shutdownNow();
   }
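
The ServerQueue bookkeeping above guarantees at most one send task is ever scheduled per tablet server, while later work keeps accumulating in that server's queue; the comment in reschedule() explains why the task hands itself back to the pool instead of draining immediately (bigger batches, less RPC overhead). A simplified generic sketch of the "one task per queue" idea, draining in place rather than rescheduling, with illustrative names:

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.ExecutorService;

public class SingleTaskQueueSketch<T> {
  private final Queue<T> queue = new ArrayDeque<T>();
  private final ExecutorService pool;
  private boolean taskQueued = false;

  public SingleTaskQueueSketch(ExecutorService pool) {
    this.pool = pool;
  }

  public synchronized void add(T item) {
    queue.add(item);
    if (!taskQueued) {                  // never schedule more than one task for this queue
      taskQueued = true;
      pool.execute(new Runnable() {
        public void run() {
          drain();
        }
      });
    }
  }

  private void drain() {
    T item;
    while ((item = poll()) != null) {
      process(item);                    // batch-and-send would happen here
    }
  }

  private synchronized T poll() {
    T item = queue.poll();
    if (item == null) {
      taskQueued = false;               // queue empty; the next add() may schedule a new task
    }
    return item;
  }

  protected void process(T item) {
  }
}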
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/fdb95b40/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index ee1d1b6..013639e 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -1962,7 +1962,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
   
         return results;
       }finally{
-        sessionManager.removeSession(sessID, true);
+        sessionManager.unreserveSession(sessID);
       }
     }
 


[14/50] git commit: ACCUMULO-1537 make tests more stable, document in pom how to run tests in parallel

Posted by kt...@apache.org.
ACCUMULO-1537 make tests more stable, document in pom how to run tests in parallel


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/f7e96a3e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/f7e96a3e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/f7e96a3e

Branch: refs/heads/ACCUMULO-1000
Commit: f7e96a3e7174eb56a4eb21f7dfdf1f6a9495cc0e
Parents: f4c6e6f
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 12:11:24 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 12:11:24 2013 -0400

----------------------------------------------------------------------
 .../minicluster/MiniAccumuloConfig.java         |  1 +
 pom.xml                                         |  4 +++
 .../test/functional/DynamicThreadPoolsIT.java   | 29 ++++++++++----------
 .../accumulo/test/functional/MacTest.java       |  2 +-
 .../accumulo/test/functional/SplitIT.java       |  8 ++++--
 5 files changed, 26 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/f7e96a3e/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
index f183b4e..600ea4b 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
@@ -121,6 +121,7 @@ public class MiniAccumuloConfig {
       mergePropWithRandomPort(Property.TRACE_PORT.getKey());
       mergePropWithRandomPort(Property.TSERV_CLIENTPORT.getKey());
       mergePropWithRandomPort(Property.MONITOR_PORT.getKey());
+      mergePropWithRandomPort(Property.GC_PORT.getKey());
       
       // zookeeper port should be set explicitly in this class, not just on the site config
       if (zooKeeperPort == null)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f7e96a3e/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 73f8454..edcaa16 100644
--- a/pom.xml
+++ b/pom.xml
@@ -107,6 +107,7 @@
     </site>
   </distributionManagement>
   <properties>
+    <accumulo.it.threads>1</accumulo.it.threads>
     <!-- used for filtering the java source with the current version -->
     <accumulo.release.version>${project.version}</accumulo.release.version>
     <!-- the maven-release-plugin makes this recommendation, due to plugin bugs -->
@@ -667,6 +668,9 @@
               <goal>verify</goal>
             </goals>
             <configuration>
+              <!--parallel>classes</parallel-->
+              <perCoreThreadCount>false</perCoreThreadCount>
+              <threadCount>${accumulo.it.threads}</threadCount> 
               <redirectTestOutputToFile>true</redirectTestOutputToFile>
             </configuration>
           </execution>
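
Combined with the new accumulo.it.threads property above, this appears to be the "document in pom how to run tests in parallel" part of the commit: with the commented-out <parallel>classes</parallel> element enabled, overriding the property on the command line (for example, mvn verify -Daccumulo.it.threads=4, where 4 is only an illustrative value) would presumably let failsafe run that many IT classes at once.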

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f7e96a3e/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
index d954974..daced3e 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
@@ -16,7 +16,7 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 import java.util.Collections;
 
@@ -40,30 +40,28 @@ public class DynamicThreadPoolsIT extends MacTest {
   
   @Override
   public void configure(MiniAccumuloConfig cfg) {
+    cfg.setNumTservers(1);
     cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "100ms"));
   }
   
-  @Test(timeout = 30 * 1000)
+  @Test(timeout = 60 * 1000)
   public void test() throws Exception {
+    final int TABLES = 15;
     Connector c = getConnector();
-    c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
+    c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");
     TestIngest.Opts opts = new TestIngest.Opts();
     opts.rows = 100*1000;
     opts.createTable = true;
     TestIngest.ingest(c, opts, BWOPTS);
     c.tableOperations().flush("test_ingest", null, null, true);
-    c.tableOperations().clone("test_ingest", "test_ingest2", true, null, null);
-    c.tableOperations().clone("test_ingest", "test_ingest3", true, null, null);
-    c.tableOperations().clone("test_ingest", "test_ingest4", true, null, null);
-    c.tableOperations().clone("test_ingest", "test_ingest5", true, null, null);
-    c.tableOperations().clone("test_ingest", "test_ingest6", true, null, null);
-    
+    for (int i = 1; i < TABLES; i++)
+      c.tableOperations().clone("test_ingest", "test_ingest" + i, true, null, null);
+    UtilWaitThread.sleep(11*1000); // time between checks of the thread pool sizes
     TCredentials creds = CredentialHelper.create("root", new PasswordToken(MacTest.PASSWORD), c.getInstance().getInstanceName());
-    UtilWaitThread.sleep(10);
-    for (int i = 2; i < 7; i++)
+    for (int i = 1; i < TABLES; i++)
       c.tableOperations().compact("test_ingest" + i, null, null, true, false);
-    int count = 0;
-    while (count == 0) {
+    for (int i = 0; i < 30; i++) {
+      int count = 0;
       MasterClientService.Iface client = null;
       MasterMonitorInfo stats = null;
       try {
@@ -79,9 +77,10 @@ public class DynamicThreadPoolsIT extends MacTest {
         }
       }
       System.out.println("count " + count);
+      if (count > 3)
+        return;
       UtilWaitThread.sleep(1000);
     }
-    assertTrue(count == 1 || count == 2); // sometimes we get two threads due to the way the stats are pulled
+    fail("Could not observe higher number of threads after changing the config");
   }
-  
 }
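
While iterating on the retry/timeout behavior above, a single integration test class can be selected with the stock maven-failsafe-plugin property (assuming default plugin behavior; illustrative only):

  # run only DynamicThreadPoolsIT
  mvn verify -Dit.test=DynamicThreadPoolsIT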

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f7e96a3e/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java b/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
index a52d629..622702f 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
@@ -44,7 +44,7 @@ public class MacTest {
   @Before
   public void setUp() throws Exception {
     folder.create();
-    MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder("miniAccumulo"), PASSWORD);
+    MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder(this.getClass().getSimpleName()), PASSWORD);
     configure(cfg);
     cluster = new MiniAccumuloCluster(cfg);
     cluster.start();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f7e96a3e/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
index 1be04b1..afb14d8 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
@@ -103,8 +103,12 @@ public class SplitIT extends MacTest {
     c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
     DeleteIT.deleteTest(c, cluster);
     c.tableOperations().flush("test_ingest", null, null, true);
-    UtilWaitThread.sleep(10*1000);
-    assertTrue(c.tableOperations().listSplits("test_ingest").size() > 30);
+    for (int i = 0; i < 5; i++) {
+      UtilWaitThread.sleep(10*1000);
+      if (c.tableOperations().listSplits("test_ingest").size() > 20)
+        break;
+    }
+    assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
   }
   
 }


[09/50] git commit: ACCUMULO-1550 committing Jonathan Hsieh's patch

Posted by kt...@apache.org.
ACCUMULO-1550 committing Jonathan Hsieh's patch


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/c82c4316
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/c82c4316
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/c82c4316

Branch: refs/heads/ACCUMULO-1000
Commit: c82c4316ad90ee7370c5919efb8ded65bb50b302
Parents: cfb01d4
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 11:57:15 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 11:57:15 2013 -0400

----------------------------------------------------------------------
 README                                          | 10 ++---
 bin/accumulo                                    |  4 +-
 bin/config.sh                                   | 41 +++++++++++++++-----
 bin/start-all.sh                                |  8 ++--
 bin/start-here.sh                               |  6 +--
 bin/start-server.sh                             |  2 +-
 bin/stop-all.sh                                 |  4 +-
 bin/stop-here.sh                                |  4 +-
 bin/tdown.sh                                    |  2 +-
 bin/tup.sh                                      |  2 +-
 .../1GB/native-standalone/accumulo-env.sh       |  4 +-
 conf/examples/1GB/standalone/accumulo-env.sh    |  4 +-
 .../2GB/native-standalone/accumulo-env.sh       |  4 +-
 conf/examples/2GB/standalone/accumulo-env.sh    |  4 +-
 .../3GB/native-standalone/accumulo-env.sh       |  4 +-
 conf/examples/3GB/standalone/accumulo-env.sh    |  4 +-
 .../512MB/native-standalone/accumulo-env.sh     |  4 +-
 conf/examples/512MB/standalone/accumulo-env.sh  |  4 +-
 .../org/apache/accumulo/server/Accumulo.java    |  4 +-
 .../start/classloader/AccumuloClassLoader.java  |  9 +++--
 test/system/continuous/agitator.pl              |  2 +-
 test/system/continuous/magitator.pl             |  6 +--
 test/system/continuous/mapred-setup.sh          |  2 +-
 test/system/continuous/start-stats.sh           |  2 +-
 test/system/randomwalk/README                   |  2 +-
 test/system/randomwalk/bin/reset-cluster.sh     | 10 ++---
 test/system/randomwalk/bin/start-all.sh         |  4 +-
 test/system/randomwalk/bin/start-local.sh       |  4 +-
 test/system/scalability/run.py                  |  2 +-
 29 files changed, 94 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/README
----------------------------------------------------------------------
diff --git a/README b/README
index 21b53e2..445af37 100644
--- a/README
+++ b/README
@@ -111,12 +111,12 @@ accumulo client code.  These directories must be at the same location on every
 node in the cluster.
 
 If you are configuring a larger cluster you will need to create the configuration
-files yourself and propogate the changes to the $ACCUMULO_HOME/conf directories:
+files yourself and propagate the changes to the $ACCUMULO_CONF_DIR directories:
 
-   Create a "slaves" file in $ACCUMULO_HOME/conf/.  This is a list of machines
+   Create a "slaves" file in $ACCUMULO_CONF_DIR/.  This is a list of machines
    where tablet servers and loggers will run.
 
-   Create a "masters" file in $ACCUMULO_HOME/conf/.  This is a list of
+   Create a "masters" file in $ACCUMULO_CONF_DIR/.  This is a list of
    machines where the master server will run. 
 
    Create conf/accumulo-env.sh following the template of
@@ -229,7 +229,7 @@ server, but you can also glob them if you wish.
 
   kadmin.local -q "xst -k accumulo.keytab -glob accumulo*"
 
-Place this file in $ACCUMULO_HOME/conf for every host. It should be owned by
+Place this file in $ACCUMULO_CONF_DIR for every host. It should be owned by
 the accumulo user and chmodded to 400. Add the following to the accumulo-env.sh
 
   kinit -kt $ACCUMULO_HOME/conf/accumulo.keytab accumulo/`hostname -f`
@@ -242,7 +242,7 @@ _HOST in lieu of your individual host names.
 
   <property>
     <name>general.kerberos.keytab</name>
-    <value>$ACCUMULO_HOME/conf/accumulo.keytab</value>
+    <value>$ACCUMULO_CONF_DIR/accumulo.keytab</value>
   </property>
 
   <property>
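
For illustration of the ACCUMULO_CONF_DIR convention described above (the path below is hypothetical), the configuration can live outside the installation directory and the start scripts will pick it up:

  # keep site configuration outside $ACCUMULO_HOME
  export ACCUMULO_CONF_DIR=/etc/accumulo/conf
  ls "$ACCUMULO_CONF_DIR"
  # accumulo-env.sh  accumulo-site.xml  masters  slaves  tracers
  $ACCUMULO_HOME/bin/start-all.sh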

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/accumulo
----------------------------------------------------------------------
diff --git a/bin/accumulo b/bin/accumulo
index 7ee197c..3ae3095 100755
--- a/bin/accumulo
+++ b/bin/accumulo
@@ -37,7 +37,7 @@ locationByProgram()
 {
    RESULT=$( which "$1" )
    if [[ "$?" != 0 && -z "$RESULT" ]]; then
-      echo "Cannot find '$1' and '$2' is not set in conf/accumulo-env.sh"
+      echo "Cannot find '$1' and '$2' is not set in $ACCUMULO_CONF_DIR/accumulo-env.sh"
       exit 1
    fi
    while [ -h "$RESULT" ]; do # resolve $RESULT until the file is no longer a symlink
@@ -80,7 +80,7 @@ logger)  export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} ${ACCUMULO_LOGGER_OPTS}"
 *)       export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} ${ACCUMULO_OTHER_OPTS}" ;;
 esac
 
-XML_FILES=${ACCUMULO_HOME}/conf
+XML_FILES=${ACCUMULO_CONF_DIR}
 LOG4J_JAR=$(find $HADOOP_PREFIX/lib $HADOOP_PREFIX/share/hadoop/common/lib -name 'log4j*.jar' -print 2>/dev/null | head -1)
 CLASSPATH=${XML_FILES}:${START_JAR}:${LOG4J_JAR}
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/config.sh
----------------------------------------------------------------------
diff --git a/bin/config.sh b/bin/config.sh
index a58b33a..6c4d74a 100755
--- a/bin/config.sh
+++ b/bin/config.sh
@@ -15,6 +15,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Guarantees that Accumulo and its environment variables are set.
+#
+# Values set by script that can be user provided.  If not provided, the script attempts to infer them.
+#  ACCUMULO_CONF_DIR  Location where accumulo-env.sh, accumulo-site.xml and friends will be read from
+#  ACCUMULO_HOME      Home directory for Accumulo
+#  ACCUMULO_LOG_DIR   Directory for Accumulo daemon logs
+#  ACCUMULO_VERSION   Accumulo version name
+#  HADOOP_PREFIX      Prefix to the home dir for hadoop.
+# 
+# Values always set by script.
+#  GC                 Machine to run GC daemon on.  Used by start-here.sh script
+#  MONITOR            Machine to run monitor daemon on. Used by start-here.sh script
+#  SSH                Default ssh parameters used to start daemons
+#  HADOOP_HOME        Home dir for hadoop.  TODO fix this.
+
 # Start: Resolve Script Directory
 SOURCE="${BASH_SOURCE[0]}"
 while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
@@ -29,8 +44,16 @@ script=$( basename "$SOURCE" )
 ACCUMULO_HOME=$( cd -P ${bin}/.. && pwd )
 export ACCUMULO_HOME
 
-if [ -f $ACCUMULO_HOME/conf/accumulo-env.sh ] ; then
-   . $ACCUMULO_HOME/conf/accumulo-env.sh
+ACCUMULO_CONF_DIR="${ACCUMULO_CONF_DIR:-$ACCUMULO_HOME/conf}"
+export ACCUMULO_CONF_DIR
+if [ -z "$ACCUMULO_CONF_DIR" -o ! -d "$ACCUMULO_CONF_DIR" ]
+then
+  echo "ACCUMULO_CONF_DIR=$ACCUMULO_CONF_DIR is not a valid directory.  Please make sure it exists"
+  exit 1
+fi
+
+if [ -f $ACCUMULO_CONF_DIR/accumulo-env.sh ] ; then
+   . $ACCUMULO_CONF_DIR/accumulo-env.sh
 elif [ -z "$ACCUMULO_TEST" ] ; then
    #
    # Attempt to bootstrap configuration and continue
@@ -70,17 +93,17 @@ then
 fi
 export HADOOP_PREFIX
 
-MASTER1=$(egrep -v '(^#|^\s*$)' "$ACCUMULO_HOME/conf/masters" | head -1)
+MASTER1=$(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/masters" | head -1)
 GC=$MASTER1
 MONITOR=$MASTER1
-if [ -f "$ACCUMULO_HOME/conf/gc" ]; then
-   GC=$(egrep -v '(^#|^\s*$)' "$ACCUMULO_HOME/conf/gc" | head -1)
+if [ -f "$ACCUMULO_CONF_DIR/gc" ]; then
+   GC=$(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/gc" | head -1)
 fi
-if [ -f "$ACCUMULO_HOME/conf/monitor" ]; then
-   MONITOR=$(egrep -v '(^#|^\s*$)' "$ACCUMULO_HOME/conf/monitor" | head -1)
+if [ -f "$ACCUMULO_CONF_DIR/monitor" ]; then
+   MONITOR=$(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/monitor" | head -1)
 fi
-if [ ! -f "$ACCUMULO_HOME/conf/tracers" ]; then
-   echo "$MASTER1" > "$ACCUMULO_HOME/conf/tracers"
+if [ ! -f "$ACCUMULO_CONF_DIR/tracers" ]; then
+   echo "$MASTER1" > "$ACCUMULO_CONF_DIR/tracers"
 fi
 
 SSH='ssh -qnf -o ConnectTimeout=2'
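
A quick sanity check of the new guard above (hypothetical path): pointing ACCUMULO_CONF_DIR at a directory that does not exist makes config.sh exit with an error rather than continuing with a broken setup:

  ACCUMULO_CONF_DIR=/nonexistent $ACCUMULO_HOME/bin/start-all.sh
  # ACCUMULO_CONF_DIR=/nonexistent is not a valid directory.  Please make sure it exists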

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/start-all.sh
----------------------------------------------------------------------
diff --git a/bin/start-all.sh b/bin/start-all.sh
index 8494e18..8470a40 100755
--- a/bin/start-all.sh
+++ b/bin/start-all.sh
@@ -28,8 +28,8 @@ bin="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
 . "$bin"/config.sh
 unset DISPLAY
 
-if [ ! -f $ACCUMULO_HOME/conf/accumulo-env.sh ] ; then
-   echo "${ACCUMULO_HOME}/conf/accumulo-env.sh does not exist. Please make sure you configure Accumulo before you run anything"
+if [ ! -f $ACCUMULO_CONF_DIR/accumulo-env.sh ] ; then
+   echo "${ACCUMULO_CONF_DIR}/accumulo-env.sh does not exist. Please make sure you configure Accumulo before you run anything"
    echo "We provide examples you can copy in ${ACCUMULO_HOME}/conf/examples/ which are set up for your memory footprint"
    exit 1
 fi
@@ -54,12 +54,12 @@ if [ "$1" != "--notSlaves" ]; then
 fi
 
 ${bin}/accumulo org.apache.accumulo.server.master.state.SetGoalState NORMAL
-for master in `egrep -v '(^#|^\s*$)' "$ACCUMULO_HOME/conf/masters"`; do
+for master in `egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/masters"`; do
    ${bin}/start-server.sh $master master
 done
 
 ${bin}/start-server.sh $GC gc "garbage collector"
 
-for tracer in `egrep -v '(^#|^\s*$)' "$ACCUMULO_HOME/conf/tracers"`; do
+for tracer in `egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/tracers"`; do
    ${bin}/start-server.sh $tracer tracer
 done

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/start-here.sh
----------------------------------------------------------------------
diff --git a/bin/start-here.sh b/bin/start-here.sh
index feb3d5e..2952975 100755
--- a/bin/start-here.sh
+++ b/bin/start-here.sh
@@ -42,14 +42,14 @@ fi
 
 HOSTS="`hostname -a` `hostname` localhost 127.0.0.1 $ip"
 for host in $HOSTS; do
-   if grep -q "^${host}\$" $ACCUMULO_HOME/conf/slaves; then
+   if grep -q "^${host}\$" $ACCUMULO_CONF_DIR/slaves; then
       ${bin}/start-server.sh $host tserver "tablet server"
       break
    fi
 done
 
 for host in $HOSTS; do
-   if grep -q "^${host}\$" $ACCUMULO_HOME/conf/masters; then
+   if grep -q "^${host}\$" $ACCUMULO_CONF_DIR/masters; then
       ${bin}/accumulo org.apache.accumulo.server.master.state.SetGoalState NORMAL
       ${bin}/start-server.sh $host master
       break
@@ -71,7 +71,7 @@ for host in $HOSTS; do
 done
 
 for host in $HOSTS; do
-   if grep -q "^${host}\$" $ACCUMULO_HOME/conf/tracers; then
+   if grep -q "^${host}\$" $ACCUMULO_CONF_DIR/tracers; then
       ${bin}/start-server.sh $host tracer 
       break
    fi

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/start-server.sh
----------------------------------------------------------------------
diff --git a/bin/start-server.sh b/bin/start-server.sh
index 75d9342..e38c084 100755
--- a/bin/start-server.sh
+++ b/bin/start-server.sh
@@ -39,7 +39,7 @@ LONGNAME="$3"
 if [ -z "$LONGNAME" ]; then
    LONGNAME="$2"
 fi
-SLAVES=$( wc -l < ${ACCUMULO_HOME}/conf/slaves )
+SLAVES=$( wc -l < ${ACCUMULO_CONF_DIR}/slaves )
 
 IFCONFIG=/sbin/ifconfig
 if [ ! -x $IFCONFIG ]; then

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/stop-all.sh
----------------------------------------------------------------------
diff --git a/bin/stop-all.sh b/bin/stop-all.sh
index e2724aa..348d067 100755
--- a/bin/stop-all.sh
+++ b/bin/stop-all.sh
@@ -44,7 +44,7 @@ sleep 5
 
 #look for master and gc processes not killed by 'admin stopAll'
 for signal in TERM KILL ; do
-   for master in `grep -v '^#' "$ACCUMULO_HOME/conf/masters"`; do
+   for master in `grep -v '^#' "$ACCUMULO_CONF_DIR/masters"`; do
       ${bin}/stop-server.sh $master "$ACCUMULO_HOME/lib/accumulo-start.jar" master $signal
    done
 
@@ -52,7 +52,7 @@ for signal in TERM KILL ; do
 
    ${bin}/stop-server.sh "$MONITOR" "$ACCUMULO_HOME/.*/accumulo-start.*.jar" monitor $signal
 
-   for tracer in `egrep -v '(^#|^\s*$)' "$ACCUMULO_HOME/conf/tracers"`; do
+   for tracer in `egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/tracers"`; do
       ${bin}/stop-server.sh $tracer "$ACCUMULO_HOME/.*/accumulo-start.*.jar" tracer $signal
    done
 done

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/stop-here.sh
----------------------------------------------------------------------
diff --git a/bin/stop-here.sh b/bin/stop-here.sh
index d23b228..8a3b513 100755
--- a/bin/stop-here.sh
+++ b/bin/stop-here.sh
@@ -33,11 +33,11 @@ bin="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
 
 ACCUMULO="$ACCUMULO_HOME/lib/accumulo-start.jar"
 
-if egrep -q localhost\|127.0.0.1 $ACCUMULO_HOME/conf/slaves; then
+if egrep -q localhost\|127.0.0.1 $ACCUMULO_CONF_DIR/slaves; then
    $bin/accumulo admin stop localhost
 else
    for host in `hostname -a`; do
-      if grep -q ${host} $ACCUMULO_HOME/conf/slaves; then
+      if grep -q ${host} $ACCUMULO_CONF_DIR/slaves; then
          ${bin}/accumulo admin stop $host
       fi
    done

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/tdown.sh
----------------------------------------------------------------------
diff --git a/bin/tdown.sh b/bin/tdown.sh
index 3dfc856..141ad24 100755
--- a/bin/tdown.sh
+++ b/bin/tdown.sh
@@ -28,7 +28,7 @@ bin="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
 . "$bin"/config.sh
 
 HADOOP_CMD=$HADOOP_PREFIX/bin/hadoop
-SLAVES=$ACCUMULO_HOME/conf/slaves
+SLAVES=$ACCUMULO_CONF_DIR/slaves
 SLAVE_HOSTS=$(egrep -v '(^#|^\s*$)' "${SLAVES}")
 
 echo "Stopping unresponsive tablet servers (if any)..."

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/bin/tup.sh
----------------------------------------------------------------------
diff --git a/bin/tup.sh b/bin/tup.sh
index 07b4609..b26def5 100755
--- a/bin/tup.sh
+++ b/bin/tup.sh
@@ -27,7 +27,7 @@ bin="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
 
 . "$bin"/config.sh
 
-SLAVES=$ACCUMULO_HOME/conf/slaves
+SLAVES=$ACCUMULO_CONF_DIR/slaves
 
 echo -n "Starting tablet servers ..."
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/conf/examples/1GB/native-standalone/accumulo-env.sh
----------------------------------------------------------------------
diff --git a/conf/examples/1GB/native-standalone/accumulo-env.sh b/conf/examples/1GB/native-standalone/accumulo-env.sh
index 04c85e7..16779c4 100755
--- a/conf/examples/1GB/native-standalone/accumulo-env.sh
+++ b/conf/examples/1GB/native-standalone/accumulo-env.sh
@@ -42,9 +42,9 @@ test -z "$HADOOP_CONF_DIR"       && export HADOOP_CONF_DIR="$HADOOP_PREFIX/conf"
 test -z "$JAVA_HOME"             && export JAVA_HOME=/path/to/java
 test -z "$ZOOKEEPER_HOME"        && export ZOOKEEPER_HOME=/path/to/zookeeper
 test -z "$ACCUMULO_LOG_DIR"      && export ACCUMULO_LOG_DIR=$ACCUMULO_HOME/logs
-if [ -f ${ACCUMULO_HOME}/conf/accumulo.policy ]
+if [ -f ${ACCUMULO_CONF_DIR}/accumulo.policy ]
 then
-   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_HOME}/conf/accumulo.policy"
+   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_CONF_DIR}/accumulo.policy"
 fi
 test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx128m -Xms128m "
 test -z "$ACCUMULO_MASTER_OPTS"  && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx128m -Xms128m"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/conf/examples/1GB/standalone/accumulo-env.sh
----------------------------------------------------------------------
diff --git a/conf/examples/1GB/standalone/accumulo-env.sh b/conf/examples/1GB/standalone/accumulo-env.sh
index f830256..10c1ab9 100755
--- a/conf/examples/1GB/standalone/accumulo-env.sh
+++ b/conf/examples/1GB/standalone/accumulo-env.sh
@@ -42,9 +42,9 @@ test -z "$HADOOP_CONF_DIR"       && export HADOOP_CONF_DIR="$HADOOP_PREFIX/conf"
 test -z "$JAVA_HOME"             && export JAVA_HOME=/path/to/java
 test -z "$ZOOKEEPER_HOME"        && export ZOOKEEPER_HOME=/path/to/zookeeper
 test -z "$ACCUMULO_LOG_DIR"      && export ACCUMULO_LOG_DIR=$ACCUMULO_HOME/logs
-if [ -f ${ACCUMULO_HOME}/conf/accumulo.policy ]
+if [ -f ${ACCUMULO_CONF_DIR}/accumulo.policy ]
 then
-   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_HOME}/conf/accumulo.policy"
+   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_CONF_DIR}/accumulo.policy"
 fi
 test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx384m -Xms384m "
 test -z "$ACCUMULO_MASTER_OPTS"  && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx128m -Xms128m"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/conf/examples/2GB/native-standalone/accumulo-env.sh
----------------------------------------------------------------------
diff --git a/conf/examples/2GB/native-standalone/accumulo-env.sh b/conf/examples/2GB/native-standalone/accumulo-env.sh
index 7db28c1..4302a1a 100755
--- a/conf/examples/2GB/native-standalone/accumulo-env.sh
+++ b/conf/examples/2GB/native-standalone/accumulo-env.sh
@@ -41,9 +41,9 @@ test -z "$HADOOP_CONF_DIR"       && export HADOOP_CONF_DIR="$HADOOP_PREFIX/conf"
 test -z "$JAVA_HOME"             && export JAVA_HOME=/path/to/java
 test -z "$ZOOKEEPER_HOME"        && export ZOOKEEPER_HOME=/path/to/zookeeper
 test -z "$ACCUMULO_LOG_DIR"      && export ACCUMULO_LOG_DIR=$ACCUMULO_HOME/logs
-if [ -f ${ACCUMULO_HOME}/conf/accumulo.policy ]
+if [ -f ${ACCUMULO_CONF_DIR}/accumulo.policy ]
 then
-   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_HOME}/conf/accumulo.policy"
+   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_CONF_DIR}/accumulo.policy"
 fi
 test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx256m -Xms256m "
 test -z "$ACCUMULO_MASTER_OPTS"  && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx256m -Xms256m"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/conf/examples/2GB/standalone/accumulo-env.sh
----------------------------------------------------------------------
diff --git a/conf/examples/2GB/standalone/accumulo-env.sh b/conf/examples/2GB/standalone/accumulo-env.sh
index a833476..0b29672 100755
--- a/conf/examples/2GB/standalone/accumulo-env.sh
+++ b/conf/examples/2GB/standalone/accumulo-env.sh
@@ -42,9 +42,9 @@ test -z "$HADOOP_CONF_DIR"       && export HADOOP_CONF_DIR="$HADOOP_PREFIX/conf"
 test -z "$JAVA_HOME"             && export JAVA_HOME=/path/to/java
 test -z "$ZOOKEEPER_HOME"        && export ZOOKEEPER_HOME=/path/to/zookeeper
 test -z "$ACCUMULO_LOG_DIR"      && export ACCUMULO_LOG_DIR=$ACCUMULO_HOME/logs
-if [ -f ${ACCUMULO_HOME}/conf/accumulo.policy ]
+if [ -f ${ACCUMULO_CONF_DIR}/accumulo.policy ]
 then
-   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_HOME}/conf/accumulo.policy"
+   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_CONF_DIR}/accumulo.policy"
 fi
 test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx768m -Xms768m "
 test -z "$ACCUMULO_MASTER_OPTS"  && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx256m -Xms256m"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/conf/examples/3GB/native-standalone/accumulo-env.sh
----------------------------------------------------------------------
diff --git a/conf/examples/3GB/native-standalone/accumulo-env.sh b/conf/examples/3GB/native-standalone/accumulo-env.sh
index 5c8fffb..82f4de5 100755
--- a/conf/examples/3GB/native-standalone/accumulo-env.sh
+++ b/conf/examples/3GB/native-standalone/accumulo-env.sh
@@ -41,9 +41,9 @@ test -z "$HADOOP_CONF_DIR"       && export HADOOP_CONF_DIR="$HADOOP_PREFIX/conf"
 test -z "$JAVA_HOME"             && export JAVA_HOME=/path/to/java
 test -z "$ZOOKEEPER_HOME"        && export ZOOKEEPER_HOME=/path/to/zookeeper
 test -z "$ACCUMULO_LOG_DIR"      && export ACCUMULO_LOG_DIR=$ACCUMULO_HOME/logs
-if [ -f ${ACCUMULO_HOME}/conf/accumulo.policy ]
+if [ -f ${ACCUMULO_CONF_DIR}/accumulo.policy ]
 then
-   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_HOME}/conf/accumulo.policy"
+   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_CONF_DIR}/accumulo.policy"
 fi
 test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx1g -Xms1g -XX:NewSize=500m -XX:MaxNewSize=500m "
 test -z "$ACCUMULO_MASTER_OPTS"  && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx1g -Xms1g"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/conf/examples/3GB/standalone/accumulo-env.sh
----------------------------------------------------------------------
diff --git a/conf/examples/3GB/standalone/accumulo-env.sh b/conf/examples/3GB/standalone/accumulo-env.sh
index 04c5791..ec8fae4 100755
--- a/conf/examples/3GB/standalone/accumulo-env.sh
+++ b/conf/examples/3GB/standalone/accumulo-env.sh
@@ -41,9 +41,9 @@ test -z "$HADOOP_CONF_DIR"       && export HADOOP_CONF_DIR="$HADOOP_PREFIX/conf"
 test -z "$JAVA_HOME"             && export JAVA_HOME=/path/to/java
 test -z "$ZOOKEEPER_HOME"        && export ZOOKEEPER_HOME=/path/to/zookeeper
 test -z "$ACCUMULO_LOG_DIR"      && export ACCUMULO_LOG_DIR=$ACCUMULO_HOME/logs
-if [ -f ${ACCUMULO_HOME}/conf/accumulo.policy ]
+if [ -f ${ACCUMULO_CONF_DIR}/accumulo.policy ]
 then
-   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_HOME}/conf/accumulo.policy"
+   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_CONF_DIR}/accumulo.policy"
 fi
 test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx2g -Xms2g -XX:NewSize=1G -XX:MaxNewSize=1G "
 test -z "$ACCUMULO_MASTER_OPTS"  && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx1g -Xms1g"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/conf/examples/512MB/native-standalone/accumulo-env.sh
----------------------------------------------------------------------
diff --git a/conf/examples/512MB/native-standalone/accumulo-env.sh b/conf/examples/512MB/native-standalone/accumulo-env.sh
index 86de0e0..e7f3846 100755
--- a/conf/examples/512MB/native-standalone/accumulo-env.sh
+++ b/conf/examples/512MB/native-standalone/accumulo-env.sh
@@ -41,9 +41,9 @@ test -z "$HADOOP_CONF_DIR"       && export HADOOP_CONF_DIR="$HADOOP_PREFIX/conf"
 test -z "$JAVA_HOME"             && export JAVA_HOME=/path/to/java
 test -z "$ZOOKEEPER_HOME"        && export ZOOKEEPER_HOME=/path/to/zookeeper
 test -z "$ACCUMULO_LOG_DIR"      && export ACCUMULO_LOG_DIR=$ACCUMULO_HOME/logs
-if [ -f ${ACCUMULO_HOME}/conf/accumulo.policy ]
+if [ -f ${ACCUMULO_CONF_DIR}/accumulo.policy ]
 then
-   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_HOME}/conf/accumulo.policy"
+   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_CONF_DIR}/accumulo.policy"
 fi
 test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx48m -Xms48m "
 test -z "$ACCUMULO_MASTER_OPTS"  && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx128m -Xms128m"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/conf/examples/512MB/standalone/accumulo-env.sh
----------------------------------------------------------------------
diff --git a/conf/examples/512MB/standalone/accumulo-env.sh b/conf/examples/512MB/standalone/accumulo-env.sh
index 36987a7..1034632 100755
--- a/conf/examples/512MB/standalone/accumulo-env.sh
+++ b/conf/examples/512MB/standalone/accumulo-env.sh
@@ -41,9 +41,9 @@ test -z "$HADOOP_CONF_DIR"       && export HADOOP_CONF_DIR="$HADOOP_PREFIX/conf"
 test -z "$JAVA_HOME"             && export JAVA_HOME=/path/to/java
 test -z "$ZOOKEEPER_HOME"        && export ZOOKEEPER_HOME=/path/to/zookeeper
 test -z "$ACCUMULO_LOG_DIR"      && export ACCUMULO_LOG_DIR=$ACCUMULO_HOME/logs
-if [ -f ${ACCUMULO_HOME}/conf/accumulo.policy ]
+if [ -f ${ACCUMULO_CONF_DIR}/accumulo.policy ]
 then
-   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_HOME}/conf/accumulo.policy"
+   POLICY="-Djava.security.manager -Djava.security.policy=${ACCUMULO_CONF_DIR}/accumulo.policy"
 fi
 test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx128m -Xms128m "
 test -z "$ACCUMULO_MASTER_OPTS"  && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx128m -Xms128m"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/server/src/main/java/org/apache/accumulo/server/Accumulo.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/Accumulo.java b/server/src/main/java/org/apache/accumulo/server/Accumulo.java
index b4f24a3..f8ca31a 100644
--- a/server/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/src/main/java/org/apache/accumulo/server/Accumulo.java
@@ -100,10 +100,10 @@ public class Accumulo {
       System.setProperty("org.apache.accumulo.core.host.log", localhost);
     
     // Use a specific log config, if it exists
-    String logConfig = String.format("%s/conf/%s_logger.xml", System.getenv("ACCUMULO_HOME"), application);
+    String logConfig = String.format("%s/%s_logger.xml", System.getenv("ACCUMULO_CONF_DIR"), application);
     if (!new File(logConfig).exists()) {
       // otherwise, use the generic config
-      logConfig = String.format("%s/conf/generic_logger.xml", System.getenv("ACCUMULO_HOME"));
+      logConfig = String.format("%s/generic_logger.xml", System.getenv("ACCUMULO_CONF_DIR"));
     }
     // Turn off messages about not being able to reach the remote logger... we protect against that.
     LogLog.setQuietMode(true);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
----------------------------------------------------------------------
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
index 2cac8d9..594ad8f 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
@@ -46,7 +46,7 @@ public class AccumuloClassLoader {
   public static final String CLASSPATH_PROPERTY_NAME = "general.classpaths";
   
   public static final String ACCUMULO_CLASSPATH_VALUE = 
-      "$ACCUMULO_HOME/conf,\n" + 
+      "$ACCUMULO_CONF_DIR,\n" + 
           "$ACCUMULO_HOME/lib/[^.].*.jar,\n" + 
           "$ZOOKEEPER_HOME/zookeeper[^.].*.jar,\n" + 
           "$HADOOP_CONF_DIR,\n" +
@@ -66,8 +66,11 @@ public class AccumuloClassLoader {
   
   static {
     String configFile = System.getProperty("org.apache.accumulo.config.file", "accumulo-site.xml");
-    if (System.getenv("ACCUMULO_HOME") != null) {
-      // accumulo home should be set
+    if (System.getenv("ACCUMULO_CONF_DIR") != null) {
+      // accumulo conf dir should be set
+      SITE_CONF = System.getenv("ACCUMULO_CONF_DIR");
+    } else if (System.getenv("ACCUMULO_HOME") != null) {
+      // if no accumulo conf dir, try accumulo home default
       SITE_CONF = System.getenv("ACCUMULO_HOME") + "/conf/" + configFile;
     } else {
       SITE_CONF = null;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/continuous/agitator.pl
----------------------------------------------------------------------
diff --git a/test/system/continuous/agitator.pl b/test/system/continuous/agitator.pl
index f769942..d9c5395 100755
--- a/test/system/continuous/agitator.pl
+++ b/test/system/continuous/agitator.pl
@@ -64,7 +64,7 @@ if($minKill > $maxKill){
 	die("minKill > maxKill $minKill > $maxKill");
 }
 
-@slavesRaw = `cat $ACCUMULO_HOME/conf/slaves`;
+@slavesRaw = `cat $ACCUMULO_CONF_DIR/slaves`;
 chomp(@slavesRaw);
 
 for $slave (@slavesRaw){

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/continuous/magitator.pl
----------------------------------------------------------------------
diff --git a/test/system/continuous/magitator.pl b/test/system/continuous/magitator.pl
index 8b22505..a0cab60 100755
--- a/test/system/continuous/magitator.pl
+++ b/test/system/continuous/magitator.pl
@@ -29,7 +29,7 @@ $ACCUMULO_HOME="../../..";
 $sleep1 = $ARGV[0];
 $sleep2 = $ARGV[1];
 
-@mastersRaw = `cat $ACCUMULO_HOME/conf/masters`;
+@mastersRaw = `cat $ACCUMULO_CONF_DIR/masters`;
 chomp(@mastersRaw);
 
 for $master (@mastersRaw){
@@ -52,10 +52,10 @@ while(1){
 		system($cmd);
 	}else{
 		print STDERR "$t Killing all masters\n";
-		$cmd = "pssh -h $ACCUMULO_HOME/conf/masters \"pkill -f '[ ]org.apache.accumulo.start.*master'\" < /dev/null";
+		$cmd = "pssh -h $ACCUMULO_CONF_DIR/masters \"pkill -f '[ ]org.apache.accumulo.start.*master'\" < /dev/null";
 		print "$t $cmd\n";
 		system($cmd);
-		$cmd = "pssh -h $ACCUMULO_HOME/conf/masters \"pkill -f '[ ]org.apache.accumulo.start.*gc'\" < /dev/null";
+		$cmd = "pssh -h $ACCUMULO_CONF_DIR/masters \"pkill -f '[ ]org.apache.accumulo.start.*gc'\" < /dev/null";
 		print "$t $cmd\n";
 		system($cmd);
 	}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/continuous/mapred-setup.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/mapred-setup.sh b/test/system/continuous/mapred-setup.sh
index aa2b11b..c348131 100755
--- a/test/system/continuous/mapred-setup.sh
+++ b/test/system/continuous/mapred-setup.sh
@@ -19,6 +19,6 @@
 # for running a map reduce job
 
 . continuous-env.sh
-. $ACCUMULO_HOME/conf/accumulo-env.sh
+. $ACCUMULO_CONF_DIR/accumulo-env.sh
 
 SERVER_LIBJAR="$ACCUMULO_HOME/lib/accumulo-test.jar"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/continuous/start-stats.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/start-stats.sh b/test/system/continuous/start-stats.sh
index 4e42930..3aad413 100755
--- a/test/system/continuous/start-stats.sh
+++ b/test/system/continuous/start-stats.sh
@@ -22,7 +22,7 @@ mkdir -p $CONTINUOUS_LOG_DIR
 
 CONFIG_OUT=$CONTINUOUS_LOG_DIR/`date +%Y%m%d%H%M%S`_`hostname`_config.out
 
-cat $ACCUMULO_HOME/conf/accumulo-env.sh > $CONFIG_OUT
+cat $ACCUMULO_CONF_DIR/accumulo-env.sh > $CONFIG_OUT
 echo >> $CONFIG_OUT
 echo -e "config -np\nconfig -t $TABLE -np\nquit" | $ACCUMULO_HOME/bin/accumulo shell -u $USER -p $PASS >> $CONFIG_OUT
 echo >> $CONFIG_OUT

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/randomwalk/README
----------------------------------------------------------------------
diff --git a/test/system/randomwalk/README b/test/system/randomwalk/README
index 0c33310..78a50c6 100644
--- a/test/system/randomwalk/README
+++ b/test/system/randomwalk/README
@@ -4,7 +4,7 @@ The randomwalk framework needs to be configured for your Accumulo instance by
 doing the following steps:
 
 1.  Make sure you have both ACCUMULO_HOME and HADOOP_HOME set in your 
-    $ACCUMULO_HOME/conf/accumulo-env.sh.
+    $ACCUMULO_CONF_DIR/accumulo-env.sh.
 
 2.  Create 'randomwalk.conf' file in the conf directory containing settings
     needed by walkers to connect to Accumulo.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/randomwalk/bin/reset-cluster.sh
----------------------------------------------------------------------
diff --git a/test/system/randomwalk/bin/reset-cluster.sh b/test/system/randomwalk/bin/reset-cluster.sh
index 15d791e..b5beb65 100755
--- a/test/system/randomwalk/bin/reset-cluster.sh
+++ b/test/system/randomwalk/bin/reset-cluster.sh
@@ -34,8 +34,8 @@ this="$bin/$script"
 ACCUMULO_HOME=`dirname "$this"`/../../../..
 export ACCUMULO_HOME=`cd $ACCUMULO_HOME; pwd`
 
-if [ -f $ACCUMULO_HOME/conf/accumulo-env.sh ] ; then
-. $ACCUMULO_HOME/conf/accumulo-env.sh
+if [ -f $ACCUMULO_CONF_DIR/accumulo-env.sh ] ; then
+. $ACCUMULO_CONF_DIR/accumulo-env.sh
 fi
 
 if [ -z $HADOOP_PREFIX ] ; then
@@ -49,7 +49,7 @@ if [ "$1" = "" ] ; then
 fi
 
 echo 'killing accumulo'
-pssh -h $ACCUMULO_HOME/conf/slaves "pkill -f org.apache.accumulo.start" < /dev/null
+pssh -h $ACCUMULO_CONF_DIR/slaves "pkill -f org.apache.accumulo.start" < /dev/null
 pkill -f org.apache.accumulo.start
 pkill -f agitator.pl
 
@@ -64,10 +64,10 @@ rm -f $ACCUMULO_HOME/test/system/continuous/logs/*
 rm -f ~/rwlogs/*
 
 echo 'removing old code'
-pssh -h $ACCUMULO_HOME/conf/slaves "rm -rf $ACCUMULO_HOME" < /dev/null
+pssh -h $ACCUMULO_CONF_DIR/slaves "rm -rf $ACCUMULO_HOME" < /dev/null
 
 echo 'pushing new code'
-prsync -r -h $ACCUMULO_HOME/conf/slaves $ACCUMULO_HOME /opt/dev
+prsync -r -h $ACCUMULO_CONF_DIR/slaves $ACCUMULO_HOME /opt/dev
 
 echo 'removing /accumulo dir'
 $HADOOP_PREFIX/bin/hadoop fs -rmr /accumulo

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/randomwalk/bin/start-all.sh
----------------------------------------------------------------------
diff --git a/test/system/randomwalk/bin/start-all.sh b/test/system/randomwalk/bin/start-all.sh
index 217c3b4..de9a9f1 100755
--- a/test/system/randomwalk/bin/start-all.sh
+++ b/test/system/randomwalk/bin/start-all.sh
@@ -34,8 +34,8 @@ this="$bin/$script"
 ACCUMULO_HOME=`dirname "$this"`/../../../..
 export ACCUMULO_HOME=`cd $ACCUMULO_HOME; pwd`
 
-if [ -f $ACCUMULO_HOME/conf/accumulo-env.sh ] ; then
-. $ACCUMULO_HOME/conf/accumulo-env.sh
+if [ -f $ACCUMULO_CONF_DIR/accumulo-env.sh ] ; then
+. $ACCUMULO_CONF_DIR/accumulo-env.sh
 fi
 
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/randomwalk/bin/start-local.sh
----------------------------------------------------------------------
diff --git a/test/system/randomwalk/bin/start-local.sh b/test/system/randomwalk/bin/start-local.sh
index 262a37c..ebb1e92 100755
--- a/test/system/randomwalk/bin/start-local.sh
+++ b/test/system/randomwalk/bin/start-local.sh
@@ -35,8 +35,8 @@ this="$bin/$script"
 ACCUMULO_HOME=`dirname "$this"`/../../../..
 export ACCUMULO_HOME=`cd $ACCUMULO_HOME; pwd`
 
-if [ -f $ACCUMULO_HOME/conf/accumulo-env.sh ] ; then
-. $ACCUMULO_HOME/conf/accumulo-env.sh
+if [ -f $ACCUMULO_CONF_DIR/accumulo-env.sh ] ; then
+. $ACCUMULO_CONF_DIR/accumulo-env.sh
 fi
 
 if [ -z "$HADOOP_PREFIX" ] ; then

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c82c4316/test/system/scalability/run.py
----------------------------------------------------------------------
diff --git a/test/system/scalability/run.py b/test/system/scalability/run.py
index bca64c1..ad98d63 100755
--- a/test/system/scalability/run.py
+++ b/test/system/scalability/run.py
@@ -57,7 +57,7 @@ def runTest(testName, siteConfig, testDir, numNodes, fdata):
     syscall('head -n %d %s > %s' % (numNodes,slavesPath,nodesPath))
 
     log('Copying slaves file to accumulo config')
-    syscall('cp '+nodesPath+' $ACCUMULO_HOME/conf/slaves');
+    syscall('cp '+nodesPath+' $ACCUMULO_CONF_DIR/slaves');
 
     log('Removing /accumulo directory in HDFS')
     syscall("hadoop fs -rmr /accumulo")


[41/50] git commit: ACCUMULO-998 Use junit asserts and fix code formatting for tests

Posted by kt...@apache.org.
ACCUMULO-998 Use junit asserts and fix code formatting for tests


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/47d1f5fe
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/47d1f5fe
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/47d1f5fe

Branch: refs/heads/ACCUMULO-1000
Commit: 47d1f5fe3ac2714bc4f7fa8baf413fbfd6ad8801
Parents: a943f32
Author: Christopher Tubbs <ct...@apache.org>
Authored: Mon Jul 22 14:31:50 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Mon Jul 22 14:31:50 2013 -0400

----------------------------------------------------------------------
 .../accumulo/core/file/rfile/RFileTest.java     | 251 +++++++++----------
 1 file changed, 113 insertions(+), 138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/47d1f5fe/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
index ebdb2d1..37b35a2 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
@@ -70,12 +70,10 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
-
 public class RFileTest {
   
   private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<ByteSequence>();
   
-  
   static {
     Logger.getLogger(org.apache.hadoop.io.compress.CodecPool.class).setLevel(Level.OFF);
     Logger.getLogger(org.apache.hadoop.util.NativeCodeLoader.class).setLevel(Level.OFF);
@@ -174,7 +172,7 @@ public class RFileTest {
     private FSDataInputStream in;
     public Reader reader;
     public SortedKeyValueIterator<Key,Value> iter;
-
+    
     public void openWriter(boolean startDLG) throws IOException {
       
       if (outputFile == null) {
@@ -183,7 +181,7 @@ public class RFileTest {
         dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
       } else {
         BufferedOutputStream bufos = new BufferedOutputStream(new FileOutputStream(outputFile));
-        dos = new FSDataOutputStream(bufos, new FileSystem.Statistics("a"));       
+        dos = new FSDataOutputStream(bufos, new FileSystem.Statistics("a"));
       }
       CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(dos, "gz", conf);
       writer = new RFile.Writer(_cbw, 1000, 1000);
@@ -191,7 +189,7 @@ public class RFileTest {
       if (startDLG)
         writer.startDefaultLocalityGroup();
     }
-        
+    
     public void openWriter() throws IOException {
       openWriter(true);
     }
@@ -263,7 +261,7 @@ public class RFileTest {
     
     trf.openWriter();
     trf.closeWriter();
-        
+    
     trf.openReader();
     trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
     assertFalse(trf.iter.hasTop());
@@ -352,10 +350,10 @@ public class RFileTest {
         }
       }
     }
-		
+    
     // trf.writer.append(nk("r1","cf1","cq1","L1", 55), nv("foo"));
     trf.closeWriter();
-
+    
     trf.openReader();
     // seek before everything
     trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
@@ -448,7 +446,7 @@ public class RFileTest {
         trf.iter.next();
       }
     }
-
+    
     trf.closeReader();
   }
   
@@ -1266,8 +1264,8 @@ public class RFileTest {
       count++;
       indexIter.next();
     }
-
-    assert(count > 4);
+    
+    assertTrue(count > 4);
     
     trf.iter.seek(new Range(nk("r0000", "cf1", "cq1", "", 1), true, nk("r0001", "cf1", "cq1", "", 1), false), EMPTY_COL_FAMS, false);
     
@@ -1281,7 +1279,7 @@ public class RFileTest {
     assertFalse(trf.iter.hasTop());
     
     trf.iter.seek(new Range(nk("r0000", "cf1", "cq1", "", 1), false, nk("r0001", "cf1", "cq1", "", 1), true), EMPTY_COL_FAMS, false);
-		
+    
     for (int i = 2048; i < 4096; i++) {
       assertTrue(trf.iter.hasTop());
       assertEquals(nk("r0001", "cf1", "cq1", "", 1), trf.iter.getTopKey());
@@ -1495,14 +1493,14 @@ public class RFileTest {
     trf.openReader();
     
     Set<ByteSequence> cfs = Collections.emptySet();
-
+    
     Random rand = new Random();
-
+    
     for (int count = 0; count < 100; count++) {
       
       int start = rand.nextInt(2300);
       Range range = new Range(nk(nf("r_", start), "cf1", "cq1", "L1", 42), nk(nf("r_", start + 100), "cf1", "cq1", "L1", 42));
-
+      
       trf.reader.seek(range, cfs, false);
       
       int numToScan = rand.nextInt(100);
@@ -1515,12 +1513,12 @@ public class RFileTest {
       
       assertTrue(trf.reader.hasTop());
       assertEquals(nk(nf("r_", start + numToScan), "cf1", "cq1", "L1", 42), trf.reader.getTopKey());
-
+      
       // seek a little forward from the last range and read a few keys within the unconsumed portion of the last range
-
+      
       int start2 = start + numToScan + rand.nextInt(3);
       int end2 = start2 + rand.nextInt(3);
-
+      
       range = new Range(nk(nf("r_", start2), "cf1", "cq1", "L1", 42), nk(nf("r_", end2), "cf1", "cq1", "L1", 42));
       trf.reader.seek(range, cfs, false);
       
@@ -1531,13 +1529,12 @@ public class RFileTest {
       }
       
       assertFalse(trf.reader.hasTop());
-
+      
     }
     
     trf.closeReader();
   }
-
-
+  
   @Test(expected = NullPointerException.class)
   public void testMissingUnreleasedVersions() throws Exception {
     runVersionTest(5);
@@ -1604,12 +1601,11 @@ public class RFileTest {
     reader.close();
   }
   
-  
-  private AccumuloConfiguration setAndGetAccumuloConfig(String cryptoConfSetting) {  
+  private AccumuloConfiguration setAndGetAccumuloConfig(String cryptoConfSetting) {
     @SuppressWarnings("deprecation")
     AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
     System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, cryptoConfSetting);
-    ((SiteConfiguration)conf).clearAndNull();
+    ((SiteConfiguration) conf).clearAndNull();
     return conf;
   }
   
@@ -1619,25 +1615,24 @@ public class RFileTest {
     } else {
       System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     }
-    ((SiteConfiguration)conf).clearAndNull();
+    ((SiteConfiguration) conf).clearAndNull();
   }
-
-
+  
   @Test
   public void testEncRFile1() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test1();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile2() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test2();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
@@ -1647,57 +1642,57 @@ public class RFileTest {
   public void testEncRFile3() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test3();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile4() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test4();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile5() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test5();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile6() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test6();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile7() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test7();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile8() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test8();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
@@ -1707,38 +1702,37 @@ public class RFileTest {
   public void testEncRFile9() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test9();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile10() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test10();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile11() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test11();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
   
-  
   @Test
   public void testEncRFile12() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test12();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
@@ -1748,17 +1742,17 @@ public class RFileTest {
   public void testEncRFile13() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test13();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
+  
   @Test
   public void testEncRFile14() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test14();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
@@ -1768,7 +1762,7 @@ public class RFileTest {
   public void testEncRFile16() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test16();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
@@ -1778,7 +1772,7 @@ public class RFileTest {
   public void testEncRFile17() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test17();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
@@ -1788,7 +1782,7 @@ public class RFileTest {
   public void testEncRFile18() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test18();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
@@ -1798,19 +1792,19 @@ public class RFileTest {
   public void testEncRFile19() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
-
+    
     test19();
     
     restoreOldConfiguration(oldSiteConfigProperty, conf);
   }
-
-  //@Test
+  
+  @Test
   public void testEncryptedRFiles() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     @SuppressWarnings("deprecation")
     AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
     System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_ON_CONF);
-    ((SiteConfiguration)conf).clearAndNull();
+    ((SiteConfiguration) conf).clearAndNull();
     
     test1();
     test2();
@@ -1821,71 +1815,66 @@ public class RFileTest {
     test7();
     test8();
     
-    
     if (oldSiteConfigProperty != null) {
       System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, oldSiteConfigProperty);
     } else {
       System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     }
-    ((SiteConfiguration)conf).clearAndNull();
+    ((SiteConfiguration) conf).clearAndNull();
   }
   
-  //@Test
+  // @Test
   public void testRootTabletFromServer() throws Exception {
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     @SuppressWarnings("deprecation")
     AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
     System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_ON_CONF);
-    ((SiteConfiguration)conf).clearAndNull();
-
+    ((SiteConfiguration) conf).clearAndNull();
+    
     TestRFile trf = new TestRFile();
     trf.preGeneratedInputFile = new File("/tmp/should_work.rf");
     
     trf.openReader();
     trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
-    assert(trf.iter.hasTop());
+    assertTrue(trf.iter.hasTop());
     
-    assert(trf.reader.getLastKey() != null);
+    assertTrue(trf.reader.getLastKey() != null);
     
     trf.closeReader();
-   
     
     if (oldSiteConfigProperty != null) {
       System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, oldSiteConfigProperty);
     } else {
       System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     }
-    ((SiteConfiguration)conf).clearAndNull();
-
-  }  
+    ((SiteConfiguration) conf).clearAndNull();
+    
+  }
   
   private static final int TOTAL_NUM_ROWS = 10;
   private static final int ROW_MOD_VALUE = 10;
   
-  //@Test
+  // @Test
   // These tests will purge the disk cache when the run, so it's not recommended that they be run in development systems.
   public void testEncryptedRFileWriteSpeed() throws Exception {
-
     
-    System.out.println("Unencrypted Write, Unencrypted Read (Cache), Unencrypted Read (FS only), Encrypted Write, Encrypted Read (Cache), Encrypted Read (FS Only)");
+    System.out
+        .println("Unencrypted Write, Unencrypted Read (Cache), Unencrypted Read (FS only), Encrypted Write, Encrypted Read (Cache), Encrypted Read (FS Only)");
     int numIterations = 1;
     
     for (int i = 0; i < numIterations; i++) {
       @SuppressWarnings("deprecation")
       AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
       System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_OFF_CONF);
-      ((SiteConfiguration)conf).clearAndNull();
-   
+      ((SiteConfiguration) conf).clearAndNull();
+      
       TestRFile trf = new TestRFile();
       trf.outputFile = new File("/tmp/testUnencryptedRfile.rf");
       trf.openWriter();
       
-      
-
       double timeTickSize = 1000.0;
       int numRowsRead = 0;
-
-
+      
       try {
         
         performUnencryptedTests(trf, TOTAL_NUM_ROWS, ROW_MOD_VALUE, timeTickSize, true);
@@ -1895,28 +1884,26 @@ public class RFileTest {
       } catch (Exception e) {
         e.printStackTrace();
         throw e;
-      }      
-      
-     
+      }
       
     }
     
   }
-
-  private void performUnencryptedTests(TestRFile trf, int totalNumRows, int rowModValue, double timeTickSize, boolean first) throws IOException, InterruptedException {
+  
+  private void performUnencryptedTests(TestRFile trf, int totalNumRows, int rowModValue, double timeTickSize, boolean first) throws IOException,
+      InterruptedException {
     long start = System.currentTimeMillis();
     
-    
     writeRowsToRfile(trf, totalNumRows, rowModValue);
     
     long end = System.currentTimeMillis();
     
-    System.out.print(""+((end - start) / timeTickSize) + ", ");
+    System.out.print("" + ((end - start) / timeTickSize) + ", ");
     
     trf.preGeneratedInputFile = trf.outputFile;
-          
+    
     start = System.currentTimeMillis();
- 
+    
     trf.openReader();
     trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
     int numRowsRead = 0;
@@ -1933,10 +1920,10 @@ public class RFileTest {
     
     end = System.currentTimeMillis();
     
-    System.out.print(""+((end - start) / timeTickSize) + ", ");
-
+    System.out.print("" + ((end - start) / timeTickSize) + ", ");
+    
     Runtime.getRuntime().exec("purge").waitFor();
-
+    
     start = System.currentTimeMillis();
     
     trf.openReader();
@@ -1955,21 +1942,19 @@ public class RFileTest {
     
     end = System.currentTimeMillis();
     
-    
     if (first) {
-      System.out.print(""+((end - start) / timeTickSize)+", ");
+      System.out.print("" + ((end - start) / timeTickSize) + ", ");
     } else {
-      System.out.println(""+((end - start) / timeTickSize));
+      System.out.println("" + ((end - start) / timeTickSize));
       
     }
     
-    
-    
-    //trf.outputFile.delete();
+    // trf.outputFile.delete();
   }
-
+  
   @SuppressWarnings("deprecation")
-  private void performEncryptedTests(int totalNumRows, int rowModValue, double timeTickSize, int numRowsRead, boolean first) throws IOException, InterruptedException {
+  private void performEncryptedTests(int totalNumRows, int rowModValue, double timeTickSize, int numRowsRead, boolean first) throws IOException,
+      InterruptedException {
     AccumuloConfiguration conf;
     TestRFile trf;
     long start;
@@ -1978,7 +1963,7 @@ public class RFileTest {
     
     conf = AccumuloConfiguration.getSiteConfiguration();
     System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_ON_CONF);
-    ((SiteConfiguration)conf).clearAndNull();
+    ((SiteConfiguration) conf).clearAndNull();
     
     trf = new TestRFile();
     trf.outputFile = new File("/tmp/testEncryptedRfile.rf");
@@ -1989,13 +1974,13 @@ public class RFileTest {
     writeRowsToRfile(trf, totalNumRows, rowModValue);
     
     end = System.currentTimeMillis();
- 
-    System.out.print(""+((end - start) / timeTickSize) + ", ");
- 
+    
+    System.out.print("" + ((end - start) / timeTickSize) + ", ");
+    
     trf.preGeneratedInputFile = trf.outputFile;
     
     start = System.currentTimeMillis();
- 
+    
     trf.openReader();
     trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
     
@@ -2011,10 +1996,10 @@ public class RFileTest {
     
     end = System.currentTimeMillis();
     
-    System.out.print(""+((end - start) / timeTickSize)+", ");
-
+    System.out.print("" + ((end - start) / timeTickSize) + ", ");
+    
     Runtime.getRuntime().exec("purge").waitFor();
-
+    
     start = System.currentTimeMillis();
     
     trf.openReader();
@@ -2034,16 +2019,15 @@ public class RFileTest {
     end = System.currentTimeMillis();
     
     if (first) {
-      System.out.print(""+((end - start) / timeTickSize)+", ");
+      System.out.print("" + ((end - start) / timeTickSize) + ", ");
     } else {
-      System.out.println(""+((end - start) / timeTickSize));
+      System.out.println("" + ((end - start) / timeTickSize));
       
     }
     
-    
     trf.outputFile.delete();
   }
-
+  
   private int readRandomRowsFromRfile(TestRFile trf, int totalRowCount, int maxRowsToRead) throws IOException {
     if (maxRowsToRead <= 0) {
       return 0;
@@ -2053,7 +2037,7 @@ public class RFileTest {
     Random rand = new Random(System.nanoTime());
     
     int firstKeyNum = Math.abs(rand.nextInt()) % totalRowCount;
-    //int lastKeyNum = Math.abs(rand.nextInt()) % totalRowCount;
+    // int lastKeyNum = Math.abs(rand.nextInt()) % totalRowCount;
     int lastKeyNum = firstKeyNum + 1;
     
     if (lastKeyNum >= totalRowCount) {
@@ -2088,7 +2072,7 @@ public class RFileTest {
     return numRowsRead;
     
   }
-
+  
   private void writeRowsToRfile(TestRFile trf, int numRowsToWriteAndRead, int rowModValue) throws IOException {
     for (int i = 0; i < numRowsToWriteAndRead; i++) {
       String rowID = nf("r_", (i % rowModValue));
@@ -2097,32 +2081,31 @@ public class RFileTest {
       String colVis = "vis";
       
       Key k = nk(rowID, colFam, colQual, colVis, i);
-      Value v = nv(""+i);
+      Value v = nv("" + i);
       
       trf.writer.append(k, v);
     }
     
     trf.closeWriter();
   }
-    
   
   @Test
   public void testRootTabletEncryption() throws Exception {
     
-    // This tests that the normal set of operations used to populate a root tablet 
+    // This tests that the normal set of operations used to populate a root tablet
     
     String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     @SuppressWarnings("deprecation")
     AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
     System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_ON_CONF);
-    ((SiteConfiguration)conf).clearAndNull();
-
+    ((SiteConfiguration) conf).clearAndNull();
+    
     // populate the root tablet with info about the default tablet
     // the root tablet contains the key extent and locations of all the
     // metadata tablets
-    //String initRootTabFile = ServerConstants.getMetadataTableDir() + "/root_tablet/00000_00000."
-      //  + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
-    //FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, fs, conf, AccumuloConfiguration.getDefaultConfiguration());
+    // String initRootTabFile = ServerConstants.getMetadataTableDir() + "/root_tablet/00000_00000."
+    // + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
+    // FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, fs, conf, AccumuloConfiguration.getDefaultConfiguration());
     
     TestRFile testRfile = new TestRFile();
     testRfile.openWriter();
@@ -2131,19 +2114,19 @@ public class RFileTest {
     
     // mfw.startDefaultLocalityGroup();
     
-    //mfw.startDefaultLocalityGroup();
+    // mfw.startDefaultLocalityGroup();
     
     Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
     
     // table tablet's directory
     Key tableDirKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
         TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
-    mfw.append(tableDirKey, new Value(/*TABLE_TABLETS_TABLET_DIR*/"/table_info".getBytes()));
+    mfw.append(tableDirKey, new Value(/* TABLE_TABLETS_TABLET_DIR */"/table_info".getBytes()));
     
     // table tablet time
     Key tableTimeKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
         TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
-    mfw.append(tableTimeKey, new Value((/*TabletTime.LOGICAL_TIME_ID*/ 'L' + "0").getBytes()));
+    mfw.append(tableTimeKey, new Value((/* TabletTime.LOGICAL_TIME_ID */'L' + "0").getBytes()));
     
     // table tablet's prevrow
     Key tablePrevRowKey = new Key(tableExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
@@ -2161,14 +2144,13 @@ public class RFileTest {
     // default's time
     Key defaultTimeKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
         TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
-    mfw.append(defaultTimeKey, new Value((/*TabletTime.LOGICAL_TIME_ID*/ 'L' + "0").getBytes()));
+    mfw.append(defaultTimeKey, new Value((/* TabletTime.LOGICAL_TIME_ID */'L' + "0").getBytes()));
     
     // default's prevrow
     Key defaultPrevRowKey = new Key(defaultExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
         TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
     mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
     
-    
     testRfile.closeWriter();
     
     if (true) {
@@ -2178,27 +2160,20 @@ public class RFileTest {
       fileOutputStream.close();
     }
     
-    
-    
-    
     testRfile.openReader();
     testRfile.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
-    assert(testRfile.iter.hasTop());
-    
-    assert(testRfile.reader.getLastKey() != null);
-    
-    
+    assertTrue(testRfile.iter.hasTop());
     
+    assertTrue(testRfile.reader.getLastKey() != null);
     
     testRfile.closeReader();
-
+    
     if (oldSiteConfigProperty != null) {
       System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, oldSiteConfigProperty);
     } else {
       System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
     }
-    ((SiteConfiguration)conf).clearAndNull();
-
-  }  
+    ((SiteConfiguration) conf).clearAndNull();
+    
+  }
 }
-
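
An editorial aside on the assertion changes in the RFileTest hunks above: the patch swaps bare assert(...) statements for JUnit's assertTrue(...). The Java assert keyword is only evaluated when the JVM runs with -ea, so a bare assert inside a test can silently pass even when its condition is false, while assertTrue is always evaluated and reports a proper JUnit failure. A minimal, self-contained sketch of the difference (the class and helper below are hypothetical, not part of the patch):

import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class AssertStyleSketch {

  @Test
  public void preferJUnitAssertions() {
    boolean hasTop = openReaderAndSeek();

    // Only checked when the JVM is started with -ea; otherwise skipped entirely.
    assert hasTop;

    // Always checked; a failure is reported through the JUnit framework with a usable message.
    assertTrue(hasTop);
  }

  // Hypothetical stand-in for the reader/seek plumbing exercised in RFileTest.
  private boolean openReaderAndSeek() {
    return true;
  }
}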


[32/50] ACCUMULO-1132 Provide AuthenticationToken type for system user

Posted by kt...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index e948894..2b98331 100644
--- a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@ -103,12 +103,7 @@ public class SecurityOperation {
     return toRet;
   }
   
-  /**
-   * 
-   * @deprecated not for client use
-   */
-  @Deprecated
-  public SecurityOperation(String instanceId) {
+  protected SecurityOperation(String instanceId) {
     ZKUserPath = Constants.ZROOT + "/" + instanceId + "/users";
     zooCache = new ZooCache();
   }
@@ -128,7 +123,7 @@ public class SecurityOperation {
   public void initializeSecurity(TCredentials credentials, String rootPrincipal, byte[] token) throws AccumuloSecurityException, ThriftSecurityException {
     authenticate(credentials);
     
-    if (!credentials.getPrincipal().equals(SecurityConstants.SYSTEM_PRINCIPAL))
+    if (!isSystemUser(credentials))
       throw new AccumuloSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
     
     authenticator.initializeSecurity(credentials, rootPrincipal, token);
@@ -148,27 +143,34 @@ public class SecurityOperation {
     return rootUserName;
   }
   
+  public boolean isSystemUser(TCredentials credentials) {
+    return SystemCredentials.get().getToken().getClass().getName().equals(credentials.getTokenClassName());
+  }
+  
   private void authenticate(TCredentials credentials) throws ThriftSecurityException {
     if (!credentials.getInstanceId().equals(HdfsZooInstance.getInstance().getInstanceID()))
       throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.INVALID_INSTANCEID);
     
-    if (SecurityConstants.getSystemCredentials().equals(credentials))
-      return;
-    else if (credentials.getPrincipal().equals(SecurityConstants.SYSTEM_PRINCIPAL)) {
-      throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.BAD_CREDENTIALS);
-    }
-    
-    try {
-      AuthenticationToken token = reassembleToken(credentials);
-      if (!authenticator.authenticateUser(credentials.getPrincipal(), token)) {
-        throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.BAD_CREDENTIALS);
+    if (isSystemUser(credentials)) {
+      authenticateSystemUser(credentials);
+    } else {
+      try {
+        AuthenticationToken token = reassembleToken(credentials);
+        if (!authenticator.authenticateUser(credentials.getPrincipal(), token)) {
+          throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.BAD_CREDENTIALS);
+        }
+      } catch (AccumuloSecurityException e) {
+        log.debug(e);
+        throw e.asThriftException();
       }
-    } catch (AccumuloSecurityException e) {
-      log.debug(e);
-      throw e.asThriftException();
     }
   }
   
+  private void authenticateSystemUser(TCredentials credentials) throws ThriftSecurityException {
+    if (SystemCredentials.get().getToken().equals(credentials.getToken()))
+      throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.BAD_CREDENTIALS);
+  }
+  
   public boolean canAskAboutUser(TCredentials credentials, String user) throws ThriftSecurityException {
     // Authentication done in canPerformSystemActions
     if (!(canPerformSystemActions(credentials) || credentials.getPrincipal().equals(user)))
@@ -178,7 +180,7 @@ public class SecurityOperation {
   
   public boolean authenticateUser(TCredentials credentials, TCredentials toAuth) throws ThriftSecurityException {
     canAskAboutUser(credentials, toAuth.getPrincipal());
-    // User is already authenticated from canAskAboutUser, this gets around issues with !SYSTEM user
+    // User is already authenticated from canAskAboutUser
     if (credentials.equals(toAuth))
       return true;
     try {
@@ -189,11 +191,6 @@ public class SecurityOperation {
     }
   }
   
-  /**
-   * @param toAuth
-   * @return
-   * @throws AccumuloSecurityException
-   */
   private AuthenticationToken reassembleToken(TCredentials toAuth) throws AccumuloSecurityException {
     String tokenClass = toAuth.getTokenClassName();
     if (authenticator.validTokenClass(tokenClass)) {
@@ -207,13 +204,9 @@ public class SecurityOperation {
     
     targetUserExists(user);
     
-    if (!credentials.getPrincipal().equals(user) && !hasSystemPermission(credentials.getPrincipal(), SystemPermission.SYSTEM, false))
+    if (!credentials.getPrincipal().equals(user) && !hasSystemPermission(credentials, SystemPermission.SYSTEM, false))
       throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
     
-    // system user doesn't need record-level authorizations for the tables it reads (for now)
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      return Authorizations.EMPTY;
-    
     try {
       return authorizor.getCachedUserAuthorizations(user);
     } catch (AccumuloSecurityException e) {
@@ -222,6 +215,11 @@ public class SecurityOperation {
   }
   
   public Authorizations getUserAuthorizations(TCredentials credentials) throws ThriftSecurityException {
+    // system user doesn't need record-level authorizations for the tables it reads
+    if (isSystemUser(credentials)) {
+      authenticate(credentials);
+      return Authorizations.EMPTY;
+    }
     return getUserAuthorizations(credentials, credentials.getPrincipal());
   }
   
@@ -230,8 +228,20 @@ public class SecurityOperation {
    * 
    * @return true if a user exists and has permission; false otherwise
    */
-  private boolean hasSystemPermission(String user, SystemPermission permission, boolean useCached) throws ThriftSecurityException {
-    if (user.equals(getRootUsername()) || user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
+  private boolean hasSystemPermission(TCredentials credentials, SystemPermission permission, boolean useCached) throws ThriftSecurityException {
+    if (isSystemUser(credentials))
+      return true;
+    return _hasSystemPermission(credentials.getPrincipal(), permission, useCached);
+  }
+  
+  /**
+   * Checks if a user has a system permission<br/>
+   * This cannot check if a system user has permission.
+   * 
+   * @return true if a user exists and has permission; false otherwise
+   */
+  private boolean _hasSystemPermission(String user, SystemPermission permission, boolean useCached) throws ThriftSecurityException {
+    if (user.equals(getRootUsername()))
       return true;
     
     targetUserExists(user);
@@ -250,10 +260,19 @@ public class SecurityOperation {
    * 
    * @return true if a user exists and has permission; false otherwise
    */
-  protected boolean hasTablePermission(String user, String table, TablePermission permission, boolean useCached) throws ThriftSecurityException {
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
+  protected boolean hasTablePermission(TCredentials credentials, String table, TablePermission permission, boolean useCached) throws ThriftSecurityException {
+    if (isSystemUser(credentials))
       return true;
-    
+    return _hasTablePermission(credentials.getPrincipal(), table, permission, useCached);
+  }
+  
+  /**
+   * Checks if a user has a table permission<br/>
+   * This cannot check if a system user has permission.
+   * 
+   * @return true if a user exists and has permission; false otherwise
+   */
+  protected boolean _hasTablePermission(String user, String table, TablePermission permission, boolean useCached) throws ThriftSecurityException {
     targetUserExists(user);
     
     if ((table.equals(MetadataTable.ID) || table.equals(RootTable.ID)) && permission.equals(TablePermission.READ))
@@ -273,16 +292,14 @@ public class SecurityOperation {
   // some people just aren't allowed to ask about other users; here are those who can ask
   private boolean canAskAboutOtherUsers(TCredentials credentials, String user) throws ThriftSecurityException {
     authenticate(credentials);
-    return credentials.getPrincipal().equals(user) || hasSystemPermission(credentials.getPrincipal(), SystemPermission.SYSTEM, false)
-        || hasSystemPermission(credentials.getPrincipal(), SystemPermission.CREATE_USER, false)
-        || hasSystemPermission(credentials.getPrincipal(), SystemPermission.ALTER_USER, false)
-        || hasSystemPermission(credentials.getPrincipal(), SystemPermission.DROP_USER, false);
+    return credentials.getPrincipal().equals(user) || hasSystemPermission(credentials, SystemPermission.SYSTEM, false)
+        || hasSystemPermission(credentials, SystemPermission.CREATE_USER, false) || hasSystemPermission(credentials, SystemPermission.ALTER_USER, false)
+        || hasSystemPermission(credentials, SystemPermission.DROP_USER, false);
   }
   
   private void targetUserExists(String user) throws ThriftSecurityException {
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL) || user.equals(getRootUsername()))
+    if (user.equals(getRootUsername()))
       return;
-    
     try {
       if (!authenticator.userExists(user))
         throw new ThriftSecurityException(user, SecurityErrorCode.USER_DOESNT_EXIST);
@@ -293,7 +310,7 @@ public class SecurityOperation {
   
   public boolean canScan(TCredentials credentials, String table) throws ThriftSecurityException {
     authenticate(credentials);
-    return hasTablePermission(credentials.getPrincipal(), table, TablePermission.READ, true);
+    return hasTablePermission(credentials, table, TablePermission.READ, true);
   }
   
   public boolean canScan(TCredentials credentials, String table, TRange range, List<TColumn> columns, List<IterInfo> ssiList,
@@ -308,14 +325,13 @@ public class SecurityOperation {
   
   public boolean canWrite(TCredentials credentials, String table) throws ThriftSecurityException {
     authenticate(credentials);
-    return hasTablePermission(credentials.getPrincipal(), table, TablePermission.WRITE, true);
+    return hasTablePermission(credentials, table, TablePermission.WRITE, true);
   }
   
   public boolean canSplitTablet(TCredentials credentials, String table) throws ThriftSecurityException {
     authenticate(credentials);
-    return hasSystemPermission(credentials.getPrincipal(), SystemPermission.ALTER_TABLE, false)
-        || hasSystemPermission(credentials.getPrincipal(), SystemPermission.SYSTEM, false)
-        || hasTablePermission(credentials.getPrincipal(), table, TablePermission.ALTER_TABLE, false);
+    return hasSystemPermission(credentials, SystemPermission.ALTER_TABLE, false) || hasSystemPermission(credentials, SystemPermission.SYSTEM, false)
+        || hasTablePermission(credentials, table, TablePermission.ALTER_TABLE, false);
   }
   
   /**
@@ -323,19 +339,17 @@ public class SecurityOperation {
    */
   public boolean canPerformSystemActions(TCredentials credentials) throws ThriftSecurityException {
     authenticate(credentials);
-    return hasSystemPermission(credentials.getPrincipal(), SystemPermission.SYSTEM, false);
+    return hasSystemPermission(credentials, SystemPermission.SYSTEM, false);
   }
   
   public boolean canFlush(TCredentials c, String tableId) throws ThriftSecurityException {
     authenticate(c);
-    return hasTablePermission(c.getPrincipal(), tableId, TablePermission.WRITE, false)
-        || hasTablePermission(c.getPrincipal(), tableId, TablePermission.ALTER_TABLE, false);
+    return hasTablePermission(c, tableId, TablePermission.WRITE, false) || hasTablePermission(c, tableId, TablePermission.ALTER_TABLE, false);
   }
   
   public boolean canAlterTable(TCredentials c, String tableId) throws ThriftSecurityException {
     authenticate(c);
-    return hasTablePermission(c.getPrincipal(), tableId, TablePermission.ALTER_TABLE, false)
-        || hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_TABLE, false);
+    return hasTablePermission(c, tableId, TablePermission.ALTER_TABLE, false) || hasSystemPermission(c, SystemPermission.ALTER_TABLE, false);
   }
   
   public boolean canCreateTable(TCredentials c, String tableName) throws ThriftSecurityException {
@@ -344,42 +358,39 @@ public class SecurityOperation {
   
   public boolean canCreateTable(TCredentials c) throws ThriftSecurityException {
     authenticate(c);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.CREATE_TABLE, false);
+    return hasSystemPermission(c, SystemPermission.CREATE_TABLE, false);
   }
   
   public boolean canRenameTable(TCredentials c, String tableId, String oldTableName, String newTableName) throws ThriftSecurityException {
     authenticate(c);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_TABLE, false)
-        || hasTablePermission(c.getPrincipal(), tableId, TablePermission.ALTER_TABLE, false);
+    return hasSystemPermission(c, SystemPermission.ALTER_TABLE, false) || hasTablePermission(c, tableId, TablePermission.ALTER_TABLE, false);
   }
   
   public boolean canCloneTable(TCredentials c, String tableId, String tableName) throws ThriftSecurityException {
     authenticate(c);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.CREATE_TABLE, false)
-        && hasTablePermission(c.getPrincipal(), tableId, TablePermission.READ, false);
+    return hasSystemPermission(c, SystemPermission.CREATE_TABLE, false) && hasTablePermission(c, tableId, TablePermission.READ, false);
   }
   
   public boolean canDeleteTable(TCredentials c, String tableId) throws ThriftSecurityException {
     authenticate(c);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.DROP_TABLE, false)
-        || hasTablePermission(c.getPrincipal(), tableId, TablePermission.DROP_TABLE, false);
+    return hasSystemPermission(c, SystemPermission.DROP_TABLE, false) || hasTablePermission(c, tableId, TablePermission.DROP_TABLE, false);
   }
   
   public boolean canOnlineOfflineTable(TCredentials c, String tableId, TableOperation op) throws ThriftSecurityException {
     authenticate(c);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.SYSTEM, false) || hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_TABLE, false)
-        || hasTablePermission(c.getPrincipal(), tableId, TablePermission.ALTER_TABLE, false);
+    return hasSystemPermission(c, SystemPermission.SYSTEM, false) || hasSystemPermission(c, SystemPermission.ALTER_TABLE, false)
+        || hasTablePermission(c, tableId, TablePermission.ALTER_TABLE, false);
   }
   
   public boolean canMerge(TCredentials c, String tableId) throws ThriftSecurityException {
     authenticate(c);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.SYSTEM, false) || hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_TABLE, false)
-        || hasTablePermission(c.getPrincipal(), tableId, TablePermission.ALTER_TABLE, false);
+    return hasSystemPermission(c, SystemPermission.SYSTEM, false) || hasSystemPermission(c, SystemPermission.ALTER_TABLE, false)
+        || hasTablePermission(c, tableId, TablePermission.ALTER_TABLE, false);
   }
   
   public boolean canDeleteRange(TCredentials c, String tableId, String tableName, Text startRow, Text endRow) throws ThriftSecurityException {
     authenticate(c);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.SYSTEM, false) || hasTablePermission(c.getPrincipal(), tableId, TablePermission.WRITE, false);
+    return hasSystemPermission(c, SystemPermission.SYSTEM, false) || hasTablePermission(c, tableId, TablePermission.WRITE, false);
   }
   
   public boolean canBulkImport(TCredentials c, String tableId, String tableName, String dir, String failDir) throws ThriftSecurityException {
@@ -388,98 +399,66 @@ public class SecurityOperation {
   
   public boolean canBulkImport(TCredentials c, String tableId) throws ThriftSecurityException {
     authenticate(c);
-    return hasTablePermission(c.getPrincipal(), tableId, TablePermission.BULK_IMPORT, false);
+    return hasTablePermission(c, tableId, TablePermission.BULK_IMPORT, false);
   }
   
   public boolean canCompact(TCredentials c, String tableId) throws ThriftSecurityException {
     authenticate(c);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_TABLE, false)
-        || hasTablePermission(c.getPrincipal(), tableId, TablePermission.ALTER_TABLE, false)
-        || hasTablePermission(c.getPrincipal(), tableId, TablePermission.WRITE, false);
+    return hasSystemPermission(c, SystemPermission.ALTER_TABLE, false) || hasTablePermission(c, tableId, TablePermission.ALTER_TABLE, false)
+        || hasTablePermission(c, tableId, TablePermission.WRITE, false);
   }
   
   public boolean canChangeAuthorizations(TCredentials c, String user) throws ThriftSecurityException {
     authenticate(c);
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_USER, false);
+    return hasSystemPermission(c, SystemPermission.ALTER_USER, false);
   }
   
   public boolean canChangePassword(TCredentials c, String user) throws ThriftSecurityException {
     authenticate(c);
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    return c.getPrincipal().equals(user) || hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_USER, false);
+    return c.getPrincipal().equals(user) || hasSystemPermission(c, SystemPermission.ALTER_USER, false);
   }
   
   public boolean canCreateUser(TCredentials c, String user) throws ThriftSecurityException {
     authenticate(c);
-    
-    // don't allow creating a user with the same name as system user
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.CREATE_USER, false);
+    return hasSystemPermission(c, SystemPermission.CREATE_USER, false);
   }
   
   public boolean canDropUser(TCredentials c, String user) throws ThriftSecurityException {
     authenticate(c);
-    
-    // can't delete root or system users
-    if (user.equals(getRootUsername()) || user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
+    if (user.equals(getRootUsername()))
       throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.DROP_USER, false);
+    return hasSystemPermission(c, SystemPermission.DROP_USER, false);
   }
   
   public boolean canGrantSystem(TCredentials c, String user, SystemPermission sysPerm) throws ThriftSecurityException {
     authenticate(c);
-    
-    // can't modify system user
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    
     // can't grant GRANT
     if (sysPerm.equals(SystemPermission.GRANT))
       throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.GRANT_INVALID);
-    
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.GRANT, false);
+    return hasSystemPermission(c, SystemPermission.GRANT, false);
   }
   
   public boolean canGrantTable(TCredentials c, String user, String table) throws ThriftSecurityException {
     authenticate(c);
-    
-    // can't modify system user
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_TABLE, false)
-        || hasTablePermission(c.getPrincipal(), table, TablePermission.GRANT, false);
+    return hasSystemPermission(c, SystemPermission.ALTER_TABLE, false) || hasTablePermission(c, table, TablePermission.GRANT, false);
   }
   
   public boolean canRevokeSystem(TCredentials c, String user, SystemPermission sysPerm) throws ThriftSecurityException {
     authenticate(c);
-    
-    // can't modify system or root user
-    if (user.equals(getRootUsername()) || user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
+    // can't modify root user
+    if (user.equals(getRootUsername()))
       throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
     
     // can't revoke GRANT
     if (sysPerm.equals(SystemPermission.GRANT))
       throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.GRANT_INVALID);
     
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.GRANT, false);
+    return hasSystemPermission(c, SystemPermission.GRANT, false);
   }
   
   public boolean canRevokeTable(TCredentials c, String user, String table) throws ThriftSecurityException {
     authenticate(c);
-    
-    // can't modify system user
-    if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    
-    return hasSystemPermission(c.getPrincipal(), SystemPermission.ALTER_TABLE, false)
-        || hasTablePermission(c.getPrincipal(), table, TablePermission.GRANT, false);
+    return hasSystemPermission(c, SystemPermission.ALTER_TABLE, false) || hasTablePermission(c, table, TablePermission.GRANT, false);
   }
   
   public void changeAuthorizations(TCredentials credentials, String user, Authorizations authorizations) throws ThriftSecurityException {
@@ -602,13 +581,13 @@ public class SecurityOperation {
   public boolean hasSystemPermission(TCredentials credentials, String user, SystemPermission permissionById) throws ThriftSecurityException {
     if (!canAskAboutOtherUsers(credentials, user))
       throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    return hasSystemPermission(user, permissionById, false);
+    return _hasSystemPermission(user, permissionById, false);
   }
   
   public boolean hasTablePermission(TCredentials credentials, String user, String tableId, TablePermission permissionById) throws ThriftSecurityException {
     if (!canAskAboutOtherUsers(credentials, user))
       throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-    return hasTablePermission(user, tableId, permissionById, false);
+    return _hasTablePermission(user, tableId, permissionById, false);
   }
   
   public Set<String> listUsers(TCredentials credentials) throws ThriftSecurityException {
@@ -635,11 +614,11 @@ public class SecurityOperation {
   
   public boolean canExport(TCredentials credentials, String tableId, String tableName, String exportDir) throws ThriftSecurityException {
     authenticate(credentials);
-    return hasTablePermission(credentials.getPrincipal(), tableId, TablePermission.READ, false);
+    return hasTablePermission(credentials, tableId, TablePermission.READ, false);
   }
   
   public boolean canImport(TCredentials credentials, String tableName, String importDir) throws ThriftSecurityException {
     authenticate(credentials);
-    return hasSystemPermission(credentials.getPrincipal(), SystemPermission.CREATE_TABLE, false);
+    return hasSystemPermission(credentials, SystemPermission.CREATE_TABLE, false);
   }
 }
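
A note on the shape of the SecurityOperation refactor above: the system user is no longer recognized by a reserved principal string but by the class of its authentication token, and each public permission check short-circuits for that user before delegating to a principal-keyed lookup (the underscore-prefixed methods). A simplified, self-contained sketch of that split, using hypothetical stand-in types rather than the real Thrift classes:

public class PermissionCheckSketch {

  // Hypothetical stand-in for TCredentials.
  public static class Creds {
    final String principal;
    final String tokenClassName;

    Creds(String principal, String tokenClassName) {
      this.principal = principal;
      this.tokenClassName = tokenClassName;
    }
  }

  public enum Permission { SYSTEM, CREATE_TABLE }

  // In the patch this comes from SystemCredentials.get().getToken().getClass().getName().
  private final String systemTokenClass = "org.example.SystemToken";

  public boolean isSystemUser(Creds c) {
    return systemTokenClass.equals(c.tokenClassName);
  }

  // Public check: the system user always passes, so no reserved "!SYSTEM" entry is
  // needed in the user store and the special principal disappears from this path.
  public boolean hasSystemPermission(Creds c, Permission p) {
    if (isSystemUser(c))
      return true;
    return _hasSystemPermission(c.principal, p);
  }

  // Principal-keyed lookup; deliberately unable to answer for the system user.
  private boolean _hasSystemPermission(String user, Permission p) {
    // consult the stored permission handler here (omitted in this sketch)
    return false;
  }
}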

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java b/server/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
new file mode 100644
index 0000000..f30419a
--- /dev/null
+++ b/server/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.security;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecurityPermission;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.security.Credentials;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Credentials for the system services.
+ * 
+ * @since 1.6.0
+ */
+public final class SystemCredentials extends Credentials {
+  
+  private static final SecurityPermission SYSTEM_CREDENTIALS_PERMISSION = new SecurityPermission("systemCredentialsPermission");
+  
+  private static SystemCredentials SYSTEM_CREDS = null;
+  private static final String SYSTEM_PRINCIPAL = "!SYSTEM";
+  private static final SystemToken SYSTEM_TOKEN = SystemToken.get();
+  
+  private final TCredentials AS_THRIFT;
+  
+  private SystemCredentials() {
+    super(SYSTEM_PRINCIPAL, SYSTEM_TOKEN);
+    AS_THRIFT = toThrift(HdfsZooInstance.getInstance());
+  }
+  
+  public static SystemCredentials get() {
+    SecurityManager sm = System.getSecurityManager();
+    if (sm != null) {
+      sm.checkPermission(SYSTEM_CREDENTIALS_PERMISSION);
+    }
+    if (SYSTEM_CREDS == null) {
+      SYSTEM_CREDS = new SystemCredentials();
+      
+    }
+    return SYSTEM_CREDS;
+  }
+  
+  public TCredentials getAsThrift() {
+    return AS_THRIFT;
+  }
+  
+  /**
+   * An {@link AuthenticationToken} type for Accumulo servers for inter-server communication.
+   * 
+   * @since 1.6.0
+   */
+  public static final class SystemToken extends PasswordToken {
+    
+    /**
+     * A Constructor for {@link Writable}.
+     */
+    public SystemToken() {}
+    
+    private SystemToken(byte[] systemPassword) {
+      super(systemPassword);
+    }
+    
+    private static SystemToken get() {
+      byte[] confChecksum;
+      MessageDigest md;
+      try {
+        md = MessageDigest.getInstance(Constants.PW_HASH_ALGORITHM);
+      } catch (NoSuchAlgorithmException e) {
+        throw new RuntimeException("Failed to compute configuration checksum", e);
+      }
+      
+      // seed the config with the version and instance id, so at least it's not empty
+      md.update(ServerConstants.WIRE_VERSION.toString().getBytes(Constants.UTF8));
+      md.update(HdfsZooInstance.getInstance().getInstanceID().getBytes(Constants.UTF8));
+      
+      for (Entry<String,String> entry : ServerConfiguration.getSiteConfiguration()) {
+        // only include instance properties
+        if (entry.getKey().startsWith(Property.INSTANCE_PREFIX.toString())) {
+          md.update(entry.getKey().getBytes(Constants.UTF8));
+          md.update(entry.getValue().getBytes(Constants.UTF8));
+        }
+      }
+      confChecksum = md.digest();
+      
+      int wireVersion = ServerConstants.WIRE_VERSION;
+      byte[] inst = HdfsZooInstance.getInstance().getInstanceID().getBytes(Constants.UTF8);
+      
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream(3 * (Integer.SIZE / Byte.SIZE) + inst.length + confChecksum.length);
+      DataOutputStream out = new DataOutputStream(bytes);
+      try {
+        out.write(wireVersion * -1);
+        out.write(inst.length);
+        out.write(inst);
+        out.write(confChecksum.length);
+        out.write(confChecksum);
+      } catch (IOException e) {
+        // this is impossible with ByteArrayOutputStream; crash hard if this happens
+        throw new RuntimeException(e);
+      }
+      return new SystemToken(Base64.encodeBase64(bytes.toByteArray()));
+    }
+  }
+  
+}
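
Worth calling out how SystemToken.get() above builds its secret: it digests the wire version, the instance id, and every instance.* site property, so two servers only share credentials when those settings agree. A stripped-down, stand-alone sketch of that derivation follows; the method name and inputs are illustrative, not the Accumulo API, and the real code additionally frames the digest with the version and instance id bytes before Base64-encoding:

import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;

import org.apache.commons.codec.binary.Base64;

public class ConfigDerivedSecretSketch {

  private static final Charset UTF8 = Charset.forName("UTF-8");

  public static byte[] derive(int wireVersion, String instanceId, Map<String,String> siteConfig) {
    MessageDigest md;
    try {
      md = MessageDigest.getInstance("SHA-256"); // stand-in for Constants.PW_HASH_ALGORITHM
    } catch (NoSuchAlgorithmException e) {
      throw new RuntimeException("Failed to compute configuration checksum", e);
    }
    // Seed with the version and instance id so the digest is never empty.
    md.update(Integer.toString(wireVersion).getBytes(UTF8));
    md.update(instanceId.getBytes(UTF8));
    // Only instance-scoped properties participate; unrelated tuning changes on one
    // server therefore do not break inter-server authentication.
    for (Map.Entry<String,String> entry : siteConfig.entrySet()) {
      if (entry.getKey().startsWith("instance.")) {
        md.update(entry.getKey().getBytes(UTF8));
        md.update(entry.getValue().getBytes(UTF8));
      }
    }
    return Base64.encodeBase64(md.digest());
  }
}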

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
index 1305be6..e9b973a 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
@@ -106,7 +106,7 @@ import org.apache.accumulo.server.master.tableOps.CompactRange.CompactionIterato
 import org.apache.accumulo.server.problems.ProblemReport;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.Compactor.CompactionCanceledException;
 import org.apache.accumulo.server.tabletserver.Compactor.CompactionEnv;
 import org.apache.accumulo.server.tabletserver.FileManager.ScanFileManager;
@@ -583,7 +583,7 @@ public class Tablet {
       
       if (filesToDelete.size() > 0) {
         log.debug("Removing scan refs from metadata " + extent + " " + filesToDelete);
-        MetadataTableUtil.removeScanFiles(extent, filesToDelete, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+        MetadataTableUtil.removeScanFiles(extent, filesToDelete, SystemCredentials.get().getAsThrift(), tabletServer.getLock());
       }
     }
     
@@ -604,7 +604,7 @@ public class Tablet {
       
       if (filesToDelete.size() > 0) {
         log.debug("Removing scan refs from metadata " + extent + " " + filesToDelete);
-        MetadataTableUtil.removeScanFiles(extent, filesToDelete, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+        MetadataTableUtil.removeScanFiles(extent, filesToDelete, SystemCredentials.get().getAsThrift(), tabletServer.getLock());
       }
     }
     
@@ -680,7 +680,7 @@ public class Tablet {
       }
       
       synchronized (bulkFileImportLock) {
-        TCredentials auths = SecurityConstants.getSystemCredentials();
+        TCredentials auths = SystemCredentials.get().getAsThrift();
         Connector conn;
         try {
           conn = HdfsZooInstance.getInstance().getConnector(auths.getPrincipal(), CredentialHelper.extractToken(auths));
@@ -838,7 +838,7 @@ public class Tablet {
       // very important to write delete entries outside of log lock, because
       // this !METADATA write does not go up... it goes sideways or to itself
       if (absMergeFile != null)
-        MetadataTableUtil.addDeleteEntries(extent, Collections.singleton(absMergeFile), SecurityConstants.getSystemCredentials());
+        MetadataTableUtil.addDeleteEntries(extent, Collections.singleton(absMergeFile), SystemCredentials.get().getAsThrift());
       
       Set<String> unusedWalLogs = beginClearingUnusedLogs();
       try {
@@ -846,7 +846,7 @@ public class Tablet {
         // need to write to !METADATA before writing to walog, when things are done in the reverse order
         // data could be lost... the minor compaction start even should be written before the following metadata
         // write is made
-        TCredentials creds = SecurityConstants.getSystemCredentials();
+        TCredentials creds = SystemCredentials.get().getAsThrift();
         
         synchronized (timeLock) {
           if (commitSession.getMaxCommittedTime() > persistedTime)
@@ -1037,7 +1037,7 @@ public class Tablet {
         Set<FileRef> filesInUseByScans = waitForScansToFinish(oldDatafiles, false, 10000);
         if (filesInUseByScans.size() > 0)
           log.debug("Adding scan refs to metadata " + extent + " " + filesInUseByScans);
-        MetadataTableUtil.replaceDatafiles(extent, oldDatafiles, filesInUseByScans, newDatafile, compactionId, dfv, SecurityConstants.getSystemCredentials(),
+        MetadataTableUtil.replaceDatafiles(extent, oldDatafiles, filesInUseByScans, newDatafile, compactionId, dfv, SystemCredentials.get().getAsThrift(),
             tabletServer.getClientAddressString(), lastLocation, tabletServer.getLock());
         removeFilesAfterScan(filesInUseByScans);
       }
@@ -1131,7 +1131,7 @@ public class Tablet {
       Text rowName = extent.getMetadataEntry();
       
       String tableId = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
-      ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), tableId, Authorizations.EMPTY);
+      ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get().getAsThrift(), tableId, Authorizations.EMPTY);
       
       // Commented out because when no data file is present, each tablet will scan through metadata table and return nothing
       // reduced batch size to improve performance
@@ -1161,7 +1161,7 @@ public class Tablet {
     
     if (ke.isMeta()) {
       try {
-        logEntries = MetadataTableUtil.getLogEntries(SecurityConstants.getSystemCredentials(), ke);
+        logEntries = MetadataTableUtil.getLogEntries(SystemCredentials.get().getAsThrift(), ke);
       } catch (Exception ex) {
         throw new RuntimeException("Unable to read tablet log entries", ex);
       }
@@ -2213,7 +2213,7 @@ public class Tablet {
       }
       
       if (updateMetadata) {
-        TCredentials creds = SecurityConstants.getSystemCredentials();
+        TCredentials creds = SystemCredentials.get().getAsThrift();
         // if multiple threads were allowed to update this outside of a sync block, then it would be
         // a race condition
         MetadataTableUtil.updateTabletFlushID(extent, tableFlushID, creds, tabletServer.getLock());
@@ -2729,7 +2729,7 @@ public class Tablet {
     }
     
     try {
-      Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> fileLog = MetadataTableUtil.getFileAndLogEntries(SecurityConstants.getSystemCredentials(), extent);
+      Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> fileLog = MetadataTableUtil.getFileAndLogEntries(SystemCredentials.get().getAsThrift(), extent);
       
       if (fileLog.getFirst().size() != 0) {
         String msg = "Closed tablet " + extent + " has walog entries in " + MetadataTable.NAME + " " + fileLog.getFirst();
@@ -3516,12 +3516,12 @@ public class Tablet {
       // it is possible that some of the bulk loading flags will be deleted after being read below because the bulk load
       // finishes.... therefore split could propogate load flags for a finished bulk load... there is a special iterator
       // on the !METADATA table to clean up this type of garbage
-      Map<FileRef,Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), extent);
+      Map<FileRef,Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get().getAsThrift(), extent);
       
-      MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
-      MetadataTableUtil.addNewTablet(low, lowDirectory, tabletServer.getTabletSession(), lowDatafileSizes, bulkLoadedFiles,
-          SecurityConstants.getSystemCredentials(), time, lastFlushID, lastCompactID, tabletServer.getLock());
-      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+      MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get().getAsThrift(), tabletServer.getLock());
+      MetadataTableUtil.addNewTablet(low, lowDirectory, tabletServer.getTabletSession(), lowDatafileSizes, bulkLoadedFiles, SystemCredentials.get()
+          .getAsThrift(), time, lastFlushID, lastCompactID, tabletServer.getLock());
+      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get().getAsThrift(), tabletServer.getLock());
       
       log.log(TLevel.TABLET_HIST, extent + " split " + low + " " + high);
       
@@ -3807,7 +3807,7 @@ public class Tablet {
       try {
         // if multiple threads were allowed to update this outside of a sync block, then it would be
         // a race condition
-        MetadataTableUtil.updateTabletCompactID(extent, compactionId, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+        MetadataTableUtil.updateTabletCompactID(extent, compactionId, SystemCredentials.get().getAsThrift(), tabletServer.getLock());
       } finally {
         synchronized (this) {
           majorCompactionInProgress = false;
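
The Tablet.java hunks above (and the TabletServer.java ones that follow) are a mechanical call-site migration: wherever the old static SecurityConstants helpers produced the system TCredentials, the code now asks the SystemCredentials singleton and converts to the thrift form at the boundary. A compile-only sketch of the call-site shape against the classes this patch introduces; MetadataHelper is a hypothetical stand-in for the static MetadataTableUtil methods:

import org.apache.accumulo.core.security.thrift.TCredentials;
import org.apache.accumulo.server.security.SystemCredentials;

public class CallSiteSketch {

  // Hypothetical interface standing in for MetadataTableUtil.
  interface MetadataHelper {
    void updateTabletCompactID(Object extent, long compactionId, TCredentials creds, Object lock);
  }

  void updateCompactionId(MetadataHelper helper, Object extent, long compactionId, Object lock) {
    // Before: SecurityConstants.getSystemCredentials() at every call site.
    // After: one accessor for the system identity; getAsThrift() yields the cached
    // TCredentials handed across the metadata/RPC boundary.
    helper.updateTabletCompactID(extent, compactionId, SystemCredentials.get().getAsThrift(), lock);
  }
}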

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 9d50f07..ceed0ee 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -156,8 +156,8 @@ import org.apache.accumulo.server.metrics.AbstractMetricsImpl;
 import org.apache.accumulo.server.problems.ProblemReport;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.security.SecurityOperation;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.Compactor.CompactionInfo;
 import org.apache.accumulo.server.tabletserver.Tablet.CommitSession;
 import org.apache.accumulo.server.tabletserver.Tablet.KVEntry;
@@ -228,7 +228,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
   private static long gcTimeIncreasedCount;
   
   private static final long MAX_TIME_TO_WAIT_FOR_SCAN_RESULT_MILLIS = 1000;
-  private static final long RECENTLY_SPLIT_MILLIES = 60*1000;
+  private static final long RECENTLY_SPLIT_MILLIES = 60 * 1000;
   
   private TabletServerLogger logger;
   
@@ -1749,31 +1749,29 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     
     private ZooCache masterLockCache = new ZooCache();
     
-    private void checkPermission(TCredentials credentials, String lock, boolean requiresSystemPermission, final String request) throws ThriftSecurityException {
-      if (requiresSystemPermission) {
-        boolean fatal = false;
-        try {
-          log.debug("Got " + request + " message from user: " + credentials.getPrincipal());
-          if (!security.canPerformSystemActions(credentials)) {
-            log.warn("Got " + request + " message from user: " + credentials.getPrincipal());
-            throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
-          }
-        } catch (ThriftSecurityException e) {
-          log.warn("Got " + request + " message from unauthenticatable user: " + e.getUser());
-          if (e.getUser().equals(SecurityConstants.SYSTEM_PRINCIPAL)) {
-            log.fatal("Got message from a service with a mismatched configuration. Please ensure a compatible configuration.", e);
-            fatal = true;
-          }
-          throw e;
-        } finally {
-          if (fatal) {
-            Halt.halt(1, new Runnable() {
-              @Override
-              public void run() {
-                logGCInfo(getSystemConfiguration());
-              }
-            });
-          }
+    private void checkPermission(TCredentials credentials, String lock, final String request) throws ThriftSecurityException {
+      boolean fatal = false;
+      try {
+        log.debug("Got " + request + " message from user: " + credentials.getPrincipal());
+        if (!security.canPerformSystemActions(credentials)) {
+          log.warn("Got " + request + " message from user: " + credentials.getPrincipal());
+          throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
+        }
+      } catch (ThriftSecurityException e) {
+        log.warn("Got " + request + " message from unauthenticatable user: " + e.getUser());
+        if (SystemCredentials.get().getAsThrift().getTokenClassName().equals(credentials.getTokenClassName())) {
+          log.fatal("Got message from a service with a mismatched configuration. Please ensure a compatible configuration.", e);
+          fatal = true;
+        }
+        throw e;
+      } finally {
+        if (fatal) {
+          Halt.halt(1, new Runnable() {
+            @Override
+            public void run() {
+              logGCInfo(getSystemConfiguration());
+            }
+          });
         }
       }
       
@@ -1815,7 +1813,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     public void loadTablet(TInfo tinfo, TCredentials credentials, String lock, final TKeyExtent textent) {
       
       try {
-        checkPermission(credentials, lock, true, "loadTablet");
+        checkPermission(credentials, lock, "loadTablet");
       } catch (ThriftSecurityException e) {
         log.error(e, e);
         throw new RuntimeException(e);
@@ -1891,7 +1889,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     @Override
     public void unloadTablet(TInfo tinfo, TCredentials credentials, String lock, TKeyExtent textent, boolean save) {
       try {
-        checkPermission(credentials, lock, true, "unloadTablet");
+        checkPermission(credentials, lock, "unloadTablet");
       } catch (ThriftSecurityException e) {
         log.error(e, e);
         throw new RuntimeException(e);
@@ -1905,7 +1903,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     @Override
     public void flush(TInfo tinfo, TCredentials credentials, String lock, String tableId, ByteBuffer startRow, ByteBuffer endRow) {
       try {
-        checkPermission(credentials, lock, true, "flush");
+        checkPermission(credentials, lock, "flush");
       } catch (ThriftSecurityException e) {
         log.error(e, e);
         throw new RuntimeException(e);
@@ -1942,7 +1940,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     @Override
     public void flushTablet(TInfo tinfo, TCredentials credentials, String lock, TKeyExtent textent) throws TException {
       try {
-        checkPermission(credentials, lock, true, "flushTablet");
+        checkPermission(credentials, lock, "flushTablet");
       } catch (ThriftSecurityException e) {
         log.error(e, e);
         throw new RuntimeException(e);
@@ -1962,7 +1960,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     @Override
     public void halt(TInfo tinfo, TCredentials credentials, String lock) throws ThriftSecurityException {
       
-      checkPermission(credentials, lock, true, "halt");
+      checkPermission(credentials, lock, "halt");
       
       Halt.halt(0, new Runnable() {
         @Override
@@ -1996,7 +1994,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     @Override
     public List<ActiveScan> getActiveScans(TInfo tinfo, TCredentials credentials) throws ThriftSecurityException, TException {
       try {
-        checkPermission(credentials, null, true, "getScans");
+        checkPermission(credentials, null, "getScans");
       } catch (ThriftSecurityException e) {
         log.error(e, e);
         throw new RuntimeException(e);
@@ -2008,7 +2006,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     @Override
     public void chop(TInfo tinfo, TCredentials credentials, String lock, TKeyExtent textent) throws TException {
       try {
-        checkPermission(credentials, lock, true, "chop");
+        checkPermission(credentials, lock, "chop");
       } catch (ThriftSecurityException e) {
         log.error(e, e);
         throw new RuntimeException(e);
@@ -2025,7 +2023,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     @Override
     public void compact(TInfo tinfo, TCredentials credentials, String lock, String tableId, ByteBuffer startRow, ByteBuffer endRow) throws TException {
       try {
-        checkPermission(credentials, lock, true, "compact");
+        checkPermission(credentials, lock, "compact");
       } catch (ThriftSecurityException e) {
         log.error(e, e);
         throw new RuntimeException(e);
@@ -2115,7 +2113,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     @Override
     public List<ActiveCompaction> getActiveCompactions(TInfo tinfo, TCredentials credentials) throws ThriftSecurityException, TException {
       try {
-        checkPermission(credentials, null, true, "getActiveCompactions");
+        checkPermission(credentials, null, "getActiveCompactions");
       } catch (ThriftSecurityException e) {
         log.error(e, e);
         throw new RuntimeException(e);
@@ -2612,7 +2610,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     entry.server = logs.get(0).getLogger();
     entry.filename = logs.get(0).getFileName();
     entry.logSet = logSet;
-    MetadataTableUtil.addLogEntry(SecurityConstants.getSystemCredentials(), entry, getLock());
+    MetadataTableUtil.addLogEntry(SystemCredentials.get().getAsThrift(), entry, getLock());
   }
   
   private int startServer(AccumuloConfiguration conf, Property portHint, TProcessor processor, String threadName) throws UnknownHostException {
@@ -2792,7 +2790,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
           while (!serverStopRequested && mm != null && client != null && client.getOutputProtocol() != null
               && client.getOutputProtocol().getTransport() != null && client.getOutputProtocol().getTransport().isOpen()) {
             try {
-              mm.send(SecurityConstants.getSystemCredentials(), getClientAddressString(), iface);
+              mm.send(SystemCredentials.get().getAsThrift(), getClientAddressString(), iface);
               mm = null;
             } catch (TException ex) {
               log.warn("Error sending message: queuing message again");
@@ -2899,7 +2897,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
         TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN,
         TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN});
     
-    ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), tableToVerify, Authorizations.EMPTY);
+    ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get().getAsThrift(), tableToVerify, Authorizations.EMPTY);
     scanner.setRange(extent.toMetadataRange());
     
     TreeMap<Key,Value> tkv = new TreeMap<Key,Value>();
@@ -2933,7 +2931,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       
       KeyExtent fke;
       try {
-        fke = MetadataTableUtil.fixSplit(metadataEntry, tabletEntries.get(metadataEntry), instance, SecurityConstants.getSystemCredentials(), lock);
+        fke = MetadataTableUtil.fixSplit(metadataEntry, tabletEntries.get(metadataEntry), instance, SystemCredentials.get().getAsThrift(), lock);
       } catch (IOException e) {
         log.error("Error fixing split " + metadataEntry);
         throw new AccumuloException(e.toString());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/util/Admin.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
index fca811e..215b9c7 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -36,7 +36,7 @@ import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.log4j.Logger;
 
@@ -88,8 +88,8 @@ public class Admin {
       String principal;
       AuthenticationToken token;
       if (opts.getToken() == null) {
-        principal = SecurityConstants.getSystemPrincipal();
-        token = SecurityConstants.getSystemToken();
+        principal = SystemCredentials.get().getPrincipal();
+        token = SystemCredentials.get().getToken();
       } else {
         principal = opts.principal;
         token = opts.getToken();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java b/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
index de27112..f180ccd 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
@@ -33,7 +33,7 @@ import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletLocationState;
 import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.master.state.tables.TableManager;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.commons.collections.iterators.IteratorChain;
 import org.apache.log4j.Logger;
 
@@ -48,8 +48,8 @@ public class FindOfflineTablets {
     opts.parseArgs(FindOfflineTablets.class.getName(), args);
     final AtomicBoolean scanning = new AtomicBoolean(false);
     Instance instance = opts.getInstance();
-    MetaDataTableScanner rootScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), MetadataSchema.TabletsSection.getRange());
-    MetaDataTableScanner metaScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), MetadataSchema.TabletsSection.getRange());
+    MetaDataTableScanner rootScanner = new MetaDataTableScanner(instance, SystemCredentials.get().getAsThrift(), MetadataSchema.TabletsSection.getRange());
+    MetaDataTableScanner metaScanner = new MetaDataTableScanner(instance, SystemCredentials.get().getAsThrift(), MetadataSchema.TabletsSection.getRange());
     @SuppressWarnings("unchecked")
     Iterator<TabletLocationState> scanner = new IteratorChain(rootScanner, metaScanner);
     LiveTServerSet tservers = new LiveTServerSet(instance, DefaultConfiguration.getDefaultConfiguration(), new Listener() {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Initialize.java b/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
index 7d4e6f2..843184d 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
@@ -64,7 +64,7 @@ import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;
 import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.TabletTime;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.conf.Configuration;
@@ -439,7 +439,7 @@ public class Initialize {
   }
   
   private static void initSecurity(Opts opts, String iid) throws AccumuloSecurityException, ThriftSecurityException {
-    AuditedSecurityOperation.getInstance(iid, true).initializeSecurity(SecurityConstants.getSystemCredentials(), DEFAULT_ROOT_USER, opts.rootpass);
+    AuditedSecurityOperation.getInstance(iid, true).initializeSecurity(SystemCredentials.get().getAsThrift(), DEFAULT_ROOT_USER, opts.rootpass);
   }
   
   protected static void initMetadataConfig() throws IOException {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index 816df8b..b2cd114 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -81,7 +81,7 @@ import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.fs.FileStatus;
@@ -490,7 +490,7 @@ public class MetadataTableUtil {
   }
   
   public static void addDeleteEntry(String tableId, String path) throws IOException {
-    update(SecurityConstants.getSystemCredentials(), createDeleteMutation(tableId, path), new KeyExtent(new Text(tableId), null, null));
+    update(SystemCredentials.get().getAsThrift(), createDeleteMutation(tableId, path), new KeyExtent(new Text(tableId), null, null));
   }
   
   public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws IOException {
@@ -975,7 +975,7 @@ public class MetadataTableUtil {
       } else {
         Mutation m = new Mutation(entry.extent.getMetadataEntry());
         m.putDelete(LogColumnFamily.NAME, new Text(entry.server + "/" + entry.filename));
-        update(SecurityConstants.getSystemCredentials(), zooLock, m, entry.extent);
+        update(SystemCredentials.get().getAsThrift(), zooLock, m, entry.extent);
       }
     }
   }
@@ -1126,7 +1126,7 @@ public class MetadataTableUtil {
   
   public static void cloneTable(Instance instance, String srcTableId, String tableId) throws Exception {
     
-    Connector conn = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
+    Connector conn = instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
     BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     while (true) {
@@ -1151,7 +1151,7 @@ public class MetadataTableUtil {
         bw.flush();
         
         // delete what we have cloned and try again
-        deleteTable(tableId, false, SecurityConstants.getSystemCredentials(), null);
+        deleteTable(tableId, false, SystemCredentials.get().getAsThrift(), null);
         
         log.debug("Tablets merged in table " + srcTableId + " while attempting to clone, trying again");
         
@@ -1181,7 +1181,7 @@ public class MetadataTableUtil {
   public static void chopped(KeyExtent extent, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("chopped".getBytes()));
-    update(SecurityConstants.getSystemCredentials(), zooLock, m, extent);
+    update(SystemCredentials.get().getAsThrift(), zooLock, m, extent);
   }
   
   public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception {
@@ -1242,7 +1242,7 @@ public class MetadataTableUtil {
     
     // new KeyExtent is only added to force update to write to the metadata table, not the root table
     // because bulk loads aren't supported to the metadata table
-    update(SecurityConstants.getSystemCredentials(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
+    update(SystemCredentials.get().getAsThrift(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
   }
   
   public static void removeBulkLoadInProgressFlag(String path) {
@@ -1252,7 +1252,7 @@ public class MetadataTableUtil {
     
     // new KeyExtent is only added to force update to write to the metadata table, not the root table
     // because bulk loads aren't supported to the metadata table
-    update(SecurityConstants.getSystemCredentials(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
+    update(SystemCredentials.get().getAsThrift(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
   }
   
   public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java b/server/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
new file mode 100644
index 0000000..f422ecb
--- /dev/null
+++ b/server/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.UUID;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.ConnectorImpl;
+import org.apache.accumulo.core.security.Credentials;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.server.security.SystemCredentials.SystemToken;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * 
+ */
+public class SystemCredentialsTest {
+  
+  @BeforeClass
+  public static void setUp() throws IOException {
+    File testInstanceId = new File(new File(new File(new File("target"), "instanceTest"), "instance_id"), UUID.fromString(
+        "00000000-0000-0000-0000-000000000000").toString());
+    if (!testInstanceId.exists()) {
+      testInstanceId.getParentFile().mkdirs();
+      testInstanceId.createNewFile();
+    }
+  }
+  
+  /**
+   * This is a test to ensure the string literal in {@link ConnectorImpl#ConnectorImpl(Instance, TCredentials)} is kept up-to-date if we move the
+   * {@link SystemToken}<br/>
+   * This check will not be needed after ACCUMULO-1578
+   */
+  @Test
+  public void testSystemToken() {
+    assertEquals("org.apache.accumulo.server.security.SystemCredentials$SystemToken", SystemToken.class.getName());
+    assertEquals(SystemCredentials.get().getToken().getClass(), SystemToken.class);
+    assertEquals(SystemCredentials.get().getAsThrift().getTokenClassName(), SystemToken.class.getName());
+  }
+  
+  @Test
+  public void testSystemCredentials() {
+    Credentials a = SystemCredentials.get();
+    Credentials b = SystemCredentials.get();
+    assertTrue(a == b);
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/server/src/test/resources/accumulo-site.xml
----------------------------------------------------------------------
diff --git a/server/src/test/resources/accumulo-site.xml b/server/src/test/resources/accumulo-site.xml
new file mode 100644
index 0000000..2aa9fff
--- /dev/null
+++ b/server/src/test/resources/accumulo-site.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+  <property>
+    <name>instance.dfs.dir</name>
+    <value>${project.build.directory}/instanceTest</value>
+  </property>
+
+  <property>
+    <name>instance.secret</name>
+    <value>TEST_SYSTEM_SECRET</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java b/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java
index 65cf80c..caef670 100644
--- a/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java
+++ b/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java
@@ -19,7 +19,6 @@ package org.apache.accumulo.test;
 import java.io.IOException;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.core.client.impl.MasterClient;
 import org.apache.accumulo.core.master.MasterNotRunningException;
 import org.apache.accumulo.core.master.thrift.MasterClientService;
@@ -29,7 +28,8 @@ import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.monitor.Monitor;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.thrift.transport.TTransportException;
 
 public class GetMasterStats {
@@ -44,7 +44,7 @@ public class GetMasterStats {
     MasterMonitorInfo stats = null;
     try {
       client = MasterClient.getConnectionWithRetry(HdfsZooInstance.getInstance());
-      stats = client.getMasterStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
+      stats = client.getMasterStats(Tracer.traceInfo(), SystemCredentials.get().getAsThrift());
     } finally {
       if (client != null)
         MasterClient.close(client);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
index ea677da..8345ac4 100644
--- a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
+++ b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
@@ -45,7 +45,7 @@ import org.apache.accumulo.server.cli.ClientOnRequiredTable;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.monitor.Monitor;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
@@ -134,7 +134,7 @@ public class ContinuousStatsCollector {
       MasterClientService.Iface client = null;
       try {
         client = MasterClient.getConnectionWithRetry(opts.getInstance());
-        MasterMonitorInfo stats = client.getMasterStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
+        MasterMonitorInfo stats = client.getMasterStats(Tracer.traceInfo(), SystemCredentials.get().getAsThrift());
         
         TableInfo all = new TableInfo();
         Map<String,TableInfo> tableSummaries = new HashMap<String,TableInfo>();
@@ -177,8 +177,7 @@ public class ContinuousStatsCollector {
     
   }
   
-  static class Opts extends ClientOnRequiredTable {
-  }
+  static class Opts extends ClientOnRequiredTable {}
   
   public static void main(String[] args) {
     Opts opts = new Opts();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
index 8cb79c3..802d942 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
@@ -52,7 +52,7 @@ import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.master.state.Assignment;
 import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.TabletServer;
 import org.apache.accumulo.server.tabletserver.TabletTime;
 import org.apache.accumulo.server.util.FileUtil;
@@ -140,7 +140,7 @@ public class SplitRecoveryTest extends FunctionalTest {
       KeyExtent extent = extents[i];
       
       String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
-      MetadataTableUtil.addTablet(extent, tdir, SecurityConstants.getSystemCredentials(), TabletTime.LOGICAL_TIME_ID, zl);
+      MetadataTableUtil.addTablet(extent, tdir, SystemCredentials.get().getAsThrift(), TabletTime.LOGICAL_TIME_ID, zl);
       SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
       mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
       
@@ -149,7 +149,7 @@ public class SplitRecoveryTest extends FunctionalTest {
       }
       int tid = 0;
       TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
-      MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", SecurityConstants.getSystemCredentials(), zl);
+      MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", SystemCredentials.get().getAsThrift(), zl);
     }
     
     KeyExtent extent = extents[extentToSplit];
@@ -170,21 +170,21 @@ public class SplitRecoveryTest extends FunctionalTest {
     MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
         highDatafileSizes, highDatafilesToRemove);
     
-    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SecurityConstants.getSystemCredentials(), zl);
+    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get().getAsThrift(), zl);
     TServerInstance instance = new TServerInstance(location, zl.getSessionId());
-    Writer writer = new Writer(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), MetadataTable.ID);
+    Writer writer = new Writer(HdfsZooInstance.getInstance(), SystemCredentials.get().getAsThrift(), MetadataTable.ID);
     Assignment assignment = new Assignment(high, instance);
     Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
     m.put(TabletsSection.FutureLocationColumnFamily.NAME, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
     writer.update(m);
     
     if (steps >= 1) {
-      Map<FileRef,Long> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), extent);
-      MetadataTableUtil.addNewTablet(low, "/lowDir", instance, lowDatafileSizes, bulkFiles, SecurityConstants.getSystemCredentials(),
-          TabletTime.LOGICAL_TIME_ID + "0", -1l, -1l, zl);
+      Map<FileRef,Long> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get().getAsThrift(), extent);
+      MetadataTableUtil.addNewTablet(low, "/lowDir", instance, lowDatafileSizes, bulkFiles, SystemCredentials.get().getAsThrift(), TabletTime.LOGICAL_TIME_ID
+          + "0", -1l, -1l, zl);
     }
     if (steps >= 2)
-      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SecurityConstants.getSystemCredentials(), zl);
+      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get().getAsThrift(), zl);
     
     TabletServer.verifyTabletInformation(high, instance, null, "127.0.0.1:0", zl);
     
@@ -192,8 +192,8 @@ public class SplitRecoveryTest extends FunctionalTest {
       ensureTabletHasNoUnexpectedMetadataEntries(low, lowDatafileSizes);
       ensureTabletHasNoUnexpectedMetadataEntries(high, highDatafileSizes);
       
-      Map<FileRef,Long> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), low);
-      Map<FileRef,Long> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), high);
+      Map<FileRef,Long> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get().getAsThrift(), low);
+      Map<FileRef,Long> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get().getAsThrift(), high);
       
       if (!lowBulkFiles.equals(highBulkFiles)) {
         throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
@@ -208,7 +208,7 @@ public class SplitRecoveryTest extends FunctionalTest {
   }
   
   private void ensureTabletHasNoUnexpectedMetadataEntries(KeyExtent extent, SortedMap<FileRef,DataFileValue> expectedMapFiles) throws Exception {
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), MetadataTable.ID, Authorizations.EMPTY);
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get().getAsThrift(), MetadataTable.ID, Authorizations.EMPTY);
     scanner.setRange(extent.toMetadataRange());
     
     HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
@@ -247,7 +247,7 @@ public class SplitRecoveryTest extends FunctionalTest {
       throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
     }
     
-    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, SecurityConstants.getSystemCredentials());
+    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, SystemCredentials.get().getAsThrift());
     verifySame(expectedMapFiles, fixedMapFiles);
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java b/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
index 5602f14..3545170 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
@@ -42,7 +42,7 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.AddressUtil;
 import org.apache.accumulo.core.util.Stat;
 import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.hadoop.io.Text;
 
 /**
@@ -56,8 +56,8 @@ public class MetadataBatchScanTest {
   
   public static void main(String[] args) throws Exception {
     
-    final Connector connector = new ZooKeeperInstance("acu14", "localhost")
-        .getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
+    final Connector connector = new ZooKeeperInstance("acu14", "localhost").getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get()
+        .getToken());
     
     TreeSet<Long> splits = new TreeSet<Long>();
     Random r = new Random(42);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java b/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
index d4b1c8e..41a4d54 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
@@ -62,7 +62,7 @@ import org.apache.accumulo.server.master.state.MetaDataStateStore;
 import org.apache.accumulo.server.master.state.MetaDataTableScanner;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletLocationState;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher;
 import org.apache.accumulo.trace.thrift.TInfo;
@@ -230,7 +230,7 @@ public class NullTserver {
     
     // read the locations for the table
     Range tableRange = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-    MetaDataTableScanner s = new MetaDataTableScanner(zki, SecurityConstants.getSystemCredentials(), tableRange);
+    MetaDataTableScanner s = new MetaDataTableScanner(zki, SystemCredentials.get().getAsThrift(), tableRange);
     long randomSessionID = opts.port;
     TServerInstance instance = new TServerInstance(addr, randomSessionID);
     List<Assignment> assignments = new ArrayList<Assignment>();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Shutdown.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Shutdown.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Shutdown.java
index b283752..aa4c619 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Shutdown.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Shutdown.java
@@ -24,7 +24,7 @@ import org.apache.accumulo.core.master.thrift.MasterGoalState;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.state.SetGoalState;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.test.randomwalk.State;
 import org.apache.accumulo.test.randomwalk.Test;
 import org.apache.accumulo.trace.instrument.Tracer;
@@ -32,25 +32,25 @@ import org.apache.accumulo.trace.instrument.Tracer;
 public class Shutdown extends Test {
   
   @Override
-  public void visit(State state, Properties props) throws Exception  {
+  public void visit(State state, Properties props) throws Exception {
     log.debug("shutting down");
-    SetGoalState.main(new String[]{MasterGoalState.CLEAN_STOP.name()});
+    SetGoalState.main(new String[] {MasterGoalState.CLEAN_STOP.name()});
     
     while (!state.getConnector().instanceOperations().getTabletServers().isEmpty()) {
       UtilWaitThread.sleep(1000);
     }
     
     while (true) {
-        try {
-          Client client = MasterClient.getConnection(HdfsZooInstance.getInstance());
-          client.getMasterStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
-        } catch (Exception e) {
-          // assume this is due to server shutdown
-          break;
-        }
-        UtilWaitThread.sleep(1000);
+      try {
+        Client client = MasterClient.getConnection(HdfsZooInstance.getInstance());
+        client.getMasterStats(Tracer.traceInfo(), SystemCredentials.get().getAsThrift());
+      } catch (Exception e) {
+        // assume this is due to server shutdown
+        break;
+      }
+      UtilWaitThread.sleep(1000);
     }
-
+    
     log.debug("tablet servers stopped");
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StartAll.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StartAll.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StartAll.java
index 8b99a55..45844b0 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StartAll.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/StartAll.java
@@ -25,7 +25,7 @@ import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.state.SetGoalState;
-import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.test.randomwalk.State;
 import org.apache.accumulo.test.randomwalk.Test;
 import org.apache.accumulo.trace.instrument.Tracer;
@@ -35,13 +35,13 @@ public class StartAll extends Test {
   @Override
   public void visit(State state, Properties props) throws Exception {
     log.info("Starting all servers");
-    SetGoalState.main(new String[]{MasterGoalState.NORMAL.name()});
-    Process exec = Runtime.getRuntime().exec(new String[]{System.getenv().get("ACCUMULO_HOME") + "/bin/start-all.sh"});
+    SetGoalState.main(new String[] {MasterGoalState.NORMAL.name()});
+    Process exec = Runtime.getRuntime().exec(new String[] {System.getenv().get("ACCUMULO_HOME") + "/bin/start-all.sh"});
     exec.waitFor();
     while (true) {
       try {
         Client client = MasterClient.getConnection(HdfsZooInstance.getInstance());
-        MasterMonitorInfo masterStats = client.getMasterStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
+        MasterMonitorInfo masterStats = client.getMasterStats(Tracer.traceInfo(), SystemCredentials.get().getAsThrift());
         if (!masterStats.tServerInfo.isEmpty())
           break;
       } catch (Exception ex) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a943f323/test/src/main/java/org/apache/accumulo/test/randomwalk/security/WalkingSecurity.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/security/WalkingSecurity.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/security/WalkingSecurity.java
index bd97dd4..9cff8f7 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/security/WalkingSecurity.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/security/WalkingSecurity.java
@@ -69,7 +69,6 @@ public class WalkingSecurity extends SecurityOperation implements Authorizor, Au
     super(author, authent, pm, instanceId);
   }
   
-  @SuppressWarnings("deprecation")
   public WalkingSecurity(State state2) {
     super(state2.getInstance().getInstanceID());
     this.state = state2;
@@ -401,7 +400,7 @@ public class WalkingSecurity extends SecurityOperation implements Authorizor, Au
   public boolean validTokenClass(String tokenClass) {
     return tokenClass.equals(PasswordToken.class.getCanonicalName());
   }
-
+  
   public static void clearInstance() {
     instance = null;
   }
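
A minimal illustrative sketch (not part of the commit) of the server-side call pattern these hunks converge on: credentials come from the SystemCredentials singleton instead of the removed SecurityConstants helpers. The class name and the printed line are mine; the methods are the ones the diff itself exercises, and, as the new SystemCredentialsTest does, it assumes a server-side accumulo-site.xml and instance id are available.

import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.security.thrift.TCredentials;
import org.apache.accumulo.server.security.SystemCredentials;

public class SystemCredentialsSketch {
  public static void main(String[] args) {
    SystemCredentials creds = SystemCredentials.get();   // cached singleton; repeated calls return the same object
    String principal = creds.getPrincipal();             // replaces SecurityConstants.getSystemPrincipal()
    AuthenticationToken token = creds.getToken();        // replaces SecurityConstants.getSystemToken()
    TCredentials thrift = creds.getAsThrift();           // replaces SecurityConstants.getSystemCredentials()
    System.out.println(principal + " uses token class " + thrift.getTokenClassName());
  }
}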


[36/50] git commit: ACCUMULO-1595 remove code to lock jvm to ram

Posted by kt...@apache.org.
ACCUMULO-1595 remove code to lock jvm to ram


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/b3f11559
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/b3f11559
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/b3f11559

Branch: refs/heads/ACCUMULO-1000
Commit: b3f1155962ead8af40b63c83103120ca0b82b3f2
Parents: a943f32
Author: Eric Newton <ec...@apache.org>
Authored: Mon Jul 22 13:07:54 2013 -0400
Committer: Eric Newton <ec...@apache.org>
Committed: Mon Jul 22 13:07:54 2013 -0400

----------------------------------------------------------------------
 .../accumulo/server/tabletserver/TabletServer.java   | 15 ---------------
 1 file changed, 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/b3f11559/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index ceed0ee..d83102d 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -3036,21 +3036,6 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     clientAddress = new InetSocketAddress(hostname, 0);
     logger = new TabletServerLogger(this, getSystemConfiguration().getMemoryInBytes(Property.TSERV_WALOG_MAX_SIZE));
     
-    if (getSystemConfiguration().getBoolean(Property.TSERV_LOCK_MEMORY)) {
-      String path = "lib/native/mlock/" + System.mapLibraryName("MLock-" + Platform.getPlatform());
-      path = new File(path).getAbsolutePath();
-      try {
-        System.load(path);
-        log.info("Trying to lock memory pages to RAM");
-        if (MLock.lockMemoryPages() < 0)
-          log.error("Failed to lock memory pages to RAM");
-        else
-          log.info("Memory pages are now locked into RAM");
-      } catch (Throwable t) {
-        log.error("Failed to load native library for locking pages to RAM " + path + " (" + t + ")", t);
-      }
-    }
-    
     try {
       AccumuloVFSClassLoader.getContextManager().setContextConfig(new ContextManager.DefaultContextsConfig(new Iterable<Entry<String,String>>() {
         @Override


[11/50] git commit: ACCUMULO-998 remove malformed javadoc tag

Posted by kt...@apache.org.
ACCUMULO-998 remove malformed javadoc tag


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/c3698b09
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/c3698b09
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/c3698b09

Branch: refs/heads/ACCUMULO-1000
Commit: c3698b094127ee21a3558a3b499cfc0680416773
Parents: e6d6fab
Author: Christopher Tubbs <ct...@apache.org>
Authored: Wed Jul 17 12:09:02 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Wed Jul 17 12:09:02 2013 -0400

----------------------------------------------------------------------
 .../core/security/crypto/CryptoModuleParameters.java    | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/c3698b09/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java
index 7027496..d9d48fe 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java
@@ -17,6 +17,7 @@
 
 package org.apache.accumulo.core.security.crypto;
 
+import java.io.FilterOutputStream;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.security.SecureRandom;
@@ -297,7 +298,6 @@ public class CryptoModuleParameters {
    * Gets the opaque ID associated with the encrypted version of the plaintext key.
    * 
    * @see CryptoModuleParameters#setOpaqueKeyEncryptionKeyID(String)
-   * @return
    */
   public String getOpaqueKeyEncryptionKeyID() {
     return opaqueKeyEncryptionKeyID;
@@ -340,9 +340,9 @@ public class CryptoModuleParameters {
    * 
    * <p>
    * 
-   * If this is set to <i>true</i>, then the stream passed to {@link CryptoModule#getEncryptingOutputStream(CryptoModuleParameters)} will be <i>written to by the module</i> before it
-   * is returned to the caller. There are situations where it is easier to let the crypto module do this writing on behalf of the caller, and other times where
-   * it is not appropriate (if the format of the underlying stream must be carefully maintained, for instance).
+   * If this is set to <i>true</i>, then the stream passed to {@link CryptoModule#getEncryptingOutputStream(CryptoModuleParameters)} will be <i>written to by
+   * the module</i> before it is returned to the caller. There are situations where it is easier to let the crypto module do this writing on behalf of the
+   * caller, and other times where it is not appropriate (if the format of the underlying stream must be carefully maintained, for instance).
    * 
    * @param recordParametersToStream
    *          whether or not to require the module to record its parameters to the stream by itself
@@ -456,13 +456,13 @@ public class CryptoModuleParameters {
     this.encryptedOutputStream = encryptedOutputStream;
   }
   
-
   /**
    * Gets the plaintext input stream, which is nearly always a wrapped version of the output from {@link CryptoModuleParameters#getEncryptedInputStream()}.
    * 
    * <p>
    * 
-   * Generally this method is used by {@link CryptoModule} classes as an <i>out</i> parameter from calling {@link CryptoModule#getDecryptingInputStream(CryptoModuleParameters)}.
+   * Generally this method is used by {@link CryptoModule} classes as an <i>out</i> parameter from calling
+   * {@link CryptoModule#getDecryptingInputStream(CryptoModuleParameters)}.
    * 
    * 
    * @see CryptoModuleParameters#setPlaintextInputStream(InputStream)


[16/50] git commit: ACCUMULO-1030 clean up the directories left from any previous runs

Posted by kt...@apache.org.
ACCUMULO-1030 clean up the directories left from any previous runs


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/88c11e63
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/88c11e63
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/88c11e63

Branch: refs/heads/ACCUMULO-1000
Commit: 88c11e63c1c83b2d75e7a1da4b5fa90d97a38243
Parents: 8b0f573
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 13:12:03 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 13:12:03 2013 -0400

----------------------------------------------------------------------
 .../src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/88c11e63/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java
index 075ba8e..b3efd81 100644
--- a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java
+++ b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/StartMojo.java
@@ -28,6 +28,7 @@ import org.apache.maven.plugins.annotations.LifecyclePhase;
 import org.apache.maven.plugins.annotations.Mojo;
 import org.apache.maven.plugins.annotations.Parameter;
 import org.apache.maven.plugins.annotations.ResolutionScope;
+import org.codehaus.plexus.util.FileUtils;
 
 /**
  * Goal which starts an instance of {@link MiniAccumuloCluster}.
@@ -54,6 +55,8 @@ public class StartMojo extends AbstractAccumuloMojo {
     
     try {
       subdir = subdir.getCanonicalFile();
+      if (subdir.exists())
+        FileUtils.forceDelete(subdir);
       subdir.mkdirs();
       configureMiniClasspath(miniClasspath);
       MiniAccumuloConfig cfg = new MiniAccumuloConfig(subdir, rootPassword);
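
A small sketch (not from the commit; the directory path and class name are placeholders) of the clean-before-start pattern the hunk adds: if an earlier run left the working subdirectory behind, delete it before recreating it so the MiniAccumuloCluster instance starts from a clean slate.

import java.io.File;
import java.io.IOException;
import org.codehaus.plexus.util.FileUtils;

public class CleanWorkDirSketch {
  public static void main(String[] args) throws IOException {
    // hypothetical location; StartMojo derives its own subdirectory per instance name
    File subdir = new File("target/accumulo-maven-plugin/testInstance").getCanonicalFile();
    if (subdir.exists())
      FileUtils.forceDelete(subdir);  // discard state left by a previous run
    if (!subdir.mkdirs())
      throw new IOException("unable to create " + subdir);
  }
}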


[43/50] git commit: ACCUMULO-1533 Clean up deprecated mapreduce and thrift code

Posted by kt...@apache.org.
ACCUMULO-1533 Clean up deprecated mapreduce and thrift code


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a69a9d68
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a69a9d68
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a69a9d68

Branch: refs/heads/ACCUMULO-1000
Commit: a69a9d68fb8f1e2e7455b75a5a1130e964d96a7d
Parents: 47d1f5f
Author: Christopher Tubbs <ct...@apache.org>
Authored: Mon Jul 22 14:33:03 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Mon Jul 22 14:33:03 2013 -0400

----------------------------------------------------------------------
 .../apache/accumulo/core/cli/ClientOpts.java    |   4 +-
 .../core/client/AccumuloSecurityException.java  |  18 +-
 .../apache/accumulo/core/client/Instance.java   |  15 -
 .../accumulo/core/client/ZooKeeperInstance.java |  61 +-
 .../core/client/impl/ConnectorImpl.java         |   1 -
 .../mapreduce/AccumuloFileOutputFormat.java     |  49 --
 .../client/mapreduce/AccumuloOutputFormat.java  | 164 -----
 .../core/client/mapreduce/InputFormatBase.java  | 385 ------------
 .../accumulo/core/client/mock/MockInstance.java |   6 -
 .../core/conf/AccumuloConfiguration.java        |  18 +-
 .../apache/accumulo/core/data/ColumnUpdate.java |  10 -
 .../accumulo/core/security/thrift/AuthInfo.java | 616 -------------------
 .../core/security/thrift/SecurityErrorCode.java | 112 ----
 .../thrift/ThriftSecurityException.java         | 521 ----------------
 .../apache/accumulo/core/util/shell/Shell.java  |   4 +-
 .../core/util/shell/commands/FateCommand.java   |   7 +-
 .../accumulo/core/zookeeper/ZooCache.java       |  40 --
 .../apache/accumulo/core/zookeeper/ZooUtil.java |  42 ++
 core/src/main/thrift/security.thrift            |  40 --
 .../core/client/impl/TabletLocatorImplTest.java |   6 -
 .../mapreduce/AccumuloInputFormatTest.java      |  40 --
 .../client/security/SecurityErrorCodeTest.java  |   8 +-
 .../accumulo/core/file/rfile/RFileTest.java     |   2 +-
 .../accumulo/server/client/HdfsZooInstance.java |  16 +-
 .../accumulo/server/conf/ZooConfiguration.java  |   5 +-
 25 files changed, 71 insertions(+), 2119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
index 8a2d6f0..ec6b198 100644
--- a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
@@ -42,6 +42,7 @@ import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.trace.instrument.Trace;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -221,8 +222,7 @@ public class ClientOpts extends Help {
       };
       this.zookeepers = config.get(Property.INSTANCE_ZK_HOST);
       Path instanceDir = new Path(config.get(Property.INSTANCE_DFS_DIR), "instance_id");
-      @SuppressWarnings("deprecation")
-      String instanceIDFromFile = ZooKeeperInstance.getInstanceIDFromHdfs(instanceDir);
+      String instanceIDFromFile = ZooUtil.getInstanceIDFromHdfs(instanceDir);
       return cachedInstance = new ZooKeeperInstance(UUID.fromString(instanceIDFromFile), zookeepers);
     }
     return cachedInstance = new ZooKeeperInstance(this.instance, this.zookeepers);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java b/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java
index 04ba4e7..f626f4d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java
@@ -94,7 +94,7 @@ public class AccumuloSecurityException extends Exception {
    * @param errorcode
    *          the specific reason for this exception
    * @param tableInfo
-   *          the relevant tableInfo for the security violation 
+   *          the relevant tableInfo for the security violation
    * @param cause
    *          the exception that caused this violation
    */
@@ -123,7 +123,7 @@ public class AccumuloSecurityException extends Exception {
    * @param errorcode
    *          the specific reason for this exception
    * @param tableInfo
-   *          the relevant tableInfo for the security violation 
+   *          the relevant tableInfo for the security violation
    */
   public AccumuloSecurityException(final String user, final SecurityErrorCode errorcode, final String tableInfo) {
     super(getDefaultErrorMessage(errorcode));
@@ -158,25 +158,17 @@ public class AccumuloSecurityException extends Exception {
    * @return the specific reason for this exception
    * @since 1.5.0
    */
-
+  
   public org.apache.accumulo.core.client.security.SecurityErrorCode getSecurityErrorCode() {
     return org.apache.accumulo.core.client.security.SecurityErrorCode.valueOf(errorCode.name());
   }
-
-  /**
-   * @return the specific reason for this exception
-   * 
-   * @deprecated since 1.5.0; Use {@link #getSecurityErrorCode()} instead.
-   */
-  public org.apache.accumulo.core.security.thrift.SecurityErrorCode getErrorCode() {
-    return org.apache.accumulo.core.security.thrift.SecurityErrorCode.valueOf(errorCode.name());
-  }
   
+  @Override
   public String getMessage() {
     StringBuilder message = new StringBuilder();
     message.append("Error ").append(errorCode);
     message.append(" for user ").append(user);
-    if(!StringUtils.isEmpty(tableInfo)) {
+    if (!StringUtils.isEmpty(tableInfo)) {
       message.append(" on table ").append(tableInfo);
     }
     message.append(" - ").append(super.getMessage());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/client/Instance.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/Instance.java b/core/src/main/java/org/apache/accumulo/core/client/Instance.java
index 3b04281..0796059 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/Instance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/Instance.java
@@ -90,21 +90,6 @@ public interface Instance {
   /**
    * Returns a connection to accumulo.
    * 
-   * @param auth
-   *          An Credentials object.
-   * @return the accumulo Connector
-   * @throws AccumuloException
-   *           when a generic exception occurs
-   * @throws AccumuloSecurityException
-   *           when a user's credentials are invalid
-   * @deprecated since 1.5, use {@link #getConnector(String, AuthenticationToken)} with {@link PasswordToken}
-   */
-  @Deprecated
-  public abstract Connector getConnector(org.apache.accumulo.core.security.thrift.AuthInfo auth) throws AccumuloException, AccumuloSecurityException;
-  
-  /**
-   * Returns a connection to accumulo.
-   * 
    * @param user
    *          a valid accumulo user
    * @param pass

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java b/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
index 5b56adb..07f5fd4 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
@@ -16,8 +16,6 @@
  */
 package org.apache.accumulo.core.client;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.List;
@@ -29,20 +27,14 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.file.FileUtil;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.CredentialHelper;
-import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ArgumentChecker;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.OpTimer;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -220,12 +212,7 @@ public class ZooKeeperInstance implements Instance {
   
   @Override
   public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
-    return getConnector(CredentialHelper.create(principal, token, getInstanceID()));
-  }
-  
-  @SuppressWarnings("deprecation")
-  private Connector getConnector(TCredentials credential) throws AccumuloException, AccumuloSecurityException {
-    return new ConnectorImpl(this, credential);
+    return new ConnectorImpl(this, CredentialHelper.create(principal, token, getInstanceID()));
   }
   
   @Override
@@ -249,19 +236,7 @@ public class ZooKeeperInstance implements Instance {
   }
   
   /**
-   * @deprecated Use {@link #lookupInstanceName(org.apache.accumulo.fate.zookeeper.ZooCache, UUID)} instead
-   */
-  @Deprecated
-  public static String lookupInstanceName(org.apache.accumulo.core.zookeeper.ZooCache zooCache, UUID instanceId) {
-    return lookupInstanceName((ZooCache) zooCache, instanceId);
-  }
-  
-  /**
    * Given a zooCache and instanceId, look up the instance name.
-   * 
-   * @param zooCache
-   * @param instanceId
-   * @return the instance name
    */
   public static String lookupInstanceName(ZooCache zooCache, UUID instanceId) {
     ArgumentChecker.notNull(zooCache, instanceId);
@@ -275,38 +250,4 @@ public class ZooKeeperInstance implements Instance {
     return null;
   }
   
-  /**
-   * To be moved to server code. Only lives here to support certain client side utilities to minimize command-line options.
-   */
-  @Deprecated
-  public static String getInstanceIDFromHdfs(Path instanceDirectory) {
-    try {
-      FileSystem fs = FileUtil.getFileSystem(CachedConfiguration.getInstance(), AccumuloConfiguration.getSiteConfiguration());
-      FileStatus[] files = null;
-      try {
-        files = fs.listStatus(instanceDirectory);
-      } catch (FileNotFoundException ex) {
-        // ignored
-      }
-      log.debug("Trying to read instance id from " + instanceDirectory);
-      if (files == null || files.length == 0) {
-        log.error("unable obtain instance id at " + instanceDirectory);
-        throw new RuntimeException("Accumulo not initialized, there is no instance id at " + instanceDirectory);
-      } else if (files.length != 1) {
-        log.error("multiple potential instances in " + instanceDirectory);
-        throw new RuntimeException("Accumulo found multiple possible instance ids in " + instanceDirectory);
-      } else {
-        String result = files[0].getPath().getName();
-        return result;
-      }
-    } catch (IOException e) {
-      throw new RuntimeException("Accumulo not initialized, there is no instance id at " + instanceDirectory, e);
-    }
-  }
-  
-  @Deprecated
-  @Override
-  public Connector getConnector(org.apache.accumulo.core.security.thrift.AuthInfo auth) throws AccumuloException, AccumuloSecurityException {
-    return getConnector(auth.user, auth.password);
-  }
 }
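With the AuthInfo and TCredentials overloads gone, getConnector(String, AuthenticationToken) is the connection path this class keeps. A minimal sketch of client code on that path; the instance name, ZooKeeper hosts, user, and password below are placeholders, not values from this commit:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class ConnectExample {
  public static void main(String[] args) throws Exception {
    // Locate the instance by name through its ZooKeeper quorum (placeholder values).
    Instance instance = new ZooKeeperInstance("myInstance", "zkhost1:2181,zkhost2:2181");
    // Authenticate with the principal/AuthenticationToken overload that remains.
    Connector connector = instance.getConnector("someUser", new PasswordToken("somePassword"));
    System.out.println("connected to " + connector.getInstance().getInstanceName());
  }
}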

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
index 3c6e445..3858cdc 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
@@ -51,7 +51,6 @@ public class ConnectorImpl extends Connector {
   private TableOperations tableops = null;
   private InstanceOperations instanceops = null;
   
-  @Deprecated
   public ConnectorImpl(Instance instance, TCredentials cred) throws AccumuloException, AccumuloSecurityException {
     ArgumentChecker.notNull(instance, cred);
     this.instance = instance;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
index cfcefda..d78219c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
@@ -19,7 +19,6 @@ package org.apache.accumulo.core.client.mapreduce;
 import java.io.IOException;
 import java.util.Arrays;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.mapreduce.lib.util.FileOutputConfigurator;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
@@ -28,14 +27,12 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.commons.collections.map.LRUMap;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
@@ -177,50 +174,4 @@ public class AccumuloFileOutputFormat extends FileOutputFormat<Key,Value> {
     };
   }
   
-  // ----------------------------------------------------------------------------------------------------
-  // Everything below this line is deprecated and should go away in future versions
-  // ----------------------------------------------------------------------------------------------------
-  
-  /**
-   * @deprecated since 1.5.0; Retrieve the relevant block size from {@link #getAccumuloConfiguration(JobContext)} and configure hadoop's
-   *             io.seqfile.compress.blocksize with the same value. No longer needed, as {@link RFile} does not use this field.
-   */
-  @Deprecated
-  protected static void handleBlockSize(Configuration conf) {
-    conf.setInt("io.seqfile.compress.blocksize",
-        (int) FileOutputConfigurator.getAccumuloConfiguration(CLASS, conf).getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE));
-  }
-  
-  /**
-   * @deprecated since 1.5.0; This method does nothing. Only 'rf' type is supported.
-   */
-  @Deprecated
-  public static void setFileType(Configuration conf, String type) {}
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setFileBlockSize(Job, long)}, {@link #setDataBlockSize(Job, long)}, or {@link #setIndexBlockSize(Job, long)} instead.
-   */
-  @Deprecated
-  public static void setBlockSize(Configuration conf, int blockSize) {
-    FileOutputConfigurator.setDataBlockSize(CLASS, conf, blockSize);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; This {@link OutputFormat} does not communicate with Accumulo. If this is needed, subclasses must implement their own
-   *             configuration.
-   */
-  @Deprecated
-  public static void setZooKeeperInstance(Configuration conf, String instanceName, String zooKeepers) {
-    FileOutputConfigurator.setZooKeeperInstance(CLASS, conf, instanceName, zooKeepers);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; This {@link OutputFormat} does not communicate with Accumulo. If this is needed, subclasses must implement their own
-   *             configuration.
-   */
-  @Deprecated
-  protected static Instance getInstance(Configuration conf) {
-    return FileOutputConfigurator.getInstance(CLASS, conf);
-  }
-  
 }
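The removed javadoc points callers at setFileBlockSize, setDataBlockSize, and setIndexBlockSize taking a Job. A rough sketch of a job configured that way; the output path and sizes are placeholder values, and Job construction varies by Hadoop version:

import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FileOutputSetup {
  public static void main(String[] args) throws Exception {
    Job job = new Job();
    job.setJobName("write-rfiles");
    job.setOutputFormatClass(AccumuloFileOutputFormat.class);
    job.setOutputKeyClass(Key.class);
    job.setOutputValueClass(Value.class);
    // Output directory is a placeholder.
    FileOutputFormat.setOutputPath(job, new Path("/tmp/rfile-output"));
    // Job-based replacements for the removed Configuration-based setters.
    AccumuloFileOutputFormat.setDataBlockSize(job, 256 * 1024);
    AccumuloFileOutputFormat.setIndexBlockSize(job, 128 * 1024);
    AccumuloFileOutputFormat.setFileBlockSize(job, 64 * 1024 * 1024);
  }
}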

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
index 69e7e2e..49bd3e1 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
@@ -21,7 +21,6 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -38,13 +37,11 @@ import org.apache.accumulo.core.client.mapreduce.lib.util.OutputConfigurator;
 import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.security.SecurityErrorCode;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.security.CredentialHelper;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
@@ -536,165 +533,4 @@ public class AccumuloOutputFormat extends OutputFormat<Text,Mutation> {
     }
   }
   
-  // ----------------------------------------------------------------------------------------------------
-  // Everything below this line is deprecated and should go away in future versions
-  // ----------------------------------------------------------------------------------------------------
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setConnectorInfo(Job, String, AuthenticationToken)}, {@link #setCreateTables(Job, boolean)}, and
-   *             {@link #setDefaultTableName(Job, String)} instead.
-   */
-  @Deprecated
-  public static void setOutputInfo(Configuration conf, String user, byte[] passwd, boolean createTables, String defaultTable) {
-    try {
-      OutputConfigurator.setConnectorInfo(CLASS, conf, user, new PasswordToken(passwd));
-    } catch (AccumuloSecurityException e) {
-      throw new RuntimeException(e);
-    }
-    OutputConfigurator.setCreateTables(CLASS, conf, createTables);
-    OutputConfigurator.setDefaultTableName(CLASS, conf, defaultTable);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setZooKeeperInstance(Job, String, String)} instead.
-   */
-  @Deprecated
-  public static void setZooKeeperInstance(Configuration conf, String instanceName, String zooKeepers) {
-    OutputConfigurator.setZooKeeperInstance(CLASS, conf, instanceName, zooKeepers);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setMockInstance(Job, String)} instead.
-   */
-  @Deprecated
-  public static void setMockInstance(Configuration conf, String instanceName) {
-    OutputConfigurator.setMockInstance(CLASS, conf, instanceName);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setBatchWriterOptions(Job, BatchWriterConfig)} instead.
-   */
-  @Deprecated
-  public static void setMaxMutationBufferSize(Configuration conf, long numberOfBytes) {
-    BatchWriterConfig bwConfig = OutputConfigurator.getBatchWriterOptions(CLASS, conf);
-    bwConfig.setMaxMemory(numberOfBytes);
-    OutputConfigurator.setBatchWriterOptions(CLASS, conf, bwConfig);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setBatchWriterOptions(Job, BatchWriterConfig)} instead.
-   */
-  @Deprecated
-  public static void setMaxLatency(Configuration conf, int numberOfMilliseconds) {
-    BatchWriterConfig bwConfig = OutputConfigurator.getBatchWriterOptions(CLASS, conf);
-    bwConfig.setMaxLatency(numberOfMilliseconds, TimeUnit.MILLISECONDS);
-    OutputConfigurator.setBatchWriterOptions(CLASS, conf, bwConfig);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setBatchWriterOptions(Job, BatchWriterConfig)} instead.
-   */
-  @Deprecated
-  public static void setMaxWriteThreads(Configuration conf, int numberOfThreads) {
-    BatchWriterConfig bwConfig = OutputConfigurator.getBatchWriterOptions(CLASS, conf);
-    bwConfig.setMaxWriteThreads(numberOfThreads);
-    OutputConfigurator.setBatchWriterOptions(CLASS, conf, bwConfig);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setLogLevel(Job, Level)} instead.
-   */
-  @Deprecated
-  public static void setLogLevel(Configuration conf, Level level) {
-    OutputConfigurator.setLogLevel(CLASS, conf, level);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setSimulationMode(Job, boolean)} instead.
-   */
-  @Deprecated
-  public static void setSimulationMode(Configuration conf) {
-    OutputConfigurator.setSimulationMode(CLASS, conf, true);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getToken(JobContext)} instead.
-   */
-  @Deprecated
-  protected static String getPrincipal(Configuration conf) {
-    return OutputConfigurator.getPrincipal(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getToken(JobContext)} instead.
-   */
-  @Deprecated
-  protected static byte[] getToken(Configuration conf) {
-    return OutputConfigurator.getToken(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #canCreateTables(JobContext)} instead.
-   */
-  @Deprecated
-  protected static boolean canCreateTables(Configuration conf) {
-    return OutputConfigurator.canCreateTables(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getDefaultTableName(JobContext)} instead.
-   */
-  @Deprecated
-  protected static String getDefaultTableName(Configuration conf) {
-    return OutputConfigurator.getDefaultTableName(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getInstance(JobContext)} instead.
-   */
-  @Deprecated
-  protected static Instance getInstance(Configuration conf) {
-    return OutputConfigurator.getInstance(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getBatchWriterOptions(JobContext)} instead.
-   */
-  @Deprecated
-  protected static long getMaxMutationBufferSize(Configuration conf) {
-    return OutputConfigurator.getBatchWriterOptions(CLASS, conf).getMaxMemory();
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getBatchWriterOptions(JobContext)} instead.
-   */
-  @Deprecated
-  protected static int getMaxLatency(Configuration conf) {
-    return (int) OutputConfigurator.getBatchWriterOptions(CLASS, conf).getMaxLatency(TimeUnit.MILLISECONDS);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getBatchWriterOptions(JobContext)} instead.
-   */
-  @Deprecated
-  protected static int getMaxWriteThreads(Configuration conf) {
-    return OutputConfigurator.getBatchWriterOptions(CLASS, conf).getMaxWriteThreads();
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getLogLevel(JobContext)} instead.
-   */
-  @Deprecated
-  protected static Level getLogLevel(Configuration conf) {
-    return OutputConfigurator.getLogLevel(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getSimulationMode(JobContext)} instead.
-   */
-  @Deprecated
-  protected static boolean getSimulationMode(Configuration conf) {
-    return OutputConfigurator.getSimulationMode(CLASS, conf);
-  }
-  
 }
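Callers of the deleted Configuration-based helpers move to the Job-based methods the old javadoc named. A sketch under those replacements; instance, ZooKeeper, table, and credential values are placeholders:

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Mutation;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class OutputSetup {
  public static void main(String[] args) throws Exception {
    Job job = new Job();
    job.setOutputFormatClass(AccumuloOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);
    AccumuloOutputFormat.setConnectorInfo(job, "writer", new PasswordToken("secret"));
    AccumuloOutputFormat.setZooKeeperInstance(job, "myInstance", "zkhost:2181");
    AccumuloOutputFormat.setCreateTables(job, true);
    AccumuloOutputFormat.setDefaultTableName(job, "output_table");
    // One BatchWriterConfig replaces the individual buffer/latency/thread setters.
    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxMemory(50 * 1024 * 1024L);
    bwConfig.setMaxLatency(2, TimeUnit.MINUTES);
    bwConfig.setMaxWriteThreads(4);
    AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);
  }
}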

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
index ea40e02..8308a63 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
@@ -19,12 +19,9 @@ package org.apache.accumulo.core.client.mapreduce;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.lang.reflect.Method;
 import java.math.BigInteger;
 import java.net.InetAddress;
-import java.net.URLDecoder;
-import java.net.URLEncoder;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -34,7 +31,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.StringTokenizer;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -55,14 +51,12 @@ import org.apache.accumulo.core.client.impl.TabletLocator;
 import org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator;
 import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.VersioningIterator;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
@@ -999,385 +993,6 @@ public abstract class InputFormatBase<K,V> extends InputFormat<K,V> {
     }
   }
   
-  // ----------------------------------------------------------------------------------------------------
-  // Everything below this line is deprecated and should go away in future versions
-  // ----------------------------------------------------------------------------------------------------
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setScanIsolation(Job, boolean)} instead.
-   */
-  @Deprecated
-  public static void setIsolated(Configuration conf, boolean enable) {
-    InputConfigurator.setScanIsolation(CLASS, conf, enable);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setLocalIterators(Job, boolean)} instead.
-   */
-  @Deprecated
-  public static void setLocalIterators(Configuration conf, boolean enable) {
-    InputConfigurator.setLocalIterators(CLASS, conf, enable);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setConnectorInfo(Job, String, AuthenticationToken)}, {@link #setInputTableName(Job, String)}, and
-   *             {@link #setScanAuthorizations(Job, Authorizations)} instead.
-   */
-  @Deprecated
-  public static void setInputInfo(Configuration conf, String user, byte[] passwd, String table, Authorizations auths) {
-    try {
-      InputConfigurator.setConnectorInfo(CLASS, conf, user, new PasswordToken(passwd));
-    } catch (AccumuloSecurityException e) {
-      throw new RuntimeException(e);
-    }
-    InputConfigurator.setInputTableName(CLASS, conf, table);
-    InputConfigurator.setScanAuthorizations(CLASS, conf, auths);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setZooKeeperInstance(Job, String, String)} instead.
-   */
-  @Deprecated
-  public static void setZooKeeperInstance(Configuration conf, String instanceName, String zooKeepers) {
-    InputConfigurator.setZooKeeperInstance(CLASS, conf, instanceName, zooKeepers);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setMockInstance(Job, String)} instead.
-   */
-  @Deprecated
-  public static void setMockInstance(Configuration conf, String instanceName) {
-    InputConfigurator.setMockInstance(CLASS, conf, instanceName);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setRanges(Job, Collection)} instead.
-   */
-  @Deprecated
-  public static void setRanges(Configuration conf, Collection<Range> ranges) {
-    InputConfigurator.setRanges(CLASS, conf, ranges);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setAutoAdjustRanges(Job, boolean)} instead.
-   */
-  @Deprecated
-  public static void disableAutoAdjustRanges(Configuration conf) {
-    InputConfigurator.setAutoAdjustRanges(CLASS, conf, false);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #addIterator(Job, IteratorSetting)} to add the {@link VersioningIterator} instead.
-   */
-  @Deprecated
-  public static void setMaxVersions(Configuration conf, int maxVersions) throws IOException {
-    IteratorSetting vers = new IteratorSetting(1, "vers", VersioningIterator.class);
-    try {
-      VersioningIterator.setMaxVersions(vers, maxVersions);
-    } catch (IllegalArgumentException e) {
-      throw new IOException(e);
-    }
-    InputConfigurator.addIterator(CLASS, conf, vers);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setOfflineTableScan(Job, boolean)} instead.
-   */
-  @Deprecated
-  public static void setScanOffline(Configuration conf, boolean scanOff) {
-    InputConfigurator.setOfflineTableScan(CLASS, conf, scanOff);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #fetchColumns(Job, Collection)} instead.
-   */
-  @Deprecated
-  public static void fetchColumns(Configuration conf, Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
-    InputConfigurator.fetchColumns(CLASS, conf, columnFamilyColumnQualifierPairs);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #setLogLevel(Job, Level)} instead.
-   */
-  @Deprecated
-  public static void setLogLevel(Configuration conf, Level level) {
-    InputConfigurator.setLogLevel(CLASS, conf, level);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #addIterator(Job, IteratorSetting)} instead.
-   */
-  @Deprecated
-  public static void addIterator(Configuration conf, IteratorSetting cfg) {
-    InputConfigurator.addIterator(CLASS, conf, cfg);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #isIsolated(JobContext)} instead.
-   */
-  @Deprecated
-  protected static boolean isIsolated(Configuration conf) {
-    return InputConfigurator.isIsolated(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #usesLocalIterators(JobContext)} instead.
-   */
-  @Deprecated
-  protected static boolean usesLocalIterators(Configuration conf) {
-    return InputConfigurator.usesLocalIterators(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getPrincipal(JobContext)} instead.
-   */
-  @Deprecated
-  protected static String getPrincipal(Configuration conf) {
-    return InputConfigurator.getPrincipal(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getToken(JobContext)} instead.
-   */
-  @Deprecated
-  protected static byte[] getToken(Configuration conf) {
-    return InputConfigurator.getToken(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getInputTableName(JobContext)} instead.
-   */
-  @Deprecated
-  protected static String getTablename(Configuration conf) {
-    return InputConfigurator.getInputTableName(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getScanAuthorizations(JobContext)} instead.
-   */
-  @Deprecated
-  protected static Authorizations getAuthorizations(Configuration conf) {
-    return InputConfigurator.getScanAuthorizations(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getInstance(JobContext)} instead.
-   */
-  @Deprecated
-  protected static Instance getInstance(Configuration conf) {
-    return InputConfigurator.getInstance(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getTabletLocator(JobContext)} instead.
-   */
-  @Deprecated
-  protected static TabletLocator getTabletLocator(Configuration conf) throws TableNotFoundException {
-    return InputConfigurator.getTabletLocator(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getRanges(JobContext)} instead.
-   */
-  @Deprecated
-  protected static List<Range> getRanges(Configuration conf) throws IOException {
-    return InputConfigurator.getRanges(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getFetchedColumns(JobContext)} instead.
-   */
-  @Deprecated
-  protected static Set<Pair<Text,Text>> getFetchedColumns(Configuration conf) {
-    return InputConfigurator.getFetchedColumns(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getAutoAdjustRanges(JobContext)} instead.
-   */
-  @Deprecated
-  protected static boolean getAutoAdjustRanges(Configuration conf) {
-    return InputConfigurator.getAutoAdjustRanges(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getLogLevel(JobContext)} instead.
-   */
-  @Deprecated
-  protected static Level getLogLevel(Configuration conf) {
-    return InputConfigurator.getLogLevel(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #validateOptions(JobContext)} instead.
-   */
-  @Deprecated
-  protected static void validateOptions(Configuration conf) throws IOException {
-    InputConfigurator.validateOptions(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #addIterator(Job, IteratorSetting)} to add the {@link VersioningIterator} instead.
-   */
-  @Deprecated
-  protected static int getMaxVersions(Configuration conf) {
-    // This is so convoluted, because the only reason to get the number of maxVersions is to construct the same type of IteratorSetting object we have to
-    // deconstruct to get at this option in the first place, but to preserve correct behavior, this appears necessary.
-    List<IteratorSetting> iteratorSettings = InputConfigurator.getIterators(CLASS, conf);
-    for (IteratorSetting setting : iteratorSettings) {
-      if ("vers".equals(setting.getName()) && 1 == setting.getPriority() && VersioningIterator.class.getName().equals(setting.getIteratorClass())) {
-        if (setting.getOptions().containsKey("maxVersions"))
-          return Integer.parseInt(setting.getOptions().get("maxVersions"));
-        else
-          return -1;
-      }
-    }
-    return -1;
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #isOfflineScan(JobContext)} instead.
-   */
-  @Deprecated
-  protected static boolean isOfflineScan(Configuration conf) {
-    return InputConfigurator.isOfflineScan(CLASS, conf);
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getIterators(JobContext)} instead.
-   */
-  @Deprecated
-  protected static List<AccumuloIterator> getIterators(Configuration conf) {
-    List<IteratorSetting> iteratorSettings = InputConfigurator.getIterators(CLASS, conf);
-    List<AccumuloIterator> deprecatedIterators = new ArrayList<AccumuloIterator>(iteratorSettings.size());
-    for (IteratorSetting setting : iteratorSettings) {
-      AccumuloIterator deprecatedIter = new AccumuloIterator(new String(setting.getPriority() + AccumuloIterator.FIELD_SEP + setting.getIteratorClass()
-          + AccumuloIterator.FIELD_SEP + setting.getName()));
-      deprecatedIterators.add(deprecatedIter);
-    }
-    return deprecatedIterators;
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link #getIterators(JobContext)} instead.
-   */
-  @Deprecated
-  protected static List<AccumuloIteratorOption> getIteratorOptions(Configuration conf) {
-    List<IteratorSetting> iteratorSettings = InputConfigurator.getIterators(CLASS, conf);
-    List<AccumuloIteratorOption> deprecatedIteratorOptions = new ArrayList<AccumuloIteratorOption>(iteratorSettings.size());
-    for (IteratorSetting setting : iteratorSettings) {
-      for (Entry<String,String> opt : setting.getOptions().entrySet()) {
-        String deprecatedOption;
-        try {
-          deprecatedOption = new String(setting.getName() + AccumuloIteratorOption.FIELD_SEP + URLEncoder.encode(opt.getKey(), "UTF-8")
-              + AccumuloIteratorOption.FIELD_SEP + URLEncoder.encode(opt.getValue(), "UTF-8"));
-        } catch (UnsupportedEncodingException e) {
-          throw new RuntimeException(e);
-        }
-        deprecatedIteratorOptions.add(new AccumuloIteratorOption(deprecatedOption));
-      }
-    }
-    return deprecatedIteratorOptions;
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link IteratorSetting} instead.
-   */
-  @Deprecated
-  static class AccumuloIterator {
-    
-    private static final String FIELD_SEP = ":";
-    
-    private int priority;
-    private String iteratorClass;
-    private String iteratorName;
-    
-    public AccumuloIterator(int priority, String iteratorClass, String iteratorName) {
-      this.priority = priority;
-      this.iteratorClass = iteratorClass;
-      this.iteratorName = iteratorName;
-    }
-    
-    // Parses out a setting given a string supplied from an earlier toString() call
-    public AccumuloIterator(String iteratorSetting) {
-      // Parse the string to expand the iterator
-      StringTokenizer tokenizer = new StringTokenizer(iteratorSetting, FIELD_SEP);
-      priority = Integer.parseInt(tokenizer.nextToken());
-      iteratorClass = tokenizer.nextToken();
-      iteratorName = tokenizer.nextToken();
-    }
-    
-    public int getPriority() {
-      return priority;
-    }
-    
-    public String getIteratorClass() {
-      return iteratorClass;
-    }
-    
-    public String getIteratorName() {
-      return iteratorName;
-    }
-    
-    @Override
-    public String toString() {
-      return new String(priority + FIELD_SEP + iteratorClass + FIELD_SEP + iteratorName);
-    }
-    
-  }
-  
-  /**
-   * @deprecated since 1.5.0; Use {@link IteratorSetting} instead.
-   */
-  @Deprecated
-  static class AccumuloIteratorOption {
-    private static final String FIELD_SEP = ":";
-    
-    private String iteratorName;
-    private String key;
-    private String value;
-    
-    public AccumuloIteratorOption(String iteratorName, String key, String value) {
-      this.iteratorName = iteratorName;
-      this.key = key;
-      this.value = value;
-    }
-    
-    // Parses out an option given a string supplied from an earlier toString() call
-    public AccumuloIteratorOption(String iteratorOption) {
-      StringTokenizer tokenizer = new StringTokenizer(iteratorOption, FIELD_SEP);
-      this.iteratorName = tokenizer.nextToken();
-      try {
-        this.key = URLDecoder.decode(tokenizer.nextToken(), "UTF-8");
-        this.value = URLDecoder.decode(tokenizer.nextToken(), "UTF-8");
-      } catch (UnsupportedEncodingException e) {
-        throw new RuntimeException(e);
-      }
-    }
-    
-    public String getIteratorName() {
-      return iteratorName;
-    }
-    
-    public String getKey() {
-      return key;
-    }
-    
-    public String getValue() {
-      return value;
-    }
-    
-    @Override
-    public String toString() {
-      try {
-        return new String(iteratorName + FIELD_SEP + URLEncoder.encode(key, "UTF-8") + FIELD_SEP + URLEncoder.encode(value, "UTF-8"));
-      } catch (UnsupportedEncodingException e) {
-        throw new RuntimeException(e);
-      }
-    }
-    
-  }
-  
   // use reflection to pull the Configuration out of the JobContext for Hadoop 1 and Hadoop 2 compatibility
   static Configuration getConfiguration(JobContext context) {
     try {
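The input-side helpers removed here likewise have Job-based counterparts. A sketch using AccumuloInputFormat, a concrete subclass of this class; connection, table, authorization, and range values are placeholders:

import java.util.Collections;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.iterators.user.VersioningIterator;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.mapreduce.Job;

public class InputSetup {
  public static void main(String[] args) throws Exception {
    Job job = new Job();
    job.setInputFormatClass(AccumuloInputFormat.class);
    AccumuloInputFormat.setConnectorInfo(job, "reader", new PasswordToken("secret"));
    AccumuloInputFormat.setZooKeeperInstance(job, "myInstance", "zkhost:2181");
    AccumuloInputFormat.setInputTableName(job, "input_table");
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("public"));
    AccumuloInputFormat.setRanges(job, Collections.singleton(new Range("a", "m")));
    // addIterator(Job, IteratorSetting) replaces the removed setMaxVersions helper.
    IteratorSetting vers = new IteratorSetting(20, "vers", VersioningIterator.class);
    VersioningIterator.setMaxVersions(vers, 1);
    AccumuloInputFormat.addIterator(job, vers);
  }
}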

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
index c0829df..f37994d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
@@ -148,12 +148,6 @@ public class MockInstance implements Instance {
     this.conf = conf;
   }
   
-  @Deprecated
-  @Override
-  public Connector getConnector(org.apache.accumulo.core.security.thrift.AuthInfo auth) throws AccumuloException, AccumuloSecurityException {
-    return getConnector(auth.user, auth.password);
-  }
-  
   @Override
   public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
     Connector conn = new MockConnector(principal, acu, this);
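Test code that used the removed AuthInfo overload can go through the remaining principal/token overload; a small sketch with placeholder names:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class MockExample {
  public static void main(String[] args) throws Exception {
    Instance instance = new MockInstance("testInstance");
    Connector conn = instance.getConnector("testUser", new PasswordToken(""));
    conn.tableOperations().create("scratch");
    System.out.println(conn.tableOperations().exists("scratch"));
  }
}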

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java b/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
index 28f24ef..28cb0bd 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
@@ -32,6 +32,7 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
   
   public abstract String get(Property property);
   
+  @Override
   public abstract Iterator<Entry<String,String>> iterator();
   
   private void checkType(Property property, PropertyType type) {
@@ -45,15 +46,17 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
   
   /**
    * This method returns all properties in a map of string->string under the given prefix property.
-   * @param property the prefix property, and must be of type PropertyType.PREFIX
+   * 
+   * @param property
+   *          the prefix property, and must be of type PropertyType.PREFIX
    * @return a map of strings to strings of the resulting properties
    */
-  public Map<String, String> getAllPropertiesWithPrefix(Property property) {
+  public Map<String,String> getAllPropertiesWithPrefix(Property property) {
     checkType(property, PropertyType.PREFIX);
     
-    Map<String, String> propMap = new HashMap<String, String>(); 
+    Map<String,String> propMap = new HashMap<String,String>();
     
-    for (Entry<String, String> entry : this) {
+    for (Entry<String,String> entry : this) {
       if (entry.getKey().startsWith(property.getKey())) {
         propMap.put(entry.getKey(), entry.getValue());
       }
@@ -153,8 +156,8 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
     return DefaultConfiguration.getInstance();
   }
   
-  // Only here for Shell option-free start-up
   /**
+   * Only here for Shell option-free start-up
    * 
    * @deprecated not for client use
    */
@@ -178,7 +181,6 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
     return maxFilesPerTablet;
   }
   
-  public void invalidateCache() {
-    // overridden in ZooConfiguration
-  }
+  // overridden in ZooConfiguration
+  public void invalidateCache() {}
 }
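getAllPropertiesWithPrefix, as reworked above, takes a PREFIX-typed Property and returns every matching key/value pair. A brief usage sketch against the default configuration:

import java.util.Map;
import java.util.Map.Entry;

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;

public class PrefixExample {
  public static void main(String[] args) {
    AccumuloConfiguration conf = AccumuloConfiguration.getDefaultConfiguration();
    // Property.TABLE_PREFIX is of type PREFIX, as getAllPropertiesWithPrefix requires.
    Map<String,String> tableProps = conf.getAllPropertiesWithPrefix(Property.TABLE_PREFIX);
    for (Entry<String,String> entry : tableProps.entrySet())
      System.out.println(entry.getKey() + " = " + entry.getValue());
  }
}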

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java b/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
index 691ec0e..641ca3a 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java
@@ -43,16 +43,6 @@ public class ColumnUpdate {
     this.val = val;
   }
   
-  /**
-   * @deprecated use setTimestamp(long);
-   * @param timestamp
-   */
-  @Deprecated
-  public void setSystemTimestamp(long timestamp) {
-    if (hasTimestamp)
-      throw new IllegalStateException("Cannot set system timestamp when user set a timestamp");
-  }
-  
   public boolean hasTimestamp() {
     return hasTimestamp;
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/security/thrift/AuthInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/thrift/AuthInfo.java b/core/src/main/java/org/apache/accumulo/core/security/thrift/AuthInfo.java
deleted file mode 100644
index 9df1d29..0000000
--- a/core/src/main/java/org/apache/accumulo/core/security/thrift/AuthInfo.java
+++ /dev/null
@@ -1,616 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.0)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.accumulo.core.security.thrift;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * @deprecated since 1.5
- */
-@SuppressWarnings("all") public class AuthInfo implements org.apache.thrift.TBase<AuthInfo, AuthInfo._Fields>, java.io.Serializable, Cloneable {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AuthInfo");
-
-  private static final org.apache.thrift.protocol.TField USER_FIELD_DESC = new org.apache.thrift.protocol.TField("user", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField PASSWORD_FIELD_DESC = new org.apache.thrift.protocol.TField("password", org.apache.thrift.protocol.TType.STRING, (short)2);
-  private static final org.apache.thrift.protocol.TField INSTANCE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("instanceId", org.apache.thrift.protocol.TType.STRING, (short)3);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new AuthInfoStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new AuthInfoTupleSchemeFactory());
-  }
-
-  public String user; // required
-  public ByteBuffer password; // required
-  public String instanceId; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    USER((short)1, "user"),
-    PASSWORD((short)2, "password"),
-    INSTANCE_ID((short)3, "instanceId");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // USER
-          return USER;
-        case 2: // PASSWORD
-          return PASSWORD;
-        case 3: // INSTANCE_ID
-          return INSTANCE_ID;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.USER, new org.apache.thrift.meta_data.FieldMetaData("user", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.PASSWORD, new org.apache.thrift.meta_data.FieldMetaData("password", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
-    tmpMap.put(_Fields.INSTANCE_ID, new org.apache.thrift.meta_data.FieldMetaData("instanceId", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AuthInfo.class, metaDataMap);
-  }
-
-  public AuthInfo() {
-  }
-
-  public AuthInfo(
-    String user,
-    ByteBuffer password,
-    String instanceId)
-  {
-    this();
-    this.user = user;
-    this.password = password;
-    this.instanceId = instanceId;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public AuthInfo(AuthInfo other) {
-    if (other.isSetUser()) {
-      this.user = other.user;
-    }
-    if (other.isSetPassword()) {
-      this.password = org.apache.thrift.TBaseHelper.copyBinary(other.password);
-;
-    }
-    if (other.isSetInstanceId()) {
-      this.instanceId = other.instanceId;
-    }
-  }
-
-  public AuthInfo deepCopy() {
-    return new AuthInfo(this);
-  }
-
-  @Override
-  public void clear() {
-    this.user = null;
-    this.password = null;
-    this.instanceId = null;
-  }
-
-  public String getUser() {
-    return this.user;
-  }
-
-  public AuthInfo setUser(String user) {
-    this.user = user;
-    return this;
-  }
-
-  public void unsetUser() {
-    this.user = null;
-  }
-
-  /** Returns true if field user is set (has been assigned a value) and false otherwise */
-  public boolean isSetUser() {
-    return this.user != null;
-  }
-
-  public void setUserIsSet(boolean value) {
-    if (!value) {
-      this.user = null;
-    }
-  }
-
-  public byte[] getPassword() {
-    setPassword(org.apache.thrift.TBaseHelper.rightSize(password));
-    return password == null ? null : password.array();
-  }
-
-  public ByteBuffer bufferForPassword() {
-    return password;
-  }
-
-  public AuthInfo setPassword(byte[] password) {
-    setPassword(password == null ? (ByteBuffer)null : ByteBuffer.wrap(password));
-    return this;
-  }
-
-  public AuthInfo setPassword(ByteBuffer password) {
-    this.password = password;
-    return this;
-  }
-
-  public void unsetPassword() {
-    this.password = null;
-  }
-
-  /** Returns true if field password is set (has been assigned a value) and false otherwise */
-  public boolean isSetPassword() {
-    return this.password != null;
-  }
-
-  public void setPasswordIsSet(boolean value) {
-    if (!value) {
-      this.password = null;
-    }
-  }
-
-  public String getInstanceId() {
-    return this.instanceId;
-  }
-
-  public AuthInfo setInstanceId(String instanceId) {
-    this.instanceId = instanceId;
-    return this;
-  }
-
-  public void unsetInstanceId() {
-    this.instanceId = null;
-  }
-
-  /** Returns true if field instanceId is set (has been assigned a value) and false otherwise */
-  public boolean isSetInstanceId() {
-    return this.instanceId != null;
-  }
-
-  public void setInstanceIdIsSet(boolean value) {
-    if (!value) {
-      this.instanceId = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case USER:
-      if (value == null) {
-        unsetUser();
-      } else {
-        setUser((String)value);
-      }
-      break;
-
-    case PASSWORD:
-      if (value == null) {
-        unsetPassword();
-      } else {
-        setPassword((ByteBuffer)value);
-      }
-      break;
-
-    case INSTANCE_ID:
-      if (value == null) {
-        unsetInstanceId();
-      } else {
-        setInstanceId((String)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case USER:
-      return getUser();
-
-    case PASSWORD:
-      return getPassword();
-
-    case INSTANCE_ID:
-      return getInstanceId();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case USER:
-      return isSetUser();
-    case PASSWORD:
-      return isSetPassword();
-    case INSTANCE_ID:
-      return isSetInstanceId();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof AuthInfo)
-      return this.equals((AuthInfo)that);
-    return false;
-  }
-
-  public boolean equals(AuthInfo that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_user = true && this.isSetUser();
-    boolean that_present_user = true && that.isSetUser();
-    if (this_present_user || that_present_user) {
-      if (!(this_present_user && that_present_user))
-        return false;
-      if (!this.user.equals(that.user))
-        return false;
-    }
-
-    boolean this_present_password = true && this.isSetPassword();
-    boolean that_present_password = true && that.isSetPassword();
-    if (this_present_password || that_present_password) {
-      if (!(this_present_password && that_present_password))
-        return false;
-      if (!this.password.equals(that.password))
-        return false;
-    }
-
-    boolean this_present_instanceId = true && this.isSetInstanceId();
-    boolean that_present_instanceId = true && that.isSetInstanceId();
-    if (this_present_instanceId || that_present_instanceId) {
-      if (!(this_present_instanceId && that_present_instanceId))
-        return false;
-      if (!this.instanceId.equals(that.instanceId))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    return 0;
-  }
-
-  public int compareTo(AuthInfo other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-    AuthInfo typedOther = (AuthInfo)other;
-
-    lastComparison = Boolean.valueOf(isSetUser()).compareTo(typedOther.isSetUser());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetUser()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.user, typedOther.user);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetPassword()).compareTo(typedOther.isSetPassword());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetPassword()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.password, typedOther.password);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetInstanceId()).compareTo(typedOther.isSetInstanceId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetInstanceId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.instanceId, typedOther.instanceId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("AuthInfo(");
-    boolean first = true;
-
-    sb.append("user:");
-    if (this.user == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.user);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("password:");
-    if (this.password == null) {
-      sb.append("null");
-    } else {
-      org.apache.thrift.TBaseHelper.toString(this.password, sb);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("instanceId:");
-    if (this.instanceId == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.instanceId);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class AuthInfoStandardSchemeFactory implements SchemeFactory {
-    public AuthInfoStandardScheme getScheme() {
-      return new AuthInfoStandardScheme();
-    }
-  }
-
-  private static class AuthInfoStandardScheme extends StandardScheme<AuthInfo> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, AuthInfo struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // USER
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.user = iprot.readString();
-              struct.setUserIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // PASSWORD
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.password = iprot.readBinary();
-              struct.setPasswordIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // INSTANCE_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.instanceId = iprot.readString();
-              struct.setInstanceIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-
-      // check for required fields of primitive type, which can't be checked in the validate method
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, AuthInfo struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.user != null) {
-        oprot.writeFieldBegin(USER_FIELD_DESC);
-        oprot.writeString(struct.user);
-        oprot.writeFieldEnd();
-      }
-      if (struct.password != null) {
-        oprot.writeFieldBegin(PASSWORD_FIELD_DESC);
-        oprot.writeBinary(struct.password);
-        oprot.writeFieldEnd();
-      }
-      if (struct.instanceId != null) {
-        oprot.writeFieldBegin(INSTANCE_ID_FIELD_DESC);
-        oprot.writeString(struct.instanceId);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class AuthInfoTupleSchemeFactory implements SchemeFactory {
-    public AuthInfoTupleScheme getScheme() {
-      return new AuthInfoTupleScheme();
-    }
-  }
-
-  private static class AuthInfoTupleScheme extends TupleScheme<AuthInfo> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, AuthInfo struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      BitSet optionals = new BitSet();
-      if (struct.isSetUser()) {
-        optionals.set(0);
-      }
-      if (struct.isSetPassword()) {
-        optionals.set(1);
-      }
-      if (struct.isSetInstanceId()) {
-        optionals.set(2);
-      }
-      oprot.writeBitSet(optionals, 3);
-      if (struct.isSetUser()) {
-        oprot.writeString(struct.user);
-      }
-      if (struct.isSetPassword()) {
-        oprot.writeBinary(struct.password);
-      }
-      if (struct.isSetInstanceId()) {
-        oprot.writeString(struct.instanceId);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, AuthInfo struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(3);
-      if (incoming.get(0)) {
-        struct.user = iprot.readString();
-        struct.setUserIsSet(true);
-      }
-      if (incoming.get(1)) {
-        struct.password = iprot.readBinary();
-        struct.setPasswordIsSet(true);
-      }
-      if (incoming.get(2)) {
-        struct.instanceId = iprot.readString();
-        struct.setInstanceIdIsSet(true);
-      }
-    }
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/security/thrift/SecurityErrorCode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/thrift/SecurityErrorCode.java b/core/src/main/java/org/apache/accumulo/core/security/thrift/SecurityErrorCode.java
deleted file mode 100644
index 7aa0dd2..0000000
--- a/core/src/main/java/org/apache/accumulo/core/security/thrift/SecurityErrorCode.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.0)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.accumulo.core.security.thrift;
-
-
-import java.util.Map;
-import java.util.HashMap;
-import org.apache.thrift.TEnum;
-
-/**
- * @deprecated since 1.5, see org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode
- */
-@SuppressWarnings("all") public enum SecurityErrorCode implements org.apache.thrift.TEnum {
-  DEFAULT_SECURITY_ERROR(0),
-  BAD_CREDENTIALS(1),
-  PERMISSION_DENIED(2),
-  USER_DOESNT_EXIST(3),
-  CONNECTION_ERROR(4),
-  USER_EXISTS(5),
-  GRANT_INVALID(6),
-  BAD_AUTHORIZATIONS(7),
-  INVALID_INSTANCEID(8),
-  TABLE_DOESNT_EXIST(9),
-  UNSUPPORTED_OPERATION(10),
-  INVALID_TOKEN(11),
-  AUTHENTICATOR_FAILED(12),
-  AUTHORIZOR_FAILED(13),
-  PERMISSIONHANDLER_FAILED(14),
-  TOKEN_EXPIRED(15),
-  SERIALIZATION_ERROR(16),
-  INSUFFICIENT_PROPERTIES(17);
-
-  private final int value;
-
-  private SecurityErrorCode(int value) {
-    this.value = value;
-  }
-
-  /**
-   * Get the integer value of this enum value, as defined in the Thrift IDL.
-   */
-  public int getValue() {
-    return value;
-  }
-
-  /**
-   * Find a the enum type by its integer value, as defined in the Thrift IDL.
-   * @return null if the value is not found.
-   */
-  public static SecurityErrorCode findByValue(int value) { 
-    switch (value) {
-      case 0:
-        return DEFAULT_SECURITY_ERROR;
-      case 1:
-        return BAD_CREDENTIALS;
-      case 2:
-        return PERMISSION_DENIED;
-      case 3:
-        return USER_DOESNT_EXIST;
-      case 4:
-        return CONNECTION_ERROR;
-      case 5:
-        return USER_EXISTS;
-      case 6:
-        return GRANT_INVALID;
-      case 7:
-        return BAD_AUTHORIZATIONS;
-      case 8:
-        return INVALID_INSTANCEID;
-      case 9:
-        return TABLE_DOESNT_EXIST;
-      case 10:
-        return UNSUPPORTED_OPERATION;
-      case 11:
-        return INVALID_TOKEN;
-      case 12:
-        return AUTHENTICATOR_FAILED;
-      case 13:
-        return AUTHORIZOR_FAILED;
-      case 14:
-        return PERMISSIONHANDLER_FAILED;
-      case 15:
-        return TOKEN_EXPIRED;
-      case 16:
-        return SERIALIZATION_ERROR;
-      case 17:
-        return INSUFFICIENT_PROPERTIES;
-      default:
-        return null;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/security/thrift/ThriftSecurityException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/thrift/ThriftSecurityException.java b/core/src/main/java/org/apache/accumulo/core/security/thrift/ThriftSecurityException.java
deleted file mode 100644
index c7b2fcb..0000000
--- a/core/src/main/java/org/apache/accumulo/core/security/thrift/ThriftSecurityException.java
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.0)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package org.apache.accumulo.core.security.thrift;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * @deprecated since 1.5, see org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException
- */
-@SuppressWarnings("all") public class ThriftSecurityException extends TException implements org.apache.thrift.TBase<ThriftSecurityException, ThriftSecurityException._Fields>, java.io.Serializable, Cloneable {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ThriftSecurityException");
-
-  private static final org.apache.thrift.protocol.TField USER_FIELD_DESC = new org.apache.thrift.protocol.TField("user", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("code", org.apache.thrift.protocol.TType.I32, (short)2);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new ThriftSecurityExceptionStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new ThriftSecurityExceptionTupleSchemeFactory());
-  }
-
-  public String user; // required
-  /**
-   * 
-   * @see SecurityErrorCode
-   */
-  public SecurityErrorCode code; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    USER((short)1, "user"),
-    /**
-     * 
-     * @see SecurityErrorCode
-     */
-    CODE((short)2, "code");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // USER
-          return USER;
-        case 2: // CODE
-          return CODE;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.USER, new org.apache.thrift.meta_data.FieldMetaData("user", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.CODE, new org.apache.thrift.meta_data.FieldMetaData("code", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SecurityErrorCode.class)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ThriftSecurityException.class, metaDataMap);
-  }
-
-  public ThriftSecurityException() {
-  }
-
-  public ThriftSecurityException(
-    String user,
-    SecurityErrorCode code)
-  {
-    this();
-    this.user = user;
-    this.code = code;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public ThriftSecurityException(ThriftSecurityException other) {
-    if (other.isSetUser()) {
-      this.user = other.user;
-    }
-    if (other.isSetCode()) {
-      this.code = other.code;
-    }
-  }
-
-  public ThriftSecurityException deepCopy() {
-    return new ThriftSecurityException(this);
-  }
-
-  @Override
-  public void clear() {
-    this.user = null;
-    this.code = null;
-  }
-
-  public String getUser() {
-    return this.user;
-  }
-
-  public ThriftSecurityException setUser(String user) {
-    this.user = user;
-    return this;
-  }
-
-  public void unsetUser() {
-    this.user = null;
-  }
-
-  /** Returns true if field user is set (has been assigned a value) and false otherwise */
-  public boolean isSetUser() {
-    return this.user != null;
-  }
-
-  public void setUserIsSet(boolean value) {
-    if (!value) {
-      this.user = null;
-    }
-  }
-
-  /**
-   * 
-   * @see SecurityErrorCode
-   */
-  public SecurityErrorCode getCode() {
-    return this.code;
-  }
-
-  /**
-   * 
-   * @see SecurityErrorCode
-   */
-  public ThriftSecurityException setCode(SecurityErrorCode code) {
-    this.code = code;
-    return this;
-  }
-
-  public void unsetCode() {
-    this.code = null;
-  }
-
-  /** Returns true if field code is set (has been assigned a value) and false otherwise */
-  public boolean isSetCode() {
-    return this.code != null;
-  }
-
-  public void setCodeIsSet(boolean value) {
-    if (!value) {
-      this.code = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case USER:
-      if (value == null) {
-        unsetUser();
-      } else {
-        setUser((String)value);
-      }
-      break;
-
-    case CODE:
-      if (value == null) {
-        unsetCode();
-      } else {
-        setCode((SecurityErrorCode)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case USER:
-      return getUser();
-
-    case CODE:
-      return getCode();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case USER:
-      return isSetUser();
-    case CODE:
-      return isSetCode();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof ThriftSecurityException)
-      return this.equals((ThriftSecurityException)that);
-    return false;
-  }
-
-  public boolean equals(ThriftSecurityException that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_user = true && this.isSetUser();
-    boolean that_present_user = true && that.isSetUser();
-    if (this_present_user || that_present_user) {
-      if (!(this_present_user && that_present_user))
-        return false;
-      if (!this.user.equals(that.user))
-        return false;
-    }
-
-    boolean this_present_code = true && this.isSetCode();
-    boolean that_present_code = true && that.isSetCode();
-    if (this_present_code || that_present_code) {
-      if (!(this_present_code && that_present_code))
-        return false;
-      if (!this.code.equals(that.code))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    return 0;
-  }
-
-  public int compareTo(ThriftSecurityException other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-    ThriftSecurityException typedOther = (ThriftSecurityException)other;
-
-    lastComparison = Boolean.valueOf(isSetUser()).compareTo(typedOther.isSetUser());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetUser()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.user, typedOther.user);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetCode()).compareTo(typedOther.isSetCode());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetCode()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.code, typedOther.code);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("ThriftSecurityException(");
-    boolean first = true;
-
-    sb.append("user:");
-    if (this.user == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.user);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("code:");
-    if (this.code == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.code);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class ThriftSecurityExceptionStandardSchemeFactory implements SchemeFactory {
-    public ThriftSecurityExceptionStandardScheme getScheme() {
-      return new ThriftSecurityExceptionStandardScheme();
-    }
-  }
-
-  private static class ThriftSecurityExceptionStandardScheme extends StandardScheme<ThriftSecurityException> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ThriftSecurityException struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // USER
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.user = iprot.readString();
-              struct.setUserIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // CODE
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.code = SecurityErrorCode.findByValue(iprot.readI32());
-              struct.setCodeIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-
-      // check for required fields of primitive type, which can't be checked in the validate method
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ThriftSecurityException struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.user != null) {
-        oprot.writeFieldBegin(USER_FIELD_DESC);
-        oprot.writeString(struct.user);
-        oprot.writeFieldEnd();
-      }
-      if (struct.code != null) {
-        oprot.writeFieldBegin(CODE_FIELD_DESC);
-        oprot.writeI32(struct.code.getValue());
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class ThriftSecurityExceptionTupleSchemeFactory implements SchemeFactory {
-    public ThriftSecurityExceptionTupleScheme getScheme() {
-      return new ThriftSecurityExceptionTupleScheme();
-    }
-  }
-
-  private static class ThriftSecurityExceptionTupleScheme extends TupleScheme<ThriftSecurityException> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ThriftSecurityException struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      BitSet optionals = new BitSet();
-      if (struct.isSetUser()) {
-        optionals.set(0);
-      }
-      if (struct.isSetCode()) {
-        optionals.set(1);
-      }
-      oprot.writeBitSet(optionals, 2);
-      if (struct.isSetUser()) {
-        oprot.writeString(struct.user);
-      }
-      if (struct.isSetCode()) {
-        oprot.writeI32(struct.code.getValue());
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ThriftSecurityException struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(2);
-      if (incoming.get(0)) {
-        struct.user = iprot.readString();
-        struct.setUserIsSet(true);
-      }
-      if (incoming.get(1)) {
-        struct.code = SecurityErrorCode.findByValue(iprot.readI32());
-        struct.setCodeIsSet(true);
-      }
-    }
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index ef3e519..2b8d96e 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -140,6 +140,7 @@ import org.apache.accumulo.core.util.shell.commands.UserCommand;
 import org.apache.accumulo.core.util.shell.commands.UserPermissionsCommand;
 import org.apache.accumulo.core.util.shell.commands.UsersCommand;
 import org.apache.accumulo.core.util.shell.commands.WhoAmICommand;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
@@ -396,11 +397,10 @@ public class Shell extends ShellOptions {
     }
   }
   
-  @SuppressWarnings("deprecation")
   private static Instance getDefaultInstance(AccumuloConfiguration conf) {
     String keepers = conf.get(Property.INSTANCE_ZK_HOST);
     Path instanceDir = new Path(conf.get(Property.INSTANCE_DFS_DIR), "instance_id");
-    return new ZooKeeperInstance(UUID.fromString(ZooKeeperInstance.getInstanceIDFromHdfs(instanceDir)), keepers);
+    return new ZooKeeperInstance(UUID.fromString(ZooUtil.getInstanceIDFromHdfs(instanceDir)), keepers);
   }
   
   public Connector getConnector() {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a69a9d68/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FateCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FateCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FateCommand.java
index 8a4d0ce..6bf4f30 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FateCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FateCommand.java
@@ -137,16 +137,15 @@ public class FateCommand extends Command {
     return failedCommand ? 1 : 0;
   }
   
-  @SuppressWarnings("deprecation")
   protected synchronized IZooReaderWriter getZooReaderWriter(Instance instance, String secret) {
-
+    
     if (secret == null) {
+      @SuppressWarnings("deprecation")
       AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
       secret = conf.get(Property.INSTANCE_SECRET);
     }
     
-    return new ZooReaderWriter(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut(), SCHEME,
-        (USER + ":" + secret).getBytes());
+    return new ZooReaderWriter(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut(), SCHEME, (USER + ":" + secret).getBytes());
   }
   
   @Override


[18/50] git commit: Merge branch '1.5.1-SNAPSHOT'

Posted by kt...@apache.org.
Merge branch '1.5.1-SNAPSHOT'


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/5cfb88b2
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/5cfb88b2
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/5cfb88b2

Branch: refs/heads/ACCUMULO-1000
Commit: 5cfb88b219be5dbf0b4e0269593ca1424019d4f3
Parents: 8b0f573 7b61723
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 14:09:52 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 14:09:52 2013 -0400

----------------------------------------------------------------------
 .../main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java   | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------



[46/50] git commit: Merge remote-tracking branch 'origin' into ACCUMULO-1000

Posted by kt...@apache.org.
Merge remote-tracking branch 'origin' into ACCUMULO-1000


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/96a4815a
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/96a4815a
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/96a4815a

Branch: refs/heads/ACCUMULO-1000
Commit: 96a4815aa31417839516f8327720c74f25bdc4b1
Parents: 5e90858 a943f32
Author: Keith Turner <kt...@apache.org>
Authored: Mon Jul 22 17:03:38 2013 -0400
Committer: Keith Turner <kt...@apache.org>
Committed: Mon Jul 22 17:03:38 2013 -0400

----------------------------------------------------------------------
 .../client/admin/SecurityOperationsImpl.java    |   2 +-
 .../core/client/impl/BatchWriterImpl.java       |   4 +-
 .../core/client/impl/ConnectorImpl.java         |   5 +-
 .../client/security/tokens/PasswordToken.java   |   7 +-
 .../core/security/CredentialHelper.java         |   2 +-
 .../accumulo/core/security/Credentials.java     |  18 +-
 .../chapters/troubleshooting.tex                |   4 +-
 minicluster/pom.xml                             |   4 +
 .../minicluster/MiniAccumuloCluster.java        |  64 +++++-
 .../minicluster/MiniAccumuloConfig.java         |   9 +
 pom.xml                                         |   5 +
 server/pom.xml                                  |   6 +
 .../server/client/ClientServiceHandler.java     |  14 +-
 .../accumulo/server/client/HdfsZooInstance.java |   3 -
 .../client/security/token/SystemToken.java      |  30 ---
 .../server/gc/GarbageCollectWriteAheadLogs.java |   8 +-
 .../server/gc/SimpleGarbageCollector.java       |   9 +-
 .../accumulo/server/master/LiveTServerSet.java  |  24 +--
 .../apache/accumulo/server/master/Master.java   |   8 +-
 .../server/master/TabletGroupWatcher.java       |  12 +-
 .../master/balancer/TableLoadBalancer.java      |   4 +-
 .../server/master/balancer/TabletBalancer.java  |   8 +-
 .../server/master/state/MetaDataStateStore.java |   4 +-
 .../server/master/tableOps/BulkImport.java      |   4 +-
 .../server/master/tableOps/CloneTable.java      |  10 +-
 .../server/master/tableOps/CreateTable.java     |  12 +-
 .../server/master/tableOps/DeleteTable.java     |   6 +-
 .../server/master/tableOps/ImportTable.java     |   8 +-
 .../apache/accumulo/server/monitor/Monitor.java |   6 +-
 .../monitor/servlets/TServersServlet.java       |  18 +-
 .../server/monitor/servlets/TablesServlet.java  |   6 +-
 .../accumulo/server/problems/ProblemReport.java |   6 +-
 .../server/problems/ProblemReports.java         |   8 +-
 .../security/AuditedSecurityOperation.java      |   2 +-
 .../server/security/SecurityConstants.java      | 111 ----------
 .../server/security/SecurityOperation.java      | 207 +++++++++----------
 .../server/security/SystemCredentials.java      | 132 ++++++++++++
 .../accumulo/server/tabletserver/Tablet.java    |  34 +--
 .../server/tabletserver/TabletServer.java       |  78 ++++---
 .../org/apache/accumulo/server/util/Admin.java  |   6 +-
 .../server/util/FindOfflineTablets.java         |   6 +-
 .../apache/accumulo/server/util/Initialize.java |   4 +-
 .../accumulo/server/util/MetadataTableUtil.java |  16 +-
 .../server/security/SystemCredentialsTest.java  |  67 ++++++
 server/src/test/resources/accumulo-site.xml     |  32 +++
 test/pom.xml                                    |   6 +
 .../apache/accumulo/test/GetMasterStats.java    |   6 +-
 .../org/apache/accumulo/test/TestIngest.java    |   5 +-
 .../apache/accumulo/test/TestRandomDeletes.java |  23 ++-
 .../continuous/ContinuousStatsCollector.java    |   7 +-
 .../test/functional/SplitRecoveryTest.java      |  26 +--
 .../metadata/MetadataBatchScanTest.java         |   6 +-
 .../test/performance/thrift/NullTserver.java    |   4 +-
 .../test/randomwalk/concurrent/Shutdown.java    |  24 +--
 .../test/randomwalk/concurrent/StartAll.java    |   8 +-
 .../randomwalk/security/WalkingSecurity.java    |   3 +-
 .../accumulo/test/functional/AddSplitIT.java    |  30 +--
 .../test/functional/BadIteratorMincIT.java      |  32 +--
 .../test/functional/BatchWriterFlushIT.java     |  24 ++-
 .../accumulo/test/functional/BinaryIT.java      |   2 +-
 .../accumulo/test/functional/BulkFileIT.java    |  13 +-
 .../apache/accumulo/test/functional/BulkIT.java |  15 +-
 .../accumulo/test/functional/ClassLoaderIT.java |   4 +-
 .../accumulo/test/functional/CombinerIT.java    |  20 +-
 .../accumulo/test/functional/ConstraintIT.java  |  40 ++--
 .../test/functional/CreateAndUseIT.java         |  27 +--
 .../test/functional/CreateManyScannersIT.java   |   9 +-
 .../accumulo/test/functional/DeleteIT.java      |   2 +-
 .../accumulo/test/functional/DeleteRowsIT.java  |  44 ++--
 .../test/functional/DeleteRowsSplitIT.java      |  24 +--
 .../test/functional/FateStarvationIT.java       |  14 +-
 .../test/functional/HalfDeadTServerIT.java      |  17 +-
 .../accumulo/test/functional/LogicalTimeIT.java |  41 ++--
 .../accumulo/test/functional/MacTest.java       |   5 +-
 .../accumulo/test/functional/MapReduceIT.java   |   8 +-
 .../accumulo/test/functional/MergeIT.java       |  69 ++++---
 .../accumulo/test/functional/MergeMetaIT.java   |   2 +-
 .../accumulo/test/functional/NativeMapIT.java   |   4 +-
 .../accumulo/test/functional/PermissionsIT.java | 108 +++++-----
 .../accumulo/test/functional/RenameIT.java      |  13 +-
 .../accumulo/test/functional/RestartIT.java     |   5 +-
 .../test/functional/RestartStressIT.java        |   5 +-
 .../test/functional/ScanIteratorIT.java         |  11 +-
 .../accumulo/test/functional/ScanRangeIT.java   |  18 +-
 .../test/functional/ServerSideErrorIT.java      |  19 +-
 .../accumulo/test/functional/SimpleMacIT.java   |  82 ++++++++
 .../test/functional/SparseColumnFamilyIT.java   |  13 +-
 .../test/functional/SplitRecoveryIT.java        |   4 +-
 .../accumulo/test/functional/StartIT.java       |   8 +-
 .../accumulo/test/functional/TableIT.java       |  25 +--
 .../accumulo/test/functional/TimeoutIT.java     |  16 +-
 .../accumulo/test/functional/VisibilityIT.java  |  68 +++---
 .../test/functional/WriteAheadLogIT.java        |   3 +-
 .../accumulo/test/functional/WriteLotsIT.java   |   7 +-
 .../accumulo/test/functional/ZooCacheIT.java    |   8 +-
 95 files changed, 1145 insertions(+), 865 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/96a4815a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/96a4815a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
----------------------------------------------------------------------
diff --cc server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index 94a8cd6,2b98331..64b6177
--- a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@@ -310,22 -325,13 +327,21 @@@ public class SecurityOperation 
    
    public boolean canWrite(TCredentials credentials, String table) throws ThriftSecurityException {
      authenticate(credentials);
-     return hasTablePermission(credentials.getPrincipal(), table, TablePermission.WRITE, true);
+     return hasTablePermission(credentials, table, TablePermission.WRITE, true);
    }
    
 +  public boolean canConditionallyUpdate(TCredentials credentials, String tableID, List<ByteBuffer> authorizations) throws ThriftSecurityException {
 +    
 +    authenticate(credentials);
 +    
 +    return hasTablePermission(credentials.getPrincipal(), tableID, TablePermission.WRITE, true)
 +        && hasTablePermission(credentials.getPrincipal(), tableID, TablePermission.READ, true);
 +  }
 +
    public boolean canSplitTablet(TCredentials credentials, String table) throws ThriftSecurityException {
      authenticate(credentials);
-     return hasSystemPermission(credentials.getPrincipal(), SystemPermission.ALTER_TABLE, false)
-         || hasSystemPermission(credentials.getPrincipal(), SystemPermission.SYSTEM, false)
-         || hasTablePermission(credentials.getPrincipal(), table, TablePermission.ALTER_TABLE, false);
+     return hasSystemPermission(credentials, SystemPermission.ALTER_TABLE, false) || hasSystemPermission(credentials, SystemPermission.SYSTEM, false)
+         || hasTablePermission(credentials, table, TablePermission.ALTER_TABLE, false);
    }
    
    /**

http://git-wip-us.apache.org/repos/asf/accumulo/blob/96a4815a/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/96a4815a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --cc server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 8f33488,ceed0ee..c65c55d
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@@ -163,10 -156,9 +163,10 @@@ import org.apache.accumulo.server.metri
  import org.apache.accumulo.server.problems.ProblemReport;
  import org.apache.accumulo.server.problems.ProblemReports;
  import org.apache.accumulo.server.security.AuditedSecurityOperation;
- import org.apache.accumulo.server.security.SecurityConstants;
  import org.apache.accumulo.server.security.SecurityOperation;
+ import org.apache.accumulo.server.security.SystemCredentials;
  import org.apache.accumulo.server.tabletserver.Compactor.CompactionInfo;
 +import org.apache.accumulo.server.tabletserver.RowLocks.RowLock;
  import org.apache.accumulo.server.tabletserver.Tablet.CommitSession;
  import org.apache.accumulo.server.tabletserver.Tablet.KVEntry;
  import org.apache.accumulo.server.tabletserver.Tablet.LookupResult;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/96a4815a/test/src/main/java/org/apache/accumulo/test/performance/thrift/NullTserver.java
----------------------------------------------------------------------


[40/50] git commit: ACCUMULO-1586 committing Michael Berman's patch

Posted by kt...@apache.org.
ACCUMULO-1586 committing Michael Berman's patch


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/1a48f7c3
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1a48f7c3
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1a48f7c3

Branch: refs/heads/ACCUMULO-1000
Commit: 1a48f7c34da98f4ba1fe3fac133081ee5f35caca
Parents: 8545123
Author: Eric Newton <ec...@apache.org>
Authored: Mon Jul 22 14:20:49 2013 -0400
Committer: Eric Newton <ec...@apache.org>
Committed: Mon Jul 22 14:20:49 2013 -0400

----------------------------------------------------------------------
 .../accumulo/server/metrics/AbstractMetricsImpl.java     |  1 -
 .../org/apache/accumulo/server/util/TServerUtils.java    | 11 ++++++++---
 2 files changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/1a48f7c3/server/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java b/server/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java
index a047507..9735371 100644
--- a/server/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java
+++ b/server/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java
@@ -138,7 +138,6 @@ public abstract class AbstractMetricsImpl {
     if (null == getObjectName())
       throw new IllegalArgumentException("MBean object name must be set.");
     mbs.registerMBean(this, getObjectName());
-    
     setupLogging();
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/1a48f7c3/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java b/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
index be14023..0c751f5 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
@@ -101,6 +101,8 @@ public class TServerUtils {
     boolean portSearch = false;
     if (portSearchProperty != null)
       portSearch = conf.getBoolean(portSearchProperty);
+    // create the TimedProcessor outside the port search loop so we don't try to register the same metrics mbean more than once
+    TServerUtils.TimedProcessor timedProcessor = new TServerUtils.TimedProcessor(processor, serverName, threadName);
     Random random = new Random();
     for (int j = 0; j < 100; j++) {
       
@@ -116,7 +118,7 @@ public class TServerUtils {
         if (port > 65535)
           port = 1024 + port % (65535 - 1024);
         try {
-          return TServerUtils.startTServer(port, processor, serverName, threadName, minThreads, timeBetweenThreadChecks, maxMessageSize);
+          return TServerUtils.startTServer(port, timedProcessor, serverName, threadName, minThreads, timeBetweenThreadChecks, maxMessageSize);
         } catch (Exception ex) {
           log.info("Unable to use port " + port + ", retrying. (Thread Name = " + threadName + ")");
           UtilWaitThread.sleep(250);
@@ -246,7 +248,6 @@ public class TServerUtils {
       }
     }, timeBetweenThreadChecks, timeBetweenThreadChecks);
     options.executorService(pool);
-    processor = new TServerUtils.TimedProcessor(processor, serverName, threadName);
     options.processorFactory(new TProcessorFactory(processor));
     return new ServerPort(new THsHaServer(options), port);
   }
@@ -268,13 +269,17 @@ public class TServerUtils {
     TThreadPoolServer.Args options = new TThreadPoolServer.Args(transport);
     options.protocolFactory(ThriftUtil.protocolFactory());
     options.transportFactory(ThriftUtil.transportFactory());
-    processor = new TServerUtils.TimedProcessor(processor, serverName, threadName);
     options.processorFactory(new ClientInfoProcessorFactory(processor));
     return new ServerPort(new TThreadPoolServer(options), port);
   }
   
   public static ServerPort startTServer(int port, TProcessor processor, String serverName, String threadName, int numThreads, long timeBetweenThreadChecks, long maxMessageSize)
       throws TTransportException {
+    return startTServer(port, new TimedProcessor(processor, serverName, threadName), serverName, threadName, numThreads, timeBetweenThreadChecks, maxMessageSize);
+  }
+  
+  public static ServerPort startTServer(int port, TimedProcessor processor, String serverName, String threadName, int numThreads, long timeBetweenThreadChecks, long maxMessageSize)
+      throws TTransportException {
     ServerPort result = startHsHaServer(port, processor, serverName, threadName, numThreads, timeBetweenThreadChecks, maxMessageSize);
     // ServerPort result = startThreadPoolServer(port, processor, serverName, threadName, -1);
     final TServer finalServer = result.server;
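
A note on why the ACCUMULO-1586 change above works: TimedProcessor registers a metrics MBean when it is constructed, so it must be created exactly once, outside the port-search retry loop, or every retry attempts to register the same MBean name again. The stand-alone sketch below illustrates the underlying JMX behaviour; the class names and the ObjectName are invented for the example and are not Accumulo code.

    import java.lang.management.ManagementFactory;

    import javax.management.InstanceAlreadyExistsException;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class RegisterOnceSketch {

      // Standard MBean: JMX looks for an interface named <ImplClassName>MBean.
      public interface ExampleMetricsMBean {
        int getAttempts();
      }

      public static class ExampleMetrics implements ExampleMetricsMBean {
        volatile int attempts;

        @Override
        public int getAttempts() {
          return attempts;
        }
      }

      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("example:type=RegisterOnce");

        // Anti-pattern: registering inside a retry loop fails on the second pass,
        // because the ObjectName is already taken.
        for (int attempt = 0; attempt < 2; attempt++) {
          try {
            mbs.registerMBean(new ExampleMetrics(), name);
          } catch (InstanceAlreadyExistsException e) {
            System.out.println("attempt " + attempt + ": " + e);
          }
        }

        // The shape of the fix: register once, up front, then retry only the flaky step.
        mbs.unregisterMBean(name);
        ExampleMetrics metrics = new ExampleMetrics();
        mbs.registerMBean(metrics, name);
        for (int attempt = 0; attempt < 2; attempt++) {
          metrics.attempts++; // retry only the failure-prone work (e.g. binding a port)
        }
        System.out.println("attempts recorded: " + metrics.getAttempts());
      }
    }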


[49/50] git commit: ACCUMULO-1000 Added timeout & config to conditional writer. Added unit test

Posted by kt...@apache.org.
ACCUMULO-1000 Added timeout & config to conditional writer.  Added unit test


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/79019ef0
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/79019ef0
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/79019ef0

Branch: refs/heads/ACCUMULO-1000
Commit: 79019ef0477b76966e2aff7259443aa9cd2f1cce
Parents: 5183ae4
Author: Keith Turner <kt...@apache.org>
Authored: Tue Jul 23 12:07:41 2013 -0400
Committer: Keith Turner <kt...@apache.org>
Committed: Tue Jul 23 12:11:06 2013 -0400

----------------------------------------------------------------------
 .../accumulo/core/client/ConditionalWriter.java |  35 ++----
 .../core/client/ConditionalWriterConfig.java    | 118 +++++++++++++++++++
 .../apache/accumulo/core/client/Connector.java  |   9 +-
 .../core/client/impl/ConditionalWriterImpl.java |  96 ++++++++++-----
 .../core/client/impl/ConnectorImpl.java         |   7 +-
 .../core/client/mock/MockConnector.java         |   3 +-
 .../server/tabletserver/TabletServer.java       |   2 +-
 .../accumulo/test/FaultyConditionalWriter.java  |   9 --
 .../accumulo/test/functional/SlowIterator.java  |  24 +++-
 .../accumulo/test/ConditionalWriterTest.java    | 115 ++++++++++++++----
 10 files changed, 313 insertions(+), 105 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriter.java b/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriter.java
index b434463..db29492 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriter.java
@@ -18,8 +18,8 @@
 package org.apache.accumulo.core.client;
 
 import java.util.Iterator;
-import java.util.concurrent.TimeUnit;
 
+import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.data.ConditionalMutation;
 
 /**
@@ -48,11 +48,11 @@ public interface ConditionalWriter {
     public Status getStatus() throws AccumuloException, AccumuloSecurityException {
       if (status == null) {
         if (exception instanceof AccumuloException)
-          throw (AccumuloException) exception;
-        if (exception instanceof AccumuloSecurityException)
-          throw (AccumuloSecurityException) exception;
-        if (exception instanceof RuntimeException)
-          throw (RuntimeException) exception;
+          throw new AccumuloException(exception);
+        if (exception instanceof AccumuloSecurityException) {
+          AccumuloSecurityException ase = (AccumuloSecurityException) exception;
+          throw new AccumuloSecurityException(ase.getUser(), SecurityErrorCode.valueOf(ase.getSecurityErrorCode().name()), ase.getTableInfo(), ase);
+        }
         else
           throw new AccumuloException(exception);
       }
@@ -94,33 +94,12 @@ public interface ConditionalWriter {
      * A condition contained a column visibility that could never be seen
      */
     INVISIBLE_VISIBILITY,
-    /**
-     * nothing was done with this mutation, this is caused by previous mutations failing in some way like timing out
-     */
-    IGNORED
+
   }
 
   public abstract Iterator<Result> write(Iterator<ConditionalMutation> mutations);
   
   public abstract Result write(ConditionalMutation mutation);
-  
-  /**
-   * This setting determines how long a scanner will automatically retry when a failure occurs. By default a scanner will retry forever.
-   * 
-   * Setting to zero or Long.MAX_VALUE and TimeUnit.MILLISECONDS means to retry forever.
-   * 
-   * @param timeOut
-   * @param timeUnit
-   *          determines how timeout is interpreted
-   */
-  public void setTimeout(long timeOut, TimeUnit timeUnit);
-  
-  /**
-   * Returns the setting for how long a scanner will automatically retry when a failure occurs.
-   * 
-   * @return the timeout configured for this scanner
-   */
-  public long getTimeout(TimeUnit timeUnit);
 
   public void close();
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java b/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java
new file mode 100644
index 0000000..f2a91ea
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.ArgumentChecker;
+
+/**
+ * 
+ * @since 1.6.0
+ */
+public class ConditionalWriterConfig {
+  
+  private static final Long DEFAULT_TIMEOUT = Long.MAX_VALUE;
+  private Long timeout = null;
+  
+  private static final Integer DEFAULT_MAX_WRITE_THREADS = 3;
+  private Integer maxWriteThreads = null;
+  
+  private Authorizations auths = Authorizations.EMPTY;
+  
+  /**
+   * A set of authorization labels that will be checked against the column visibility of each key in order to filter data. The authorizations passed in must be
+   * a subset of the accumulo user's set of authorizations. If the accumulo user has authorizations (A1, A2) and authorizations (A2, A3) are passed, then an
+   * exception will be thrown.
+   * 
+   * <p>
+   * Any condition that is not visible with this set of authorizations will fail.
+   * 
+   * @param auths
+   */
+  public ConditionalWriterConfig setAuthorizations(Authorizations auths) {
+    ArgumentChecker.notNull(auths);
+    this.auths = auths;
+    return this;
+  }
+  
+  /**
+   * Sets the maximum amount of time an unresponsive server will be re-tried. When this timeout is exceeded, the {@link ConditionalWriter} should return the
+   * mutation with an exception.<br />
+   * For no timeout, set to zero, or {@link Long#MAX_VALUE} with {@link TimeUnit#MILLISECONDS}.
+   * 
+   * <p>
+   * {@link TimeUnit#MICROSECONDS} or {@link TimeUnit#NANOSECONDS} will be truncated to the nearest {@link TimeUnit#MILLISECONDS}.<br />
+   * If this truncation would result in making the value zero when it was specified as non-zero, then a minimum value of one {@link TimeUnit#MILLISECONDS} will
+   * be used.
+   * 
+   * <p>
+   * <b>Default:</b> {@link Long#MAX_VALUE} (no timeout)
+   * 
+   * @param timeout
+   *          the timeout, in the unit specified by the value of {@code timeUnit}
+   * @param timeUnit
+   *          determines how {@code timeout} will be interpreted
+   * @throws IllegalArgumentException
+   *           if {@code timeout} is less than 0
+   * @return {@code this} to allow chaining of set methods
+   */
+  public ConditionalWriterConfig setTimeout(long timeout, TimeUnit timeUnit) {
+    if (timeout < 0)
+      throw new IllegalArgumentException("Negative timeout not allowed " + timeout);
+    
+    if (timeout == 0)
+      this.timeout = Long.MAX_VALUE;
+    else
+      // make small, positive values that truncate to 0 when converted use the minimum millis instead
+      this.timeout = Math.max(1, timeUnit.toMillis(timeout));
+    return this;
+  }
+  
+  /**
+   * Sets the maximum number of threads to use for writing data to the tablet servers.
+   * 
+   * <p>
+   * <b>Default:</b> 3
+   * 
+   * @param maxWriteThreads
+   *          the maximum threads to use
+   * @throws IllegalArgumentException
+   *           if {@code maxWriteThreads} is non-positive
+   * @return {@code this} to allow chaining of set methods
+   */
+  public ConditionalWriterConfig setMaxWriteThreads(int maxWriteThreads) {
+    if (maxWriteThreads <= 0)
+      throw new IllegalArgumentException("Max threads must be positive " + maxWriteThreads);
+    
+    this.maxWriteThreads = maxWriteThreads;
+    return this;
+  }
+  
+  public Authorizations getAuthorizations() {
+    return auths;
+  }
+
+  public long getTimeout(TimeUnit timeUnit) {
+    return timeUnit.convert(timeout != null ? timeout : DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
+  }
+  
+  public int getMaxWriteThreads() {
+    return maxWriteThreads != null ? maxWriteThreads : DEFAULT_MAX_WRITE_THREADS;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/core/src/main/java/org/apache/accumulo/core/client/Connector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/Connector.java b/core/src/main/java/org/apache/accumulo/core/client/Connector.java
index 45a8162..bbfa55f 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/Connector.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/Connector.java
@@ -178,16 +178,15 @@ public abstract class Connector {
    * 
    * @param tableName
    *          the name of the table to query data from
-   * @param authorizations
-   *          A set of authorization labels that will be checked against the column visibility of each key in order to filter data. The authorizations passed in
-   *          must be a subset of the accumulo user's set of authorizations. If the accumulo user has authorizations (A1, A2) and authorizations (A2, A3) are
-   *          passed, then an exception will be thrown.
+   * @param config
+   *          configuration used to create conditional writer
    * 
    * @return ConditionalWriter object for writing ConditionalMutations
    * @throws TableNotFoundException
    *           when the specified table doesn't exist
+   * @since 1.6.0
    */
-  public abstract ConditionalWriter createConditionalWriter(String tableName, Authorizations authorizations) throws TableNotFoundException;
+  public abstract ConditionalWriter createConditionalWriter(String tableName, ConditionalWriterConfig config) throws TableNotFoundException;
 
   /**
    * Accessor method for internal instance object.
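
A note for readers who want to try the API introduced by this commit: the sketch below shows one way to use ConditionalWriterConfig and Connector.createConditionalWriter together. It assumes an existing Connector and table; the class name, method name, row/column names and the 30-second timeout are illustrative choices, not part of the commit.

    import java.util.concurrent.TimeUnit;

    import org.apache.accumulo.core.client.AccumuloException;
    import org.apache.accumulo.core.client.AccumuloSecurityException;
    import org.apache.accumulo.core.client.ConditionalWriter;
    import org.apache.accumulo.core.client.ConditionalWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Condition;
    import org.apache.accumulo.core.data.ConditionalMutation;

    public class ConditionalWriterUsageSketch {
      // Writes row1 meta:seq=1 only if meta:seq is currently absent in that row.
      static ConditionalWriter.Status writeIfAbsent(Connector connector, String table)
          throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
        ConditionalWriterConfig config = new ConditionalWriterConfig()
            .setTimeout(30, TimeUnit.SECONDS) // stop retrying an unresponsive server after 30s
            .setMaxWriteThreads(2);           // default is 3
        ConditionalWriter writer = connector.createConditionalWriter(table, config);
        try {
          ConditionalMutation cm = new ConditionalMutation("row1");
          cm.addCondition(new Condition("meta", "seq")); // condition: column must have no value
          cm.put("meta", "seq", "1");
          return writer.write(cm).getStatus(); // e.g. ACCEPTED or REJECTED
        } finally {
          writer.close();
        }
      }
    }

As the ConditionalWriterConfig javadoc above notes, any condition whose column visibility is not covered by the configured authorizations will fail, so setAuthorizations should be set explicitly when conditions carry visibilities.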

http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
index ed20054..55aa718 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
@@ -37,12 +37,15 @@ import java.util.concurrent.TimeUnit;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.ConditionalWriter;
+import org.apache.accumulo.core.client.ConditionalWriterConfig;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableDeletedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.TableOfflineException;
+import org.apache.accumulo.core.client.TimedOutException;
 import org.apache.accumulo.core.client.impl.TabletLocator.TabletServerMutations;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Condition;
 import org.apache.accumulo.core.data.ConditionalMutation;
@@ -94,6 +97,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
   private TCredentials credentials;
   private TabletLocator locator;
   private String tableId;
+  private long timeout;
 
   private static class ServerQueue {
     BlockingQueue<TabletServerMutations<QCMutation>> queue = new LinkedBlockingQueue<TabletServerMutations<QCMutation>>();
@@ -125,7 +129,6 @@ class ConditionalWriterImpl implements ConditionalWriter {
         throw new NoSuchElementException();
 
       try {
-        // TODO maybe call drainTo after take() to get a batch efficiently
         Result result = rq.poll(1, TimeUnit.SECONDS);
         while (result == null) {
           
@@ -153,12 +156,14 @@ class ConditionalWriterImpl implements ConditionalWriter {
     private BlockingQueue<Result> resultQueue;
     private long resetTime;
     private long delay = 50;
+    private long entryTime;
     
-    QCMutation(ConditionalMutation cm, BlockingQueue<Result> resultQueue) {
+    QCMutation(ConditionalMutation cm, BlockingQueue<Result> resultQueue, long entryTime) {
       super(cm);
       this.resultQueue = resultQueue;
+      this.entryTime = entryTime;
     }
-    
+
     @Override
     public int compareTo(Delayed o) {
       QCMutation oqcm = (QCMutation) o;
@@ -171,7 +176,6 @@ class ConditionalWriterImpl implements ConditionalWriter {
     }
     
     void resetDelay() {
-      // TODO eventually timeout a mutation
       delay = Math.min(delay * 2, MAX_SLEEP);
       resetTime = System.currentTimeMillis();
     }
@@ -190,12 +194,37 @@ class ConditionalWriterImpl implements ConditionalWriter {
     return serverQueue;
   }
   
-  private void queueRetry(List<QCMutation> mutations) {
-    for (QCMutation qcm : mutations) {
-      qcm.resetDelay();
-    }
+  private void queueRetry(List<QCMutation> mutations, String server) {
     
-    failedMutations.addAll(mutations);
+    if (timeout < Long.MAX_VALUE) {
+      
+      long time = System.currentTimeMillis();
+      
+      ArrayList<QCMutation> mutations2 = new ArrayList<ConditionalWriterImpl.QCMutation>(mutations.size());
+
+      for (QCMutation qcm : mutations) {
+        qcm.resetDelay();
+        if (time + qcm.getDelay(TimeUnit.MILLISECONDS) > qcm.entryTime + timeout) {
+          TimedOutException toe;
+          if (server != null)
+            toe = new TimedOutException(Collections.singleton(server));
+          else
+            toe = new TimedOutException("Conditional mutation timed out");
+          
+          qcm.resultQueue.add(new Result(toe, qcm, server));
+        } else {
+          mutations2.add(qcm);
+        }
+      }
+      
+      if (mutations2.size() > 0)
+        failedMutations.addAll(mutations2);
+
+    } else {
+      for (QCMutation qcm : mutations)
+        qcm.resetDelay();
+      failedMutations.addAll(mutations);
+    }
   }
 
   private void queue(List<QCMutation> mutations) {
@@ -221,7 +250,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
     }
     
     if (failures.size() > 0)
-      queueRetry(failures);
+      queueRetry(failures, null);
 
     for (Entry<String,TabletServerMutations<QCMutation>> entry : binnedMutations.entrySet()) {
       queue(entry.getKey(), entry.getValue());
@@ -293,17 +322,17 @@ class ConditionalWriterImpl implements ConditionalWriter {
     }
   }
 
-  ConditionalWriterImpl(Instance instance, TCredentials credentials, String tableId, Authorizations authorizations) {
+  ConditionalWriterImpl(Instance instance, TCredentials credentials, String tableId, ConditionalWriterConfig config) {
     this.instance = instance;
     this.credentials = credentials;
-    this.auths = authorizations;
-    this.ve = new VisibilityEvaluator(authorizations);
-    // TODO make configurable
-    this.threadPool = new ScheduledThreadPoolExecutor(3);
-    this.threadPool.setMaximumPoolSize(3);
+    this.auths = config.getAuthorizations();
+    this.ve = new VisibilityEvaluator(config.getAuthorizations());
+    this.threadPool = new ScheduledThreadPoolExecutor(config.getMaxWriteThreads());
+    this.threadPool.setMaximumPoolSize(config.getMaxWriteThreads());
     this.locator = TabletLocator.getLocator(instance, new Text(tableId));
     this.serverQueues = new HashMap<String,ServerQueue>();
     this.tableId = tableId;
+    this.timeout = config.getTimeout(TimeUnit.MILLISECONDS);
 
     Runnable failureHandler = new Runnable() {
       
@@ -328,6 +357,8 @@ class ConditionalWriterImpl implements ConditionalWriter {
 
     int count = 0;
 
+    long entryTime = System.currentTimeMillis();
+
     mloop: while (mutations.hasNext()) {
       // TODO stop reading from iterator if too much memory
       ConditionalMutation mut = mutations.next();
@@ -341,7 +372,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
       }
 
       // copy the mutations so that even if caller changes it, it will not matter
-      mutationList.add(new QCMutation(mut, resultQueue));
+      mutationList.add(new QCMutation(mut, resultQueue, entryTime));
     }
 
     queue(mutationList);
@@ -438,6 +469,15 @@ class ConditionalWriterImpl implements ConditionalWriter {
     }
   }
   
+  private TabletClientService.Iface getClient(String location) throws TTransportException {
+    TabletClientService.Iface client;
+    if (timeout < instance.getConfiguration().getTimeInMillis(Property.GENERAL_RPC_TIMEOUT))
+      client = ThriftUtil.getTServerClient(location, timeout);
+    else
+      client = ThriftUtil.getTServerClient(location, instance.getConfiguration());
+    return client;
+  }
+
   private void sendToServer(String location, TabletServerMutations<QCMutation> mutations) {
     TabletClientService.Iface client = null;
     
@@ -449,7 +489,8 @@ class ConditionalWriterImpl implements ConditionalWriter {
     Long sessionId = null;
     
     try {
-      client = ThriftUtil.getTServerClient(location, instance.getConfiguration());
+      
+      client = getClient(location);
 
       Map<TKeyExtent,List<TConditionalMutation>> tmutations = new HashMap<TKeyExtent,List<TConditionalMutation>>();
 
@@ -486,7 +527,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
         locator.invalidateCache(ke);
       }
 
-      queueRetry(ignored);
+      queueRetry(ignored, location);
 
     } catch (ThriftSecurityException tse) {
       AccumuloSecurityException ase = new AccumuloSecurityException(credentials.getPrincipal(), tse.getCode(), Tables.getPrintableTableInfoFromId(instance,
@@ -508,11 +549,12 @@ class ConditionalWriterImpl implements ConditionalWriter {
     }
   }
 
-  private void queueRetry(Map<Long,CMK> cmidToCm) {
+
+  private void queueRetry(Map<Long,CMK> cmidToCm, String location) {
     ArrayList<QCMutation> ignored = new ArrayList<QCMutation>();
     for (CMK cmk : cmidToCm.values())
     	ignored.add(cmk.cm);
-    queueRetry(ignored);
+    queueRetry(ignored, location);
   }
 
   private void queueException(String location, Map<Long,CMK> cmidToCm, Exception e) {
@@ -522,7 +564,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
 
   private void invalidateSession(String location, TabletServerMutations<QCMutation> mutations, Map<Long,CMK> cmidToCm, Long sessionId) {
     if(sessionId == null){
-      queueRetry(cmidToCm);
+      queueRetry(cmidToCm, location);
     }else{
       try {
         invalidateSession(sessionId, location, mutations);
@@ -587,7 +629,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
     TInfo tinfo = Tracer.traceInfo();
     
     try {
-      client = ThriftUtil.getTServerClient(location, instance.getConfiguration());
+      client = getClient(location);
       client.invalidateConditionalUpdate(tinfo, sessionId);
     } finally {
       ThriftUtil.returnClient((TServiceClient) client);
@@ -678,14 +720,6 @@ class ConditionalWriterImpl implements ConditionalWriter {
     return write(Collections.singleton(mutation).iterator()).next();
   }
   
-  public void setTimeout(long timeOut, TimeUnit timeUnit) {
-    throw new UnsupportedOperationException();
-  }
-  
-  public long getTimeout(TimeUnit timeUnit) {
-    throw new UnsupportedOperationException();
-  }
-  
   @Override
   public void close() {
     //TODO could possibly close cached sessions using an async method to clean up sessions on the server side

http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
index 693f3c9..57e36fd 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
@@ -25,6 +25,7 @@ import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.ConditionalWriter;
+import org.apache.accumulo.core.client.ConditionalWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
@@ -131,10 +132,8 @@ public class ConnectorImpl extends Connector {
   }
   
   @Override
-  public ConditionalWriter createConditionalWriter(String tableName, Authorizations authorizations) throws TableNotFoundException {
-    ArgumentChecker.notNull(tableName, authorizations);
-    // TODO resolve table name to table id here and pass that
-    return new ConditionalWriterImpl(instance, credentials, getTableId(tableName), authorizations);
+  public ConditionalWriter createConditionalWriter(String tableName, ConditionalWriterConfig config) throws TableNotFoundException {
+    return new ConditionalWriterImpl(instance, credentials, getTableId(tableName), config);
   }
   
   @Override

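For illustration, here is a minimal usage sketch of the new ConditionalWriterConfig-based API exposed by the ConnectorImpl change above. It is not part of the patch: the table name, row, column, and security label are hypothetical, and only methods that appear elsewhere in this change set are used.

  import java.util.concurrent.TimeUnit;

  import org.apache.accumulo.core.client.ConditionalWriter;
  import org.apache.accumulo.core.client.ConditionalWriter.Status;
  import org.apache.accumulo.core.client.ConditionalWriterConfig;
  import org.apache.accumulo.core.client.Connector;
  import org.apache.accumulo.core.data.Condition;
  import org.apache.accumulo.core.data.ConditionalMutation;
  import org.apache.accumulo.core.security.Authorizations;

  public class ConditionalWriterUsageSketch {
    // Assumes a live Connector and an existing table named "usertable" (hypothetical).
    static void conditionalUpdate(Connector conn) throws Exception {
      ConditionalWriterConfig config = new ConditionalWriterConfig();
      config.setAuthorizations(new Authorizations("A")); // labels this writer may evaluate
      config.setTimeout(30, TimeUnit.SECONDS);           // timeout support added by this change set

      ConditionalWriter cw = conn.createConditionalWriter("usertable", config);
      try {
        // apply the update only if tx:seq does not already exist
        ConditionalMutation cm = new ConditionalMutation("row1", new Condition("tx", "seq"));
        cm.put("tx", "seq", "1");
        Status status = cw.write(cm).getStatus();
        if (status == Status.UNKNOWN) {
          // the outcome is unclear (for example, the timeout elapsed); re-read before retrying
        }
      } finally {
        cw.close();
      }
    }
  }
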
http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java
index 4a405aa..4af2ea5 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java
@@ -23,6 +23,7 @@ import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.ConditionalWriter;
+import org.apache.accumulo.core.client.ConditionalWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
@@ -130,7 +131,7 @@ public class MockConnector extends Connector {
   }
   
   @Override
-  public ConditionalWriter createConditionalWriter(String tableName, Authorizations authorizations) throws TableNotFoundException {
+  public ConditionalWriter createConditionalWriter(String tableName, ConditionalWriterConfig config) throws TableNotFoundException {
     // TODO add implementation
     throw new UnsupportedOperationException();
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 4f7ba92..c1a1fc3 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -1933,7 +1933,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       
       ConditionalSession cs = (ConditionalSession) sessionManager.reserveSession(sessID);
       
-      if(cs == null)
+      if (cs == null || cs.interruptFlag.get())
         throw new NoSuchScanIDException();
       
       Text tid = new Text(cs.tableId);

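The one-line TabletServer change above makes a reserved conditional session unusable once its interrupt flag has been set, not just when the session is missing. As a generic illustration of that guard pattern (the class and field names below are invented for the sketch, not the tablet server's real session types):

  import java.util.concurrent.atomic.AtomicBoolean;

  class CancellableSession {
    // set by whatever tears the session down, e.g. the table going offline or being deleted
    final AtomicBoolean interruptFlag = new AtomicBoolean(false);
  }

  class SessionGuard {
    // Treat a missing session and an interrupted session identically, so callers
    // observe a single no-such-session failure path.
    static CancellableSession checkReserved(CancellableSession cs) {
      if (cs == null || cs.interruptFlag.get())
        throw new IllegalStateException("no such session");
      return cs;
    }
  }
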
http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java b/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java
index de56218..7e7480f 100644
--- a/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java
+++ b/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java
@@ -20,7 +20,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.Random;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.data.ConditionalMutation;
@@ -74,14 +73,6 @@ public class FaultyConditionalWriter implements ConditionalWriter {
     return write(Collections.singleton(mutation).iterator()).next();
   }
   
-  public void setTimeout(long timeOut, TimeUnit timeUnit) {
-    cw.setTimeout(timeOut, timeUnit);
-  }
-  
-  public long getTimeout(TimeUnit timeUnit) {
-    return cw.getTimeout(timeUnit);
-  }
-  
   @Override
   public void close() {
     cw.close();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/test/src/main/java/org/apache/accumulo/test/functional/SlowIterator.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SlowIterator.java b/test/src/main/java/org/apache/accumulo/test/functional/SlowIterator.java
index a71b1ad..03eaefb 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SlowIterator.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SlowIterator.java
@@ -17,10 +17,13 @@
 package org.apache.accumulo.test.functional;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.Map;
 
 import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
@@ -30,13 +33,19 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 public class SlowIterator extends WrappingIterator {
 
   static private final String SLEEP_TIME = "sleepTime";
+  static private final String SEEK_SLEEP_TIME = "seekSleepTime";
   
-  long sleepTime;
+  private long sleepTime = 0;
+  private long seekSleepTime = 0;
   
   public static void setSleepTime(IteratorSetting is, long millis) {
     is.addOption(SLEEP_TIME, Long.toString(millis));  
   }
   
+  public static void setSeekSleepTime(IteratorSetting is, long t) {
+    is.addOption(SEEK_SLEEP_TIME, Long.toString(t));
+  }
+
   @Override
   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
     throw new UnsupportedOperationException();
@@ -49,9 +58,20 @@ public class SlowIterator extends WrappingIterator {
   }
   
   @Override
+  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
+    UtilWaitThread.sleep(seekSleepTime);
+    super.seek(range, columnFamilies, inclusive);
+  }
+  
+  @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
-    sleepTime = Long.parseLong(options.get(SLEEP_TIME));
+    if (options.containsKey(SLEEP_TIME))
+      sleepTime = Long.parseLong(options.get(SLEEP_TIME));
+    
+    if (options.containsKey(SEEK_SLEEP_TIME))
+      seekSleepTime = Long.parseLong(options.get(SEEK_SLEEP_TIME));
   }
+
   
 }

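The SlowIterator changes above make the seek delay configurable separately from the existing sleep option, which the timeout test further down in this patch relies on. A short sketch of wiring the new option into a condition (the iterator priority and column names are arbitrary):

  import org.apache.accumulo.core.client.IteratorSetting;
  import org.apache.accumulo.core.data.Condition;
  import org.apache.accumulo.test.functional.SlowIterator;

  public class SlowConditionSketch {
    // Builds a Condition whose server-side check sleeps before every seek.
    static Condition slowCondition(long seekMillis) {
      IteratorSetting is = new IteratorSetting(5, SlowIterator.class); // priority 5 is arbitrary
      SlowIterator.setSeekSleepTime(is, seekMillis); // sleep before each seek
      SlowIterator.setSleepTime(is, 0);              // leave the original sleep option at zero
      return new Condition("tx", "seq").setIterators(is);
    }
  }
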
http://git-wip-us.apache.org/repos/asf/accumulo/blob/79019ef0/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java b/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java
index 65a5636..66b699e 100644
--- a/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/ConditionalWriterTest.java
@@ -41,6 +41,7 @@ import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.client.ConditionalWriter.Result;
 import org.apache.accumulo.core.client.ConditionalWriter.Status;
+import org.apache.accumulo.core.client.ConditionalWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.IteratorSetting;
@@ -74,6 +75,7 @@ import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.accumulo.test.functional.BadIterator;
+import org.apache.accumulo.test.functional.SlowIterator;
 import org.apache.hadoop.io.Text;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -107,7 +109,7 @@ public class ConditionalWriterTest {
     
     conn.tableOperations().create("foo");
 
-    ConditionalWriter cw = conn.createConditionalWriter("foo", Authorizations.EMPTY);
+    ConditionalWriter cw = conn.createConditionalWriter("foo", new ConditionalWriterConfig());
     
     // mutation conditional on column tx:seq not existing
     ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
@@ -190,7 +192,7 @@ public class ConditionalWriterTest {
     
     conn.securityOperations().changeUserAuthorizations("root", auths);
     
-    ConditionalWriter cw = conn.createConditionalWriter(table, auths);
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig().setAuthorizations(auths));
     
     ColumnVisibility cva = new ColumnVisibility("A");
     ColumnVisibility cvb = new ColumnVisibility("B");
@@ -278,7 +280,7 @@ public class ConditionalWriterTest {
 
     Authorizations filteredAuths = new Authorizations("A");
     
-    ConditionalWriter cw = conn.createConditionalWriter(table, filteredAuths);
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig().setAuthorizations(filteredAuths));
     
     ColumnVisibility cva = new ColumnVisibility("A");
     ColumnVisibility cvb = new ColumnVisibility("B");
@@ -340,6 +342,25 @@ public class ConditionalWriterTest {
     Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm7).getStatus());
     
     cw.close();
+
+    // test passing auths that exceed the user's configured auths
+    
+    Authorizations exceedingAuths = new Authorizations("A", "B", "D");
+    ConditionalWriter cw2 = conn.createConditionalWriter(table, new ConditionalWriterConfig().setAuthorizations(exceedingAuths));
+    
+    ConditionalMutation cm8 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb), new Condition("tx", "seq").setVisibility(cva)
+        .setValue("1"));
+    cm8.put("name", "last", cva, "doe");
+    cm8.put("name", "first", cva, "john");
+    cm8.put("tx", "seq", cva, "1");
+
+    try {
+      cw2.write(cm8).getStatus();
+      Assert.assertTrue(false);
+    } catch (AccumuloSecurityException ase) {}
+    
+
+    cw2.close();
   }
   
   @Test
@@ -356,7 +377,7 @@ public class ConditionalWriterTest {
     
     Scanner scanner = conn.createScanner(table + "_clone", new Authorizations());
 
-    ConditionalWriter cw = conn.createConditionalWriter(table + "_clone", new Authorizations());
+    ConditionalWriter cw = conn.createConditionalWriter(table + "_clone", new ConditionalWriterConfig());
 
     ConditionalMutation cm0 = new ConditionalMutation("99006+", new Condition("tx", "seq"));
     cm0.put("tx", "seq", "1");
@@ -421,7 +442,7 @@ public class ConditionalWriterTest {
     
     Assert.assertEquals("3", scanner.iterator().next().getValue().toString());
 
-    ConditionalWriter cw = conn.createConditionalWriter(table, new Authorizations());
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
     
     ConditionalMutation cm0 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setValue("3"));
     cm0.put("count", "comments", "1");
@@ -504,7 +525,7 @@ public class ConditionalWriterTest {
     cm2.put("tx", "seq", cvab, "1");
     mutations.add(cm2);
     
-    ConditionalWriter cw = conn.createConditionalWriter(table, new Authorizations("A"));
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig().setAuthorizations(new Authorizations("A")));
     Iterator<Result> results = cw.write(mutations.iterator());
     int count = 0;
     while (results.hasNext()) {
@@ -611,7 +632,7 @@ public class ConditionalWriterTest {
       cml.add(cm);
     }
 
-    ConditionalWriter cw = conn.createConditionalWriter(table, Authorizations.EMPTY);
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
 
     Iterator<Result> results = cw.write(cml.iterator());
 
@@ -704,7 +725,7 @@ public class ConditionalWriterTest {
     cm3.put("tx", "seq", cvaob, "2");
     mutations.add(cm3);
 
-    ConditionalWriter cw = conn.createConditionalWriter(table, new Authorizations("A"));
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig().setAuthorizations(new Authorizations("A")));
     Iterator<Result> results = cw.write(mutations.iterator());
     HashSet<String> rows = new HashSet<String>();
     while (results.hasNext()) {
@@ -745,7 +766,7 @@ public class ConditionalWriterTest {
     
     conn.tableOperations().create(table);
     
-    ConditionalWriter cw = conn.createConditionalWriter(table, Authorizations.EMPTY);
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
     
     ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
     cm1.put("tx", "seq", "1");
@@ -942,7 +963,7 @@ public class ConditionalWriterTest {
         break;
     }
     
-    ConditionalWriter cw = conn.createConditionalWriter(table, Authorizations.EMPTY);
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
     
     ArrayList<ByteSequence> rows = new ArrayList<ByteSequence>();
 
@@ -1026,9 +1047,9 @@ public class ConditionalWriterTest {
     cm1.put("tx", "seq", "1");
     cm1.put("data", "x", "a");
     
-    ConditionalWriter cw1 = conn2.createConditionalWriter("sect1", Authorizations.EMPTY);
-    ConditionalWriter cw2 = conn2.createConditionalWriter("sect2", Authorizations.EMPTY);
-    ConditionalWriter cw3 = conn2.createConditionalWriter("sect3", Authorizations.EMPTY);
+    ConditionalWriter cw1 = conn2.createConditionalWriter("sect1", new ConditionalWriterConfig());
+    ConditionalWriter cw2 = conn2.createConditionalWriter("sect2", new ConditionalWriterConfig());
+    ConditionalWriter cw3 = conn2.createConditionalWriter("sect3", new ConditionalWriterConfig());
     
     Assert.assertEquals(Status.ACCEPTED, cw3.write(cm1).getStatus());
     
@@ -1050,8 +1071,54 @@ public class ConditionalWriterTest {
 
 
   @Test
-  public void testTimeout() {
-    // TODO
+  public void testTimeout() throws Exception {
+    ZooKeeperInstance zki = new ZooKeeperInstance(cluster.getInstanceName(), cluster.getZooKeepers());
+    Connector conn = zki.getConnector("root", new PasswordToken(secret));
+    
+    String table = "fooT";
+    
+    conn.tableOperations().create(table);
+
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig().setTimeout(1, TimeUnit.SECONDS));
+
+    ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
+    cm1.put("tx", "seq", "1");
+    cm1.put("data", "x", "a");
+    
+    Assert.assertEquals(cw.write(cm1).getStatus(), Status.ACCEPTED);
+    
+    IteratorSetting is = new IteratorSetting(5, SlowIterator.class);
+    SlowIterator.setSeekSleepTime(is, 4000);
+    
+    ConditionalMutation cm2 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1").setIterators(is));
+    cm2.put("tx", "seq", "2");
+    cm2.put("data", "x", "b");
+    
+    Assert.assertEquals(cw.write(cm2).getStatus(), Status.UNKNOWN);
+    
+    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+    
+    for (Entry<Key,Value> entry : scanner) {
+      String cf = entry.getKey().getColumnFamilyData().toString();
+      String cq = entry.getKey().getColumnQualifierData().toString();
+      String val = entry.getValue().toString();
+      
+      if (cf.equals("tx") && cq.equals("seq"))
+        Assert.assertEquals("1", val);
+      else if (cf.equals("data") && cq.equals("x"))
+        Assert.assertEquals("a", val);
+      else
+        Assert.assertTrue(false);
+    }
+    
+    ConditionalMutation cm3 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
+    cm3.put("tx", "seq", "2");
+    cm3.put("data", "x", "b");
+    
+    Assert.assertEquals(cw.write(cm3).getStatus(), Status.ACCEPTED);
+    
+    cw.close();
+
   }
 
   @Test
@@ -1062,13 +1129,13 @@ public class ConditionalWriterTest {
     Connector conn = zki.getConnector("root", new PasswordToken(secret));
     
     try {
-      conn.createConditionalWriter(table, Authorizations.EMPTY);
+      conn.createConditionalWriter(table, new ConditionalWriterConfig());
       Assert.assertFalse(true);
     } catch (TableNotFoundException e) {}
     
     conn.tableOperations().create(table);
     
-    ConditionalWriter cw = conn.createConditionalWriter(table, Authorizations.EMPTY);
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
     
     conn.tableOperations().delete(table);
     
@@ -1081,8 +1148,8 @@ public class ConditionalWriterTest {
     try {
       result.getStatus();
       Assert.assertFalse(true);
-    } catch (TableDeletedException ae) {
-      
+    } catch (AccumuloException ae) {
+      Assert.assertEquals(TableDeletedException.class, ae.getCause().getClass());
     }
     
   }
@@ -1096,7 +1163,7 @@ public class ConditionalWriterTest {
     
     conn.tableOperations().create(table);
     
-    ConditionalWriter cw = conn.createConditionalWriter(table, Authorizations.EMPTY);
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
     
     conn.tableOperations().offline(table);
 
@@ -1111,14 +1178,14 @@ public class ConditionalWriterTest {
     try {
       result.getStatus();
       Assert.assertFalse(true);
-    } catch (TableOfflineException ae) {
-      
+    } catch (AccumuloException ae) {
+      Assert.assertEquals(TableOfflineException.class, ae.getCause().getClass());
     }
     
     cw.close();
     
     try {
-      conn.createConditionalWriter(table, Authorizations.EMPTY);
+      conn.createConditionalWriter(table, new ConditionalWriterConfig());
       Assert.assertFalse(true);
     } catch (TableOfflineException e) {}
   }
@@ -1140,7 +1207,7 @@ public class ConditionalWriterTest {
     
     conn.tableOperations().create(table);
     
-    ConditionalWriter cw = conn.createConditionalWriter(table, Authorizations.EMPTY);
+    ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
     
     IteratorSetting iterSetting = new IteratorSetting(5, BadIterator.class);
     


[15/50] git commit: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/accumulo

Posted by kt...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/accumulo


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/8b0f573e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/8b0f573e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/8b0f573e

Branch: refs/heads/ACCUMULO-1000
Commit: 8b0f573e0d0d24c74ebf14989d48cc8185b76cd4
Parents: f7e96a3 c3698b0
Author: Eric Newton <er...@gmail.com>
Authored: Wed Jul 17 12:12:04 2013 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Wed Jul 17 12:12:04 2013 -0400

----------------------------------------------------------------------
 .../core/security/crypto/CryptoModuleParameters.java    | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------