Posted to commits@accumulo.apache.org by bu...@apache.org on 2014/05/12 19:51:21 UTC

[1/4] git commit: ACCUMULO-2791 Downgrade commons-codec to match that provided by Hadoop.

Repository: accumulo
Updated Branches:
  refs/heads/1.6.1-SNAPSHOT 484491d21 -> a73cf8511
  refs/heads/master 4c0f3590c -> e1862d312


ACCUMULO-2791 Downgrade commons-codec to match that provided by Hadoop.

* Provide a core.util Base64 class to enforce the non-chunked behavior we rely on (see the sketch below)
* Changed to use the codec 1.4 'shaHex' method
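
For context, the chunking difference looks roughly like this. A minimal sketch, not part of the commit; it assumes commons-codec 1.4 on the classpath and Java 7+ for StandardCharsets (the demo class name is made up):

import java.nio.charset.StandardCharsets;

public class ChunkingDemo {
  public static void main(String[] args) {
    byte[] data = new byte[100]; // long enough to exceed one 76-char Base64 line

    // commons-codec 1.4's encodeBase64String() chunks its output into
    // 76-character lines terminated by CRLF; 1.5+ changed it to non-chunked.
    String maybeChunked = org.apache.commons.codec.binary.Base64.encodeBase64String(data);
    System.out.println(maybeChunked.contains("\r\n")); // true on 1.4, false on 1.5+

    // Passing isChunked=false explicitly gives identical output on every
    // codec version, which is what the new core.util Base64 wrapper does.
    String stable = new String(
        org.apache.commons.codec.binary.Base64.encodeBase64(data, false),
        StandardCharsets.UTF_8);
    System.out.println(stable.contains("\r\n")); // always false
  }
}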


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a73cf851
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a73cf851
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a73cf851

Branch: refs/heads/1.6.1-SNAPSHOT
Commit: a73cf8511020f552e90386d829a6dffb5a8a1cad
Parents: 484491d
Author: Sean Busbey <bu...@cloudera.com>
Authored: Fri May 9 11:50:07 2014 -0500
Committer: Sean Busbey <bu...@cloudera.com>
Committed: Fri May 9 16:44:14 2014 -0500

----------------------------------------------------------------------
 .../core/client/mapreduce/RangeInputSplit.java  |  2 +-
 .../mapreduce/lib/impl/ConfiguratorBase.java    |  2 +-
 .../mapreduce/lib/impl/InputConfigurator.java   | 12 ++--
 .../lib/partition/RangePartitioner.java         |  2 +-
 .../iterators/user/IntersectingIterator.java    |  6 +-
 .../accumulo/core/security/Authorizations.java  |  4 +-
 .../accumulo/core/security/Credentials.java     |  2 +-
 .../org/apache/accumulo/core/util/Base64.java   | 75 ++++++++++++++++++++
 .../apache/accumulo/core/util/CreateToken.java  |  2 +-
 .../org/apache/accumulo/core/util/Encoding.java |  9 +--
 .../util/shell/commands/AddSplitsCommand.java   |  2 +-
 .../util/shell/commands/CreateTableCommand.java |  2 +-
 .../util/shell/commands/GetSplitsCommand.java   |  6 +-
 .../core/util/shell/commands/HiddenCommand.java |  2 +-
 .../client/mapred/AccumuloInputFormatTest.java  |  4 +-
 .../mapreduce/AccumuloInputFormatTest.java      |  4 +-
 .../lib/impl/ConfiguratorBaseTest.java          |  2 +-
 .../examples/simple/mapreduce/RowHash.java      |  2 +-
 .../mapreduce/bulk/BulkIngestExample.java       |  4 +-
 pom.xml                                         |  2 +-
 .../apache/accumulo/server/fs/VolumeUtil.java   |  2 +-
 .../master/state/TabletStateChangeIterator.java |  4 +-
 .../server/security/SystemCredentials.java      |  2 +-
 .../accumulo/server/util/DumpZookeeper.java     |  4 +-
 .../accumulo/server/util/RestoreZookeeper.java  |  2 +-
 .../apache/accumulo/master/tableOps/Utils.java  |  6 +-
 .../monitor/servlets/TServersServlet.java       |  4 +-
 .../test/randomwalk/shard/BulkInsert.java       |  4 +-
 .../accumulo/test/functional/CredentialsIT.java |  2 +-
 29 files changed, 124 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 47b34e9..06f4081 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@ -42,8 +42,8 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.InputSplit;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
index 33ca5d2..e87d43b 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
@@ -30,7 +30,7 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.util.ArgumentChecker;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
index 2fc606c..1ed23e3 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
@@ -60,9 +60,9 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.Text;
@@ -176,7 +176,7 @@ public class InputConfigurator extends ConfiguratorBase {
       for (Range r : ranges) {
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         r.write(new DataOutputStream(baos));
-        rangeStrings.add(new String(Base64.encodeBase64(baos.toByteArray())));
+        rangeStrings.add(Base64.encodeBase64String(baos.toByteArray()));
       }
       conf.setStrings(enumToConfKey(implementingClass, ScanOpts.RANGES), rangeStrings.toArray(new String[0]));
     } catch (IOException ex) {
@@ -272,9 +272,9 @@ public class InputConfigurator extends ConfiguratorBase {
       if (column.getFirst() == null)
         throw new IllegalArgumentException("Column family can not be null");
 
-      String col = new String(Base64.encodeBase64(TextUtil.getBytes(column.getFirst())), Constants.UTF8);
+      String col = Base64.encodeBase64String(TextUtil.getBytes(column.getFirst()));
       if (column.getSecond() != null)
-        col += ":" + new String(Base64.encodeBase64(TextUtil.getBytes(column.getSecond())), Constants.UTF8);
+        col += ":" + Base64.encodeBase64String(TextUtil.getBytes(column.getSecond()));
       columnStrings.add(col);
     }
 
@@ -339,7 +339,7 @@ public class InputConfigurator extends ConfiguratorBase {
     String newIter;
     try {
       cfg.write(new DataOutputStream(baos));
-      newIter = new String(Base64.encodeBase64(baos.toByteArray()), Constants.UTF8);
+      newIter = Base64.encodeBase64String(baos.toByteArray());
       baos.close();
     } catch (IOException e) {
       throw new IllegalArgumentException("unable to serialize IteratorSetting");
@@ -536,7 +536,7 @@ public class InputConfigurator extends ConfiguratorBase {
     }
 
     String confKey = enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS);
-    conf.set(confKey, new String(Base64.encodeBase64(baos.toByteArray())));
+    conf.set(confKey, Base64.encodeBase64String(baos.toByteArray()));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
index 54730ef..1541fae 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
@@ -28,7 +28,7 @@ import java.util.TreeSet;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.mapreduce.lib.impl.DistributedCacheHelper;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
index c219b5a..8ce0ca8 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
@@ -31,8 +31,8 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
@@ -391,7 +391,7 @@ public class IntersectingIterator implements SortedKeyValueIterator<Key,Value> {
   protected static String encodeColumns(Text[] columns) {
     StringBuilder sb = new StringBuilder();
     for (int i = 0; i < columns.length; i++) {
-      sb.append(new String(Base64.encodeBase64(TextUtil.getBytes(columns[i])), Constants.UTF8));
+      sb.append(Base64.encodeBase64String(TextUtil.getBytes(columns[i])));
       sb.append('\n');
     }
     return sb.toString();
@@ -408,7 +408,7 @@ public class IntersectingIterator implements SortedKeyValueIterator<Key,Value> {
       else
         bytes[i] = 0;
     }
-    return new String(Base64.encodeBase64(bytes), Constants.UTF8);
+    return Base64.encodeBase64String(bytes);
   }
   
   protected static Text[] decodeColumns(String columns) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
index ab3ea68..1abe002 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
@@ -31,8 +31,8 @@ import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.data.ArrayByteSequence;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.util.ArgumentChecker;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.commons.codec.binary.Base64;
 
 /**
  * A collection of authorization strings.
@@ -340,7 +340,7 @@ public class Authorizations implements Iterable<byte[]>, Serializable, Authoriza
     for (byte[] auth : authsList) {
       sb.append(sep);
       sep = ",";
-      sb.append(new String(Base64.encodeBase64(auth), Constants.UTF8));
+      sb.append(Base64.encodeBase64String(auth));
     }
 
     return sb.toString();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/Credentials.java b/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
index 9f8b1be..582b4e0 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
@@ -26,7 +26,7 @@ import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 
 /**
  * A wrapper for internal use. This class carries the instance, principal, and authentication token for use in the public API, in a non-serialized form. This is

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/Base64.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Base64.java b/core/src/main/java/org/apache/accumulo/core/util/Base64.java
new file mode 100644
index 0000000..76de4ed
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/util/Base64.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.util;
+
+import org.apache.commons.codec.binary.StringUtils;
+
+/**
+ * A wrapper around commons-codec's Base64 to make sure we get the non-chunked behavior that
+ * became the default in commons-codec version 1.5+ while relying on the commons-codec version 1.4
+ * that Hadoop Client provides.
+ */
+public final class Base64 {
+
+  /**
+   * Private to prevent instantiation.
+   */
+  private Base64() {
+  }
+
+  /**
+   * Serialize to Base64 byte array, non-chunked.
+   */
+  public static byte[] encodeBase64(byte[] data) {
+    return org.apache.commons.codec.binary.Base64.encodeBase64(data, false);
+  }
+
+  /**
+   * Serialize to Base64 as a String, non-chunked.
+   */
+  public static String encodeBase64String(byte[] data) {
+    /* Based on the implementation of the same-named method in commons-codec 1.5+; in commons-codec 1.4, encodeBase64String itself chunks (equivalent to passing true as the second param). */
+    return StringUtils.newStringUtf8(org.apache.commons.codec.binary.Base64.encodeBase64(data, false));
+  }
+
+  /**
+   * Serialize to Base64 as a String using the URLSafe alphabet, non-chunked.
+   *
+   * The URLSafe alphabet uses - instead of + and _ instead of /.
+   */
+  public static String encodeBase64URLSafeString(byte[] data) {
+    return org.apache.commons.codec.binary.Base64.encodeBase64URLSafeString(data);
+  }
+
+  /**
+   * Decode, presuming bytes are base64.
+   *
+   * Transparently handles either the standard alphabet or the URL Safe one.
+   */
+  public static byte[] decodeBase64(byte[] base64) {
+    return org.apache.commons.codec.binary.Base64.decodeBase64(base64);
+  }
+
+  /**
+   * Decode, presuming String is base64.
+   *
+   * Transparently handles either the standard alphabet or the URL Safe one.
+   */
+  public static byte[] decodeBase64(String base64String) {
+    return org.apache.commons.codec.binary.Base64.decodeBase64(base64String);
+  }
+}
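
For illustration, here is a hedged usage sketch of the wrapper above (the payload and class name are made up; the method names come from the file in this diff):

import java.nio.charset.StandardCharsets;

import org.apache.accumulo.core.util.Base64;

public class WrapperUsage {
  public static void main(String[] args) {
    byte[] payload = "row;cf:cq".getBytes(StandardCharsets.UTF_8);

    // Non-chunked output on both codec 1.4 (what Hadoop provides) and 1.5+.
    String encoded = Base64.encodeBase64String(payload);

    // decodeBase64 accepts either the standard or the URL-safe alphabet.
    byte[] decoded = Base64.decodeBase64(encoded);
    System.out.println(new String(decoded, StandardCharsets.UTF_8));
  }
}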

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
index cc6762a..9f86db3 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
@@ -32,7 +32,7 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Authe
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.TokenProperty;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 
 import com.beust.jcommander.Parameter;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Encoding.java b/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
index 451d4d6..aff8f62 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
@@ -17,14 +17,13 @@
 package org.apache.accumulo.core.util;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.io.Text;
 
 public class Encoding {
   
   public static String encodeAsBase64FileName(Text data) {
-    String encodedRow = new String(Base64.encodeBase64(TextUtil.getBytes(data)), Constants.UTF8);
-    encodedRow = encodedRow.replace('/', '_').replace('+', '-');
+    String encodedRow = Base64.encodeBase64URLSafeString(TextUtil.getBytes(data));
     
     int index = encodedRow.length() - 1;
     while (index >= 0 && encodedRow.charAt(index) == '=')
@@ -37,9 +36,7 @@ public class Encoding {
   public static byte[] decodeBase64FileName(String node) {
     while (node.length() % 4 != 0)
       node += "=";
-    
-    node = node.replace('_', '/').replace('-', '+');
-    
+    /* decode transparently handles URLSafe encodings */
     return Base64.decodeBase64(node.getBytes(Constants.UTF8));
   }
   

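The Encoding change above swaps the hand-rolled character replacement for the URL-safe alphabet. A rough round-trip sketch, assuming a build that contains this commit (the demo class and input bytes are made up):

import org.apache.accumulo.core.util.Encoding;
import org.apache.hadoop.io.Text;

public class FileNameRoundTrip {
  public static void main(String[] args) {
    // Bytes chosen so standard Base64 would emit '+', '/', and '=' padding.
    Text row = new Text(new byte[] {(byte) 0xfb, (byte) 0xef, (byte) 0xff, 0x00});

    // Uses '-' and '_' instead of '+' and '/', and strips trailing '=',
    // so the result is safe to use as a file name.
    String name = Encoding.encodeAsBase64FileName(row);

    // decodeBase64FileName re-pads to a multiple of four characters, and
    // decodeBase64 transparently handles the URL-safe alphabet.
    byte[] original = Encoding.decodeBase64FileName(name);
    System.out.println(original.length == 4); // true
  }
}
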
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java
index 6bd260c..f06a639 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java
@@ -21,13 +21,13 @@ import java.util.TreeSet;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.accumulo.core.util.shell.Shell.Command;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.MissingArgumentException;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 
 public class AddSplitsCommand extends Command {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
index 25b92be..d2c73f7 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
@@ -35,6 +35,7 @@ import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.iterators.IteratorUtil;
 import org.apache.accumulo.core.security.VisibilityConstraint;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.accumulo.core.util.shell.Shell.Command;
 import org.apache.accumulo.core.util.shell.Token;
@@ -42,7 +43,6 @@ import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.OptionGroup;
 import org.apache.commons.cli.Options;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 
 public class CreateTableCommand extends Command {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
index a27fa47..695d1a3 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
@@ -35,6 +35,7 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.util.format.BinaryFormatter;
 import org.apache.accumulo.core.util.shell.Shell;
@@ -45,7 +46,6 @@ import org.apache.accumulo.core.util.shell.Shell.PrintShell;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 
 public class GetSplitsCommand extends Command {
@@ -104,7 +104,7 @@ public class GetSplitsCommand extends Command {
       return null;
     }
     BinaryFormatter.getlength(text.getLength());
-    return encode ? new String(Base64.encodeBase64(TextUtil.getBytes(text)), Constants.UTF8) : BinaryFormatter.appendText(new StringBuilder(), text).toString();
+    return encode ? Base64.encodeBase64String(TextUtil.getBytes(text)) : BinaryFormatter.appendText(new StringBuilder(), text).toString();
   }
   
   private static String obscuredTabletName(final KeyExtent extent) {
@@ -117,7 +117,7 @@ public class GetSplitsCommand extends Command {
     if (extent.getEndRow() != null && extent.getEndRow().getLength() > 0) {
       digester.update(extent.getEndRow().getBytes(), 0, extent.getEndRow().getLength());
     }
-    return new String(Base64.encodeBase64(digester.digest()), Constants.UTF8);
+    return Base64.encodeBase64String(digester.digest());
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java
index c212c75..61f60f8 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java
@@ -20,12 +20,12 @@ import java.security.SecureRandom;
 import java.util.Random;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.accumulo.core.util.shell.Shell.Command;
 import org.apache.accumulo.core.util.shell.ShellCommandException;
 import org.apache.accumulo.core.util.shell.ShellCommandException.ErrorCode;
 import org.apache.commons.cli.CommandLine;
-import org.apache.commons.codec.binary.Base64;
 
 public class HiddenCommand extends Command {
   private static Random rand = new SecureRandom();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
index 13490e0..9e6958a 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
@@ -36,7 +36,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.iterators.user.WholeRowIterator;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
@@ -80,7 +80,7 @@ public class AccumuloInputFormatTest {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     is.write(new DataOutputStream(baos));
     String iterators = job.get("AccumuloInputFormat.ScanOpts.Iterators");
-    assertEquals(new String(Base64.encodeBase64(baos.toByteArray())), iterators);
+    assertEquals(Base64.encodeBase64String(baos.toByteArray()), iterators);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
index 2500972..3844cd9 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
@@ -42,9 +42,9 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.iterators.user.WholeRowIterator;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
@@ -77,7 +77,7 @@ public class AccumuloInputFormatTest {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     is.write(new DataOutputStream(baos));
     String iterators = conf.get("AccumuloInputFormat.ScanOpts.Iterators");
-    assertEquals(new String(Base64.encodeBase64(baos.toByteArray())), iterators);
+    assertEquals(Base64.encodeBase64String(baos.toByteArray()), iterators);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
index 1983470..d5ebb22 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
index 1fa9b8f..165b481 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
@@ -25,8 +25,8 @@ import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.MD5Hash;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
index 72bd7eb..6da51a3 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
@@ -27,9 +27,9 @@ import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
 import org.apache.accumulo.core.client.mapreduce.lib.partition.RangePartitioner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.examples.simple.mapreduce.JobUtil;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -133,7 +133,7 @@ public class BulkIngestExample extends Configured implements Tool {
 
       Collection<Text> splits = connector.tableOperations().listSplits(opts.tableName, 100);
       for (Text split : splits)
-        out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));
+        out.println(Base64.encodeBase64String(TextUtil.getBytes(split)));
 
       job.setNumReduceTasks(splits.size() + 1);
       out.close();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 96affb5..5e32a55 100644
--- a/pom.xml
+++ b/pom.xml
@@ -161,7 +161,7 @@
       <dependency>
         <groupId>commons-codec</groupId>
         <artifactId>commons-codec</artifactId>
-        <version>1.7</version>
+        <version>1.4</version>
       </dependency>
       <dependency>
         <groupId>commons-collections</groupId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index 34abb01..436667c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -336,7 +336,7 @@ public class VolumeUtil {
   private static String hash(FileSystem fs, Path dir, String name) throws IOException {
     FSDataInputStream in = fs.open(new Path(dir, name));
     try {
-      return DigestUtils.sha1Hex(in);
+      return DigestUtils.shaHex(in);
     } finally {
       in.close();
     }

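Aside: shaHex is the commons-codec 1.4 spelling for a hex-encoded SHA-1 digest; the sha1Hex alias only arrived in a later codec release, so it is unavailable when Hadoop pins codec 1.4. A minimal sketch of the same pattern (demo class made up; DigestUtils.shaHex(InputStream) exists as of codec 1.4):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.codec.digest.DigestUtils;

public class ShaHexDemo {
  public static void main(String[] args) throws IOException {
    // Streams the file and returns the SHA-1 digest as lowercase hex.
    InputStream in = new FileInputStream(args[0]);
    try {
      System.out.println(DigestUtils.shaHex(in));
    } finally {
      in.close();
    }
  }
}
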
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
index 5749523..f4d5591 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
@@ -35,9 +35,9 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SkippingIterator;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -182,7 +182,7 @@ public class TabletStateChangeIterator extends SkippingIterator {
     } catch (Exception ex) {
       throw new RuntimeException(ex);
     }
-    String encoded = new String(Base64.encodeBase64(Arrays.copyOf(buffer.getData(), buffer.getLength())), Constants.UTF8);
+    String encoded = Base64.encodeBase64String(Arrays.copyOf(buffer.getData(), buffer.getLength()));
     cfg.addOption(MERGES_OPTION, encoded);
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java b/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
index b5d7aba..767ed25 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
@@ -31,10 +31,10 @@ import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Writable;
 
 /**

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java b/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
index 30aa2eb..504956f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
@@ -21,9 +21,9 @@ import java.io.UnsupportedEncodingException;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
@@ -108,7 +108,7 @@ public class DumpZookeeper {
     for (int i = 0; i < data.length; i++) {
       // does this look like simple ascii?
       if (data[i] < ' ' || data[i] > '~')
-        return new Encoded("base64", new String(Base64.encodeBase64(data), Constants.UTF8));
+        return new Encoded("base64", Base64.encodeBase64String(data));
     }
     return new Encoded(Constants.UTF8.name(), new String(data, Constants.UTF8));
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java b/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
index 37ef5f1..a08000e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
@@ -25,10 +25,10 @@ import javax.xml.parsers.SAXParserFactory;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
index 577f5d5..c2bb7a9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
@@ -27,6 +27,7 @@ import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
@@ -35,7 +36,6 @@ import org.apache.accumulo.fate.zookeeper.ZooReservation;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.zookeeper.ZooQueueLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
 
@@ -117,7 +117,7 @@ public class Utils {
     Instance instance = HdfsZooInstance.getInstance();
 
     String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
-        + new String(Base64.encodeBase64(directory.getBytes(Constants.UTF8)), Constants.UTF8);
+        + Base64.encodeBase64String(directory.getBytes(Constants.UTF8));
 
     IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
 
@@ -130,7 +130,7 @@ public class Utils {
   public static void unreserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
     Instance instance = HdfsZooInstance.getInstance();
     String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
-        + new String(Base64.encodeBase64(directory.getBytes(Constants.UTF8)), Constants.UTF8);
+        + Base64.encodeBase64String(directory.getBytes(Constants.UTF8));
     ZooReservation.release(ZooReaderWriter.getRetryingInstance(), resvPath, String.format("%016x", tid));
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
index 9f1bd1f..a1ee765 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
@@ -35,6 +35,7 @@ import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.ActionStats;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.Duration;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.monitor.Monitor;
@@ -53,7 +54,6 @@ import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.ActionStatsUpdator;
 import org.apache.accumulo.server.util.TableInfoUtil;
 import org.apache.accumulo.trace.instrument.Tracer;
-import org.apache.commons.codec.binary.Base64;
 
 import com.google.common.net.HostAndPort;
 
@@ -169,7 +169,7 @@ public class TServersServlet extends BasicServlet {
       if (extent.getEndRow() != null && extent.getEndRow().getLength() > 0) {
         digester.update(extent.getEndRow().getBytes(), 0, extent.getEndRow().getLength());
       }
-      String obscuredExtent = new String(Base64.encodeBase64(digester.digest()), Constants.UTF8);
+      String obscuredExtent = Base64.encodeBase64String(digester.digest());
       String displayExtent = String.format("<code>[%s]</code>", obscuredExtent);
       
       TableRow row = perTabletResults.prepareRow();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
index 41acce2..df4a62c 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
@@ -32,12 +32,12 @@ import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.test.randomwalk.State;
 import org.apache.accumulo.test.randomwalk.Test;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -169,7 +169,7 @@ public class BulkInsert extends Test {
     
     Collection<Text> splits = conn.tableOperations().listSplits(tableName, maxSplits);
     for (Text split : splits)
-      out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split)), Constants.UTF8));
+      out.println(Base64.encodeBase64String(TextUtil.getBytes(split)));
     
     out.close();
     

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
index a35a19a..cfc274a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
@@ -36,7 +36,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;


[3/4] Merge branch '1.6.1-SNAPSHOT'

Posted by bu...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/mapreduce/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --cc mapreduce/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
index 13490e0,0000000..9e6958a
mode 100644,000000..100644
--- a/mapreduce/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
+++ b/mapreduce/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
@@@ -1,285 -1,0 +1,285 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapred;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +import java.util.List;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.user.RegExFilter;
 +import org.apache.accumulo.core.iterators.user.WholeRowIterator;
- import org.apache.commons.codec.binary.Base64;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.conf.Configured;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.mapred.JobClient;
 +import org.apache.hadoop.mapred.JobConf;
 +import org.apache.hadoop.mapred.Mapper;
 +import org.apache.hadoop.mapred.OutputCollector;
 +import org.apache.hadoop.mapred.Reporter;
 +import org.apache.hadoop.mapred.lib.NullOutputFormat;
 +import org.apache.hadoop.util.Tool;
 +import org.apache.hadoop.util.ToolRunner;
 +import org.junit.Before;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +public class AccumuloInputFormatTest {
 +
 +  private static final String PREFIX = AccumuloInputFormatTest.class.getSimpleName();
 +  private static final String INSTANCE_NAME = PREFIX + "_mapred_instance";
 +  private static final String TEST_TABLE_1 = PREFIX + "_mapred_table_1";
 +
 +  private JobConf job;
 +
 +  @BeforeClass
 +  public static void setupClass() {
 +    System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
 +  }
 +
 +  @Before
 +  public void createJob() {
 +    job = new JobConf();
 +  }
 +
 +  /**
 +   * Check that the iterator configuration is getting stored in the Job conf correctly.
 +   */
 +  @Test
 +  public void testSetIterator() throws IOException {
 +    IteratorSetting is = new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator");
 +    AccumuloInputFormat.addIterator(job, is);
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    is.write(new DataOutputStream(baos));
 +    String iterators = job.get("AccumuloInputFormat.ScanOpts.Iterators");
-     assertEquals(new String(Base64.encodeBase64(baos.toByteArray())), iterators);
++    assertEquals(Base64.encodeBase64String(baos.toByteArray()), iterators);
 +  }
 +
 +  @Test
 +  public void testAddIterator() throws IOException {
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", WholeRowIterator.class));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
 +    IteratorSetting iter = new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator");
 +    iter.addOption("v1", "1");
 +    iter.addOption("junk", "\0omg:!\\xyzzy");
 +    AccumuloInputFormat.addIterator(job, iter);
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +
 +    // Check the list size
 +    assertTrue(list.size() == 3);
 +
 +    // Walk the list and make sure our settings are correct
 +    IteratorSetting setting = list.get(0);
 +    assertEquals(1, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.user.WholeRowIterator", setting.getIteratorClass());
 +    assertEquals("WholeRow", setting.getName());
 +    assertEquals(0, setting.getOptions().size());
 +
 +    setting = list.get(1);
 +    assertEquals(2, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
 +    assertEquals("Versions", setting.getName());
 +    assertEquals(0, setting.getOptions().size());
 +
 +    setting = list.get(2);
 +    assertEquals(3, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
 +    assertEquals("Count", setting.getName());
 +    assertEquals(2, setting.getOptions().size());
 +    assertEquals("1", setting.getOptions().get("v1"));
 +    assertEquals("\0omg:!\\xyzzy", setting.getOptions().get("junk"));
 +  }
 +
 +  /**
 +   * Test adding iterator options where the keys and values contain both the FIELD_SEPARATOR character (':') and ITERATOR_SEPARATOR (',') characters. There
 +   * should be no exceptions thrown when trying to parse these types of option entries.
 +   * 
 +   * This test makes sure that the expected raw values, as appears in the Job, are equal to what's expected.
 +   */
 +  @Test
 +  public void testIteratorOptionEncoding() throws Throwable {
 +    String key = "colon:delimited:key";
 +    String value = "comma,delimited,value";
 +    IteratorSetting someSetting = new IteratorSetting(1, "iterator", "Iterator.class");
 +    someSetting.addOption(key, value);
 +    AccumuloInputFormat.addIterator(job, someSetting);
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +    assertEquals(1, list.size());
 +    assertEquals(1, list.get(0).getOptions().size());
 +    assertEquals(list.get(0).getOptions().get(key), value);
 +
 +    someSetting.addOption(key + "2", value);
 +    someSetting.setPriority(2);
 +    someSetting.setName("it2");
 +    AccumuloInputFormat.addIterator(job, someSetting);
 +    list = AccumuloInputFormat.getIterators(job);
 +    assertEquals(2, list.size());
 +    assertEquals(1, list.get(0).getOptions().size());
 +    assertEquals(list.get(0).getOptions().get(key), value);
 +    assertEquals(2, list.get(1).getOptions().size());
 +    assertEquals(list.get(1).getOptions().get(key), value);
 +    assertEquals(list.get(1).getOptions().get(key + "2"), value);
 +  }
 +
 +  /**
 +   * Test getting iterator settings for multiple iterators set
 +   */
 +  @Test
 +  public void testGetIteratorSettings() throws IOException {
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator"));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator"));
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +
 +    // Check the list size
 +    assertTrue(list.size() == 3);
 +
 +    // Walk the list and make sure our settings are correct
 +    IteratorSetting setting = list.get(0);
 +    assertEquals(1, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.WholeRowIterator", setting.getIteratorClass());
 +    assertEquals("WholeRow", setting.getName());
 +
 +    setting = list.get(1);
 +    assertEquals(2, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
 +    assertEquals("Versions", setting.getName());
 +
 +    setting = list.get(2);
 +    assertEquals(3, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
 +    assertEquals("Count", setting.getName());
 +
 +  }
 +
 +  @Test
 +  public void testSetRegex() throws IOException {
 +    String regex = ">\"*%<>\'\\";
 +
 +    IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
 +    RegExFilter.setRegexs(is, regex, null, null, null, false);
 +    AccumuloInputFormat.addIterator(job, is);
 +
 +    assertTrue(regex.equals(AccumuloInputFormat.getIterators(job).get(0).getName()));
 +  }
 +
 +  private static AssertionError e1 = null;
 +  private static AssertionError e2 = null;
 +
 +  private static class MRTester extends Configured implements Tool {
 +    private static class TestMapper implements Mapper<Key,Value,Key,Value> {
 +      Key key = null;
 +      int count = 0;
 +
 +      @Override
 +      public void map(Key k, Value v, OutputCollector<Key,Value> output, Reporter reporter) throws IOException {
 +        try {
 +          if (key != null)
 +            assertEquals(key.getRow().toString(), new String(v.get()));
 +          assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
 +          assertEquals(new String(v.get()), String.format("%09x", count));
 +        } catch (AssertionError e) {
 +          e1 = e;
 +        }
 +        key = new Key(k);
 +        count++;
 +      }
 +
 +      @Override
 +      public void configure(JobConf job) {}
 +
 +      @Override
 +      public void close() throws IOException {
 +        try {
 +          assertEquals(100, count);
 +        } catch (AssertionError e) {
 +          e2 = e;
 +        }
 +      }
 +
 +    }
 +
 +    @Override
 +    public int run(String[] args) throws Exception {
 +
 +      if (args.length != 3) {
 +        throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <user> <pass> <table>");
 +      }
 +
 +      String user = args[0];
 +      String pass = args[1];
 +      String table = args[2];
 +
 +      JobConf job = new JobConf(getConf());
 +      job.setJarByClass(this.getClass());
 +
 +      job.setInputFormat(AccumuloInputFormat.class);
 +
 +      AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
 +      AccumuloInputFormat.setInputTableName(job, table);
 +      AccumuloInputFormat.setMockInstance(job, INSTANCE_NAME);
 +
 +      job.setMapperClass(TestMapper.class);
 +      job.setMapOutputKeyClass(Key.class);
 +      job.setMapOutputValueClass(Value.class);
 +      job.setOutputFormat(NullOutputFormat.class);
 +
 +      job.setNumReduceTasks(0);
 +
 +      return JobClient.runJob(job).isSuccessful() ? 0 : 1;
 +    }
 +
 +    public static void main(String... args) throws Exception {
 +      assertEquals(0, ToolRunner.run(new Configuration(), new MRTester(), args));
 +    }
 +  }
 +
 +  @Test
 +  public void testMap() throws Exception {
 +    MockInstance mockInstance = new MockInstance(INSTANCE_NAME);
 +    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
 +    c.tableOperations().create(TEST_TABLE_1);
 +    BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    MRTester.main("root", "", TEST_TABLE_1);
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +}

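The option-encoding tests above hinge on the serialized IteratorSetting being Base64-encoded before it lands in the job configuration; that is what makes ':' and ',' inside option keys and values safe to store. A minimal standalone sketch of that path (hypothetical class name; the real plumbing lives in the mapreduce lib.impl configurators):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.util.Base64;
    import org.apache.hadoop.conf.Configuration;

    public class IteratorConfSketch {
      public static void main(String[] args) throws Exception {
        IteratorSetting is = new IteratorSetting(1, "iterator", "Iterator.class");
        is.addOption("colon:delimited:key", "comma,delimited,value"); // separators are harmless once encoded
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        is.write(new DataOutputStream(baos)); // Writable-style serialization, as in testSetIterator
        Configuration conf = new Configuration();
        // Must stay a single line: chunked Base64 would inject CRLFs into the conf value.
        conf.set("AccumuloInputFormat.ScanOpts.Iterators", Base64.encodeBase64String(baos.toByteArray()));
        System.out.println(conf.get("AccumuloInputFormat.ScanOpts.Iterators"));
      }
    }
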
http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --cc mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
index 2500972,0000000..3844cd9
mode 100644,000000..100644
--- a/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
+++ b/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
@@@ -1,412 -1,0 +1,412 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.user.RegExFilter;
 +import org.apache.accumulo.core.iterators.user.WholeRowIterator;
 +import org.apache.accumulo.core.security.Authorizations;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.accumulo.core.util.Pair;
- import org.apache.commons.codec.binary.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.conf.Configured;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.mapreduce.InputFormat;
 +import org.apache.hadoop.mapreduce.InputSplit;
 +import org.apache.hadoop.mapreduce.Job;
 +import org.apache.hadoop.mapreduce.Mapper;
 +import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 +import org.apache.hadoop.util.Tool;
 +import org.apache.hadoop.util.ToolRunner;
 +import org.apache.log4j.Level;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
 +public class AccumuloInputFormatTest {
 +
 +  private static final String PREFIX = AccumuloInputFormatTest.class.getSimpleName();
 +
 +  /**
 +   * Check that the iterator configuration is getting stored in the Job conf correctly.
 +   */
 +  @Test
 +  public void testSetIterator() throws IOException {
 +    @SuppressWarnings("deprecation")
 +    Job job = new Job();
 +
 +    IteratorSetting is = new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator");
 +    AccumuloInputFormat.addIterator(job, is);
 +    Configuration conf = job.getConfiguration();
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    is.write(new DataOutputStream(baos));
 +    String iterators = conf.get("AccumuloInputFormat.ScanOpts.Iterators");
-     assertEquals(new String(Base64.encodeBase64(baos.toByteArray())), iterators);
++    assertEquals(Base64.encodeBase64String(baos.toByteArray()), iterators);
 +  }
 +
 +  @Test
 +  public void testAddIterator() throws IOException {
 +    @SuppressWarnings("deprecation")
 +    Job job = new Job();
 +
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", WholeRowIterator.class));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
 +    IteratorSetting iter = new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator");
 +    iter.addOption("v1", "1");
 +    iter.addOption("junk", "\0omg:!\\xyzzy");
 +    AccumuloInputFormat.addIterator(job, iter);
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +
 +    // Check the list size
 +    assertTrue(list.size() == 3);
 +
 +    // Walk the list and make sure our settings are correct
 +    IteratorSetting setting = list.get(0);
 +    assertEquals(1, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.user.WholeRowIterator", setting.getIteratorClass());
 +    assertEquals("WholeRow", setting.getName());
 +    assertEquals(0, setting.getOptions().size());
 +
 +    setting = list.get(1);
 +    assertEquals(2, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
 +    assertEquals("Versions", setting.getName());
 +    assertEquals(0, setting.getOptions().size());
 +
 +    setting = list.get(2);
 +    assertEquals(3, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
 +    assertEquals("Count", setting.getName());
 +    assertEquals(2, setting.getOptions().size());
 +    assertEquals("1", setting.getOptions().get("v1"));
 +    assertEquals("\0omg:!\\xyzzy", setting.getOptions().get("junk"));
 +  }
 +
 +  /**
 +   * Test adding iterator options where the keys and values contain both the FIELD_SEPARATOR (':') and ITERATOR_SEPARATOR (',') characters. There
 +   * should be no exceptions thrown when trying to parse these types of option entries.
 +   * 
 +   * This test makes sure that the raw values, as they appear in the Job, are equal to what's expected.
 +   */
 +  @Test
 +  public void testIteratorOptionEncoding() throws Throwable {
 +    String key = "colon:delimited:key";
 +    String value = "comma,delimited,value";
 +    IteratorSetting someSetting = new IteratorSetting(1, "iterator", "Iterator.class");
 +    someSetting.addOption(key, value);
 +    @SuppressWarnings("deprecation")
 +    Job job = new Job();
 +    AccumuloInputFormat.addIterator(job, someSetting);
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +    assertEquals(1, list.size());
 +    assertEquals(1, list.get(0).getOptions().size());
 +    assertEquals(list.get(0).getOptions().get(key), value);
 +
 +    someSetting.addOption(key + "2", value);
 +    someSetting.setPriority(2);
 +    someSetting.setName("it2");
 +    AccumuloInputFormat.addIterator(job, someSetting);
 +    list = AccumuloInputFormat.getIterators(job);
 +    assertEquals(2, list.size());
 +    assertEquals(1, list.get(0).getOptions().size());
 +    assertEquals(list.get(0).getOptions().get(key), value);
 +    assertEquals(2, list.get(1).getOptions().size());
 +    assertEquals(list.get(1).getOptions().get(key), value);
 +    assertEquals(list.get(1).getOptions().get(key + "2"), value);
 +  }
 +
 +  /**
 +   * Test getting iterator settings for multiple iterators set
 +   */
 +  @Test
 +  public void testGetIteratorSettings() throws IOException {
 +    @SuppressWarnings("deprecation")
 +    Job job = new Job();
 +
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator"));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator"));
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +
 +    // Check the list size
 +    assertTrue(list.size() == 3);
 +
 +    // Walk the list and make sure our settings are correct
 +    IteratorSetting setting = list.get(0);
 +    assertEquals(1, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.WholeRowIterator", setting.getIteratorClass());
 +    assertEquals("WholeRow", setting.getName());
 +
 +    setting = list.get(1);
 +    assertEquals(2, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
 +    assertEquals("Versions", setting.getName());
 +
 +    setting = list.get(2);
 +    assertEquals(3, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
 +    assertEquals("Count", setting.getName());
 +
 +  }
 +
 +  @Test
 +  public void testSetRegex() throws IOException {
 +    @SuppressWarnings("deprecation")
 +    Job job = new Job();
 +
 +    String regex = ">\"*%<>\'\\";
 +
 +    IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
 +    RegExFilter.setRegexs(is, regex, null, null, null, false);
 +    AccumuloInputFormat.addIterator(job, is);
 +
 +    assertTrue(regex.equals(AccumuloInputFormat.getIterators(job).get(0).getName()));
 +  }
 +
 +  private static AssertionError e1 = null;
 +  private static AssertionError e2 = null;
 +
 +  private static class MRTester extends Configured implements Tool {
 +    private static class TestMapper extends Mapper<Key,Value,Key,Value> {
 +      Key key = null;
 +      int count = 0;
 +
 +      @Override
 +      protected void map(Key k, Value v, Context context) throws IOException, InterruptedException {
 +        try {
 +          if (key != null)
 +            assertEquals(key.getRow().toString(), new String(v.get()));
 +          assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
 +          assertEquals(new String(v.get()), String.format("%09x", count));
 +        } catch (AssertionError e) {
 +          e1 = e;
 +        }
 +        key = new Key(k);
 +        count++;
 +      }
 +
 +      @Override
 +      protected void cleanup(Context context) throws IOException, InterruptedException {
 +        try {
 +          assertEquals(100, count);
 +        } catch (AssertionError e) {
 +          e2 = e;
 +        }
 +      }
 +    }
 +
 +    @Override
 +    public int run(String[] args) throws Exception {
 +
 +      if (args.length != 5) {
 +        throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <user> <pass> <table> <instanceName> <inputFormatClass>");
 +      }
 +
 +      String user = args[0];
 +      String pass = args[1];
 +      String table = args[2];
 +
 +      String instanceName = args[3];
 +      String inputFormatClassName = args[4];
 +      @SuppressWarnings("unchecked")
 +      Class<? extends InputFormat<?,?>> inputFormatClass = (Class<? extends InputFormat<?,?>>) Class.forName(inputFormatClassName);
 +
 +      @SuppressWarnings("deprecation")
 +      Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
 +      job.setJarByClass(this.getClass());
 +
 +      job.setInputFormatClass(inputFormatClass);
 +
 +      AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
 +      AccumuloInputFormat.setInputTableName(job, table);
 +      AccumuloInputFormat.setMockInstance(job, instanceName);
 +
 +      job.setMapperClass(TestMapper.class);
 +      job.setMapOutputKeyClass(Key.class);
 +      job.setMapOutputValueClass(Value.class);
 +      job.setOutputFormatClass(NullOutputFormat.class);
 +
 +      job.setNumReduceTasks(0);
 +
 +      job.waitForCompletion(true);
 +
 +      return job.isSuccessful() ? 0 : 1;
 +    }
 +
 +    public static int main(String[] args) throws Exception {
 +      return ToolRunner.run(CachedConfiguration.getInstance(), new MRTester(), args);
 +    }
 +  }
 +
 +  @Test
 +  public void testMap() throws Exception {
 +    final String INSTANCE_NAME = PREFIX + "_mapreduce_instance";
 +    final String TEST_TABLE_1 = PREFIX + "_mapreduce_table_1";
 +
 +    MockInstance mockInstance = new MockInstance(INSTANCE_NAME);
 +    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
 +    c.tableOperations().create(TEST_TABLE_1);
 +    BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    Assert.assertEquals(0, MRTester.main(new String[] {"root", "", TEST_TABLE_1, INSTANCE_NAME, AccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +
 +  @Test
 +  public void testCorrectRangeInputSplits() throws Exception {
 +    @SuppressWarnings("deprecation")
 +    Job job = new Job(new Configuration(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
 +
 +    String username = "user", table = "table", instance = "instance";
 +    PasswordToken password = new PasswordToken("password");
 +    Authorizations auths = new Authorizations("foo");
 +    Collection<Pair<Text,Text>> fetchColumns = Collections.singleton(new Pair<Text,Text>(new Text("foo"), new Text("bar")));
 +    boolean isolated = true, localIters = true;
 +    Level level = Level.WARN;
 +
 +    Instance inst = new MockInstance(instance);
 +    Connector connector = inst.getConnector(username, password);
 +    connector.tableOperations().create(table);
 +
 +    AccumuloInputFormat.setConnectorInfo(job, username, password);
 +    AccumuloInputFormat.setInputTableName(job, table);
 +    AccumuloInputFormat.setScanAuthorizations(job, auths);
 +    AccumuloInputFormat.setMockInstance(job, instance);
 +    AccumuloInputFormat.setScanIsolation(job, isolated);
 +    AccumuloInputFormat.setLocalIterators(job, localIters);
 +    AccumuloInputFormat.fetchColumns(job, fetchColumns);
 +    AccumuloInputFormat.setLogLevel(job, level);
 +
 +    AccumuloInputFormat aif = new AccumuloInputFormat();
 +
 +    List<InputSplit> splits = aif.getSplits(job);
 +
 +    Assert.assertEquals(1, splits.size());
 +
 +    InputSplit split = splits.get(0);
 +
 +    Assert.assertEquals(RangeInputSplit.class, split.getClass());
 +
 +    RangeInputSplit risplit = (RangeInputSplit) split;
 +
 +    Assert.assertEquals(username, risplit.getPrincipal());
 +    Assert.assertEquals(table, risplit.getTableName());
 +    Assert.assertEquals(password, risplit.getToken());
 +    Assert.assertEquals(auths, risplit.getAuths());
 +    Assert.assertEquals(instance, risplit.getInstanceName());
 +    Assert.assertEquals(isolated, risplit.isIsolatedScan());
 +    Assert.assertEquals(localIters, risplit.usesLocalIterators());
 +    Assert.assertEquals(fetchColumns, risplit.getFetchedColumns());
 +    Assert.assertEquals(level, risplit.getLogLevel());
 +  }
 +
 +  @Test
 +  public void testPartialInputSplitDelegationToConfiguration() throws Exception {
 +    String user = "testPartialInputSplitUser";
 +    PasswordToken password = new PasswordToken("");
 +
 +    MockInstance mockInstance = new MockInstance("testPartialInputSplitDelegationToConfiguration");
 +    Connector c = mockInstance.getConnector(user, password);
 +    c.tableOperations().create("testtable");
 +    BatchWriter bw = c.createBatchWriter("testtable", new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    Assert.assertEquals(
 +        0,
 +        MRTester.main(new String[] {user, "", "testtable", "testPartialInputSplitDelegationToConfiguration",
 +            EmptySplitsAccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +
 +  @Test
 +  public void testPartialFailedInputSplitDelegationToConfiguration() throws Exception {
 +    String user = "testPartialFailedInputSplit";
 +    PasswordToken password = new PasswordToken("");
 +
 +    MockInstance mockInstance = new MockInstance("testPartialFailedInputSplitDelegationToConfiguration");
 +    Connector c = mockInstance.getConnector(user, password);
 +    c.tableOperations().create("testtable");
 +    BatchWriter bw = c.createBatchWriter("testtable", new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    // We should fail before we even get into the Mapper because we can't make the RecordReader
 +    Assert.assertEquals(
 +        1,
 +        MRTester.main(new String[] {user, "", "testtable", "testPartialFailedInputSplitDelegationToConfiguration",
 +            BadPasswordSplitsAccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +
 +  @Test
 +  public void testEmptyColumnFamily() throws IOException {
 +    @SuppressWarnings("deprecation")
 +    Job job = new Job();
 +    Set<Pair<Text,Text>> cols = new HashSet<Pair<Text,Text>>();
 +    cols.add(new Pair<Text,Text>(new Text(""), null));
 +    cols.add(new Pair<Text,Text>(new Text("foo"), new Text("bar")));
 +    cols.add(new Pair<Text,Text>(new Text(""), new Text("bar")));
 +    cols.add(new Pair<Text,Text>(new Text(""), new Text("")));
 +    cols.add(new Pair<Text,Text>(new Text("foo"), new Text("")));
 +    AccumuloInputFormat.fetchColumns(job, cols);
 +    Set<Pair<Text,Text>> setCols = AccumuloInputFormat.getFetchedColumns(job);
 +    assertEquals(cols, setCols);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
----------------------------------------------------------------------
diff --cc mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
index 1983470,0000000..d5ebb22
mode 100644,000000..100644
--- a/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
+++ b/mapreduce/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
@@@ -1,129 -1,0 +1,129 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce.lib.impl;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertTrue;
 +
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.ClientConfiguration;
 +import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
- import org.apache.commons.codec.binary.Base64;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.log4j.Level;
 +import org.apache.log4j.Logger;
 +import org.junit.Test;
 +
 +/**
 + * 
 + */
 +public class ConfiguratorBaseTest {
 +
 +  private static enum PrivateTestingEnum {
 +    SOMETHING, SOMETHING_ELSE
 +  }
 +
 +  @Test
 +  public void testEnumToConfKey() {
 +    assertEquals(this.getClass().getSimpleName() + ".PrivateTestingEnum.Something",
 +        ConfiguratorBase.enumToConfKey(this.getClass(), PrivateTestingEnum.SOMETHING));
 +    assertEquals(this.getClass().getSimpleName() + ".PrivateTestingEnum.SomethingElse",
 +        ConfiguratorBase.enumToConfKey(this.getClass(), PrivateTestingEnum.SOMETHING_ELSE));
 +  }
 +
 +  @Test
 +  public void testSetConnectorInfoClassOfQConfigurationStringAuthenticationToken() throws AccumuloSecurityException {
 +    Configuration conf = new Configuration();
 +    assertFalse(ConfiguratorBase.isConnectorInfoSet(this.getClass(), conf));
 +    ConfiguratorBase.setConnectorInfo(this.getClass(), conf, "testUser", new PasswordToken("testPassword"));
 +    assertTrue(ConfiguratorBase.isConnectorInfoSet(this.getClass(), conf));
 +    assertEquals("testUser", ConfiguratorBase.getPrincipal(this.getClass(), conf));
 +    AuthenticationToken token = ConfiguratorBase.getAuthenticationToken(this.getClass(), conf);
 +    assertEquals(PasswordToken.class, token.getClass());
 +    assertEquals(new PasswordToken("testPassword"), token);
 +    assertEquals(
 +        "inline:" + PasswordToken.class.getName() + ":" + Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(new PasswordToken("testPassword"))),
 +        conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.ConnectorInfo.TOKEN)));
 +  }
 +
 +  @Test
 +  public void testSetConnectorInfoClassOfQConfigurationStringString() throws AccumuloSecurityException {
 +    Configuration conf = new Configuration();
 +    assertFalse(ConfiguratorBase.isConnectorInfoSet(this.getClass(), conf));
 +    ConfiguratorBase.setConnectorInfo(this.getClass(), conf, "testUser", "testFile");
 +    assertTrue(ConfiguratorBase.isConnectorInfoSet(this.getClass(), conf));
 +    assertEquals("testUser", ConfiguratorBase.getPrincipal(this.getClass(), conf));
 +    assertEquals("file:testFile", conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.ConnectorInfo.TOKEN)));
 +  }
 +
 +  @Test
 +  public void testSetZooKeeperInstance() {
 +    Configuration conf = new Configuration();
 +    ConfiguratorBase.setZooKeeperInstance(this.getClass(), conf, new ClientConfiguration().withInstance("testInstanceName").withZkHosts("testZooKeepers")
 +        .withSsl(true).withZkTimeout(1234));
 +    ClientConfiguration clientConf = ClientConfiguration.deserialize(conf.get(ConfiguratorBase.enumToConfKey(this.getClass(),
 +        ConfiguratorBase.InstanceOpts.CLIENT_CONFIG)));
 +    assertEquals("testInstanceName", clientConf.get(ClientProperty.INSTANCE_NAME));
 +    assertEquals("testZooKeepers", clientConf.get(ClientProperty.INSTANCE_ZK_HOST));
 +    assertEquals("true", clientConf.get(ClientProperty.INSTANCE_RPC_SSL_ENABLED));
 +    assertEquals("1234", clientConf.get(ClientProperty.INSTANCE_ZK_TIMEOUT));
 +    assertEquals(ZooKeeperInstance.class.getSimpleName(), conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.TYPE)));
 +
 +    Instance instance = ConfiguratorBase.getInstance(this.getClass(), conf);
 +    assertEquals(ZooKeeperInstance.class.getName(), instance.getClass().getName());
 +    assertEquals("testInstanceName", ((ZooKeeperInstance) instance).getInstanceName());
 +    assertEquals("testZooKeepers", ((ZooKeeperInstance) instance).getZooKeepers());
 +    assertEquals(1234000, ((ZooKeeperInstance) instance).getZooKeepersSessionTimeOut());
 +  }
 +
 +  @Test
 +  public void testSetMockInstance() {
 +    Configuration conf = new Configuration();
 +    ConfiguratorBase.setMockInstance(this.getClass(), conf, "testInstanceName");
 +    assertEquals("testInstanceName", conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.NAME)));
 +    assertEquals(null, conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.ZOO_KEEPERS)));
 +    assertEquals(MockInstance.class.getSimpleName(), conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.TYPE)));
 +    Instance instance = ConfiguratorBase.getInstance(this.getClass(), conf);
 +    assertEquals(MockInstance.class.getName(), instance.getClass().getName());
 +  }
 +
 +  @Test
 +  public void testSetLogLevel() {
 +    Configuration conf = new Configuration();
 +    Level currentLevel = Logger.getLogger(this.getClass()).getLevel();
 +
 +    ConfiguratorBase.setLogLevel(this.getClass(), conf, Level.DEBUG);
 +    Logger.getLogger(this.getClass()).setLevel(currentLevel);
 +    assertEquals(Level.DEBUG, ConfiguratorBase.getLogLevel(this.getClass(), conf));
 +
 +    ConfiguratorBase.setLogLevel(this.getClass(), conf, Level.INFO);
 +    Logger.getLogger(this.getClass()).setLevel(currentLevel);
 +    assertEquals(Level.INFO, ConfiguratorBase.getLogLevel(this.getClass(), conf));
 +
 +    ConfiguratorBase.setLogLevel(this.getClass(), conf, Level.FATAL);
 +    Logger.getLogger(this.getClass()).setLevel(currentLevel);
 +    assertEquals(Level.FATAL, ConfiguratorBase.getLogLevel(this.getClass(), conf));
 +  }
 +
 +}

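The exact-match assertion on the serialized token above is where chunked output would bite: commons-codec 1.4's encodeBase64String wraps its result in 76-character CRLF-terminated lines (unchunked output only became the default in codec 1.5), which would mangle a single-line Configuration value. A small demo of the difference, using only the two-argument encodeBase64 overload present in both codec versions:

    import java.nio.charset.StandardCharsets;
    import org.apache.commons.codec.binary.Base64;

    public class ChunkingDemo {
      public static void main(String[] args) {
        byte[] data = new byte[100]; // long enough to spill past one 76-char chunk
        String chunked = new String(Base64.encodeBase64(data, true), StandardCharsets.UTF_8);
        String flat = new String(Base64.encodeBase64(data, false), StandardCharsets.UTF_8);
        System.out.println(chunked.contains("\r\n")); // true: embedded line breaks
        System.out.println(flat.contains("\r\n"));    // false: what core.util Base64 enforces
      }
    }
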
http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
index 6c94756,f4d5591..e06b58e
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
@@@ -35,8 -35,9 +35,8 @@@ import org.apache.accumulo.core.iterato
  import org.apache.accumulo.core.iterators.SkippingIterator;
  import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
  import org.apache.accumulo.core.util.AddressUtil;
+ import org.apache.accumulo.core.util.Base64;
 -import org.apache.accumulo.core.util.StringUtil;
  import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
- import org.apache.commons.codec.binary.Base64;
  import org.apache.hadoop.io.DataInputBuffer;
  import org.apache.hadoop.io.DataOutputBuffer;
  import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
index 289923e,504956f..565eaba
--- a/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
@@@ -18,12 -18,12 +18,12 @@@ package org.apache.accumulo.server.util
  
  import java.io.PrintStream;
  import java.io.UnsupportedEncodingException;
 +import java.nio.charset.StandardCharsets;
  
 -import org.apache.accumulo.core.Constants;
  import org.apache.accumulo.core.cli.Help;
+ import org.apache.accumulo.core.util.Base64;
  import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
  import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
- import org.apache.commons.codec.binary.Base64;
  import org.apache.log4j.Level;
  import org.apache.log4j.Logger;
  import org.apache.zookeeper.KeeperException;
@@@ -108,9 -108,9 +108,9 @@@ public class DumpZookeeper 
      for (int i = 0; i < data.length; i++) {
        // does this look like simple ascii?
        if (data[i] < ' ' || data[i] > '~')
-         return new Encoded("base64", new String(Base64.encodeBase64(data), StandardCharsets.UTF_8));
+         return new Encoded("base64", Base64.encodeBase64String(data));
      }
 -    return new Encoded(Constants.UTF8.name(), new String(data, Constants.UTF8));
 +    return new Encoded(StandardCharsets.UTF_8.name(), new String(data, StandardCharsets.UTF_8));
    }
    
    private static void write(PrintStream out, int indent, String fmt, Object... args) {

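The hunk above keeps DumpZookeeper's printable-ASCII heuristic and only swaps the encoder. Roughly, the per-znode decision it makes looks like this (a hypothetical standalone mirror, not the committed code):

    import java.nio.charset.StandardCharsets;
    import org.apache.accumulo.core.util.Base64;

    public class RenderSketch {
      // Printable ASCII dumps as UTF-8; anything else as non-chunked Base64.
      static String render(byte[] data) {
        for (byte b : data)
          if (b < ' ' || b > '~')
            return Base64.encodeBase64String(data);
        return new String(data, StandardCharsets.UTF_8);
      }

      public static void main(String[] args) {
        System.out.println(render("plain".getBytes(StandardCharsets.UTF_8))); // plain
        System.out.println(render(new byte[] {0, 1, 2}));                     // AAEC
      }
    }
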
http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
index 71c1146,a08000e..3a8eb0b
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
@@@ -24,7 -23,9 +24,8 @@@ import java.util.Stack
  import javax.xml.parsers.SAXParser;
  import javax.xml.parsers.SAXParserFactory;
  
 -import org.apache.accumulo.core.Constants;
  import org.apache.accumulo.core.cli.Help;
+ import org.apache.accumulo.core.util.Base64;
  import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
  import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
  import org.apache.accumulo.server.zookeeper.ZooReaderWriter;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
index 5f036e0,c2bb7a9..a741085
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
@@@ -118,7 -117,7 +118,7 @@@ public class Utils 
      Instance instance = HdfsZooInstance.getInstance();
  
      String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
-         + new String(Base64.encodeBase64(directory.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
 -        + Base64.encodeBase64String(directory.getBytes(Constants.UTF8));
++        + Base64.encodeBase64String(directory.getBytes(StandardCharsets.UTF_8));
  
      IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
  
@@@ -131,7 -130,7 +131,7 @@@
    public static void unreserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
      Instance instance = HdfsZooInstance.getInstance();
      String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
-         + new String(Base64.encodeBase64(directory.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
 -        + Base64.encodeBase64String(directory.getBytes(Constants.UTF8));
++        + Base64.encodeBase64String(directory.getBytes(StandardCharsets.UTF_8));
      ZooReservation.release(ZooReaderWriter.getRetryingInstance(), resvPath, String.format("%016x", tid));
    }
  

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/shell/src/main/java/org/apache/accumulo/shell/ShellUtil.java
----------------------------------------------------------------------
diff --cc shell/src/main/java/org/apache/accumulo/shell/ShellUtil.java
index c0f7a9a,0000000..d8b9f65
mode 100644,000000..100644
--- a/shell/src/main/java/org/apache/accumulo/shell/ShellUtil.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/ShellUtil.java
@@@ -1,60 -1,0 +1,60 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.shell;
 +
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.nio.charset.StandardCharsets;
 +import java.util.List;
 +import java.util.Scanner;
 +
- import org.apache.commons.codec.binary.Base64;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.hadoop.io.Text;
 +
 +import com.google.common.collect.Lists;
 +
 +public class ShellUtil {
 +
 +  /**
 +   * Scans the given file line-by-line (ignoring empty lines) and returns a list containing those lines. If decode is set to true, every line is decoded via
 +   * {@link Base64#decodeBase64(byte[])} from its UTF-8 bytes before being inserted into the list.
 +   * 
 +   * @param filename
 +   *          Path to the file that needs to be scanned
 +   * @param decode
 +   *          Whether to decode lines in the file
 +   * @return List of {@link Text} objects containing data in the given file
 +   * @throws FileNotFoundException
 +   *           if the given file doesn't exist
 +   */
 +  public static List<Text> scanFile(String filename, boolean decode) throws FileNotFoundException {
 +    String line;
 +    Scanner file = new Scanner(new File(filename), StandardCharsets.UTF_8.name());
 +    List<Text> result = Lists.newArrayList();
 +    try {
 +      while (file.hasNextLine()) {
 +        line = file.nextLine();
 +        if (!line.isEmpty()) {
 +          result.add(decode ? new Text(Base64.decodeBase64(line.getBytes(StandardCharsets.UTF_8))) : new Text(line));
 +        }
 +      }
 +    } finally {
 +      file.close();
 +    }
 +    return result;
 +  }
 +}

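A quick usage sketch for scanFile (hypothetical file paths; decode=true is the mode used when reading Base64-encoded split files back into the shell):

    import java.io.FileNotFoundException;
    import java.util.List;
    import org.apache.accumulo.shell.ShellUtil;
    import org.apache.hadoop.io.Text;

    public class ScanFileSketch {
      public static void main(String[] args) throws FileNotFoundException {
        List<Text> plain = ShellUtil.scanFile("/tmp/splits.txt", false); // raw lines, empties skipped
        List<Text> binary = ShellUtil.scanFile("/tmp/splits.b64", true); // each line Base64-decoded
        System.out.println(plain.size() + " / " + binary.size());
      }
    }
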
http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/shell/src/main/java/org/apache/accumulo/shell/commands/GetSplitsCommand.java
----------------------------------------------------------------------
diff --cc shell/src/main/java/org/apache/accumulo/shell/commands/GetSplitsCommand.java
index f275c39,0000000..26493fd
mode 100644,000000..100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/GetSplitsCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/GetSplitsCommand.java
@@@ -1,155 -1,0 +1,154 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.shell.commands;
 +
 +import java.io.IOException;
- import java.nio.charset.StandardCharsets;
 +import java.security.MessageDigest;
 +import java.security.NoSuchAlgorithmException;
 +import java.util.Iterator;
 +import java.util.Map.Entry;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.security.Authorizations;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.accumulo.core.util.TextUtil;
 +import org.apache.accumulo.core.util.format.BinaryFormatter;
 +import org.apache.accumulo.shell.Shell;
 +import org.apache.accumulo.shell.Shell.Command;
 +import org.apache.accumulo.shell.Shell.PrintFile;
 +import org.apache.accumulo.shell.Shell.PrintLine;
 +import org.apache.accumulo.shell.Shell.PrintShell;
 +import org.apache.commons.cli.CommandLine;
 +import org.apache.commons.cli.Option;
 +import org.apache.commons.cli.Options;
- import org.apache.commons.codec.binary.Base64;
 +import org.apache.hadoop.io.Text;
 +
 +public class GetSplitsCommand extends Command {
 +  
 +  private Option outputFileOpt, maxSplitsOpt, base64Opt, verboseOpt;
 +  
 +  @Override
 +  public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws IOException, AccumuloException, AccumuloSecurityException,
 +      TableNotFoundException {
 +    final String tableName = OptUtil.getTableOpt(cl, shellState);
 +    
 +    final String outputFile = cl.getOptionValue(outputFileOpt.getOpt());
 +    final String m = cl.getOptionValue(maxSplitsOpt.getOpt());
 +    final int maxSplits = m == null ? 0 : Integer.parseInt(m);
 +    final boolean encode = cl.hasOption(base64Opt.getOpt());
 +    final boolean verbose = cl.hasOption(verboseOpt.getOpt());
 +    
 +    final PrintLine p = outputFile == null ? new PrintShell(shellState.getReader()) : new PrintFile(outputFile);
 +    
 +    try {
 +      if (!verbose) {
 +        for (Text row : maxSplits > 0 ? shellState.getConnector().tableOperations().listSplits(tableName, maxSplits) : shellState.getConnector()
 +            .tableOperations().listSplits(tableName)) {
 +          p.print(encode(encode, row));
 +        }
 +      } else {
 +        String systemTableToCheck = MetadataTable.NAME.equals(tableName) ? RootTable.NAME : MetadataTable.NAME;
 +        final Scanner scanner = shellState.getConnector().createScanner(systemTableToCheck, Authorizations.EMPTY);
 +        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
 +        final Text start = new Text(shellState.getConnector().tableOperations().tableIdMap().get(tableName));
 +        final Text end = new Text(start);
 +        end.append(new byte[] {'<'}, 0, 1);
 +        scanner.setRange(new Range(start, end));
 +        for (Iterator<Entry<Key,Value>> iterator = scanner.iterator(); iterator.hasNext();) {
 +          final Entry<Key,Value> next = iterator.next();
 +          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(next.getKey())) {
 +            KeyExtent extent = new KeyExtent(next.getKey().getRow(), next.getValue());
 +            final String pr = encode(encode, extent.getPrevEndRow());
 +            final String er = encode(encode, extent.getEndRow());
 +            final String line = String.format("%-26s (%s, %s%s", obscuredTabletName(extent), pr == null ? "-inf" : pr, er == null ? "+inf" : er,
 +                er == null ? ") Default Tablet " : "]");
 +            p.print(line);
 +          }
 +        }
 +      }
 +      
 +    } finally {
 +      p.close();
 +    }
 +    
 +    return 0;
 +  }
 +  
 +  private static String encode(final boolean encode, final Text text) {
 +    if (text == null) {
 +      return null;
 +    }
 +    BinaryFormatter.getlength(text.getLength());
-     return encode ? new String(Base64.encodeBase64(TextUtil.getBytes(text)), StandardCharsets.UTF_8) : BinaryFormatter.appendText(new StringBuilder(), text).toString();
++    return encode ? Base64.encodeBase64String(TextUtil.getBytes(text)) : BinaryFormatter.appendText(new StringBuilder(), text).toString();
 +  }
 +  
 +  private static String obscuredTabletName(final KeyExtent extent) {
 +    MessageDigest digester;
 +    try {
 +      digester = MessageDigest.getInstance("MD5");
 +    } catch (NoSuchAlgorithmException e) {
 +      throw new RuntimeException(e);
 +    }
 +    if (extent.getEndRow() != null && extent.getEndRow().getLength() > 0) {
 +      digester.update(extent.getEndRow().getBytes(), 0, extent.getEndRow().getLength());
 +    }
-     return new String(Base64.encodeBase64(digester.digest()), StandardCharsets.UTF_8);
++    return Base64.encodeBase64String(digester.digest());
 +  }
 +  
 +  @Override
 +  public String description() {
 +    return "retrieves the current split points for tablets in the current table";
 +  }
 +  
 +  @Override
 +  public int numArgs() {
 +    return 0;
 +  }
 +  
 +  @Override
 +  public Options getOptions() {
 +    final Options opts = new Options();
 +    
 +    outputFileOpt = new Option("o", "output", true, "local file to write the splits to");
 +    outputFileOpt.setArgName("file");
 +    
 +    maxSplitsOpt = new Option("m", "max", true, "maximum number of splits to return (evenly spaced)");
 +    maxSplitsOpt.setArgName("num");
 +    
 +    base64Opt = new Option("b64", "base64encoded", false, "encode the split points");
 +    
 +    verboseOpt = new Option("v", "verbose", false, "print out the tablet information with start/end rows");
 +    
 +    opts.addOption(outputFileOpt);
 +    opts.addOption(maxSplitsOpt);
 +    opts.addOption(base64Opt);
 +    opts.addOption(verboseOpt);
 +    opts.addOption(OptUtil.tableOpt("table to get splits for"));
 +    
 +    return opts;
 +  }
 +}

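The -b64 output produced by encode() above is the binary-safe counterpart to ShellUtil.scanFile(file, true): split points containing non-printable bytes survive a one-line-per-split text file. A hypothetical round-trip:

    import java.nio.charset.StandardCharsets;
    import org.apache.accumulo.core.util.Base64;
    import org.apache.accumulo.core.util.TextUtil;
    import org.apache.hadoop.io.Text;

    public class SplitRoundTrip {
      public static void main(String[] args) {
        Text row = new Text(new byte[] {0x00, (byte) 0xff});             // not printable as-is
        String line = Base64.encodeBase64String(TextUtil.getBytes(row)); // what getsplits -b64 writes
        Text back = new Text(Base64.decodeBase64(line.getBytes(StandardCharsets.UTF_8)));
        System.out.println(row.equals(back)); // true
      }
    }
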
http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
----------------------------------------------------------------------
diff --cc shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
index 824517d,0000000..8cbf214
mode 100644,000000..100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
@@@ -1,62 -1,0 +1,62 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.shell.commands;
 +
 +import java.nio.charset.StandardCharsets;
 +import java.security.SecureRandom;
 +import java.util.Random;
 +
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.accumulo.shell.Shell;
 +import org.apache.accumulo.shell.ShellCommandException;
 +import org.apache.accumulo.shell.Shell.Command;
 +import org.apache.accumulo.shell.ShellCommandException.ErrorCode;
 +import org.apache.commons.cli.CommandLine;
- import org.apache.commons.codec.binary.Base64;
 +
 +public class HiddenCommand extends Command {
 +  private static Random rand = new SecureRandom();
 +  
 +  @Override
 +  public String description() {
 +    return "The first rule of Accumulo is: \"You don't talk about Accumulo.\"";
 +  }
 +  
 +  @Override
 +  public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws Exception {
 +    if (rand.nextInt(10) == 0) {
 +      shellState.getReader().beep();
 +      shellState.getReader().println();
 +      shellState.getReader().println(
 +          new String(Base64.decodeBase64(("ICAgICAgIC4tLS4KICAgICAgLyAvXCBcCiAgICAgKCAvLS1cICkKICAgICAuPl8gIF88LgogICAgLyB8ICd8ICcgXAog"
 +              + "ICAvICB8Xy58Xy4gIFwKICAvIC98ICAgICAgfFwgXAogfCB8IHwgfFwvfCB8IHwgfAogfF98IHwgfCAgfCB8IHxffAogICAgIC8gIF9fICBcCiAgICAvICAv"
 +              + "ICBcICBcCiAgIC8gIC8gICAgXCAgXF8KIHwvICAvICAgICAgXCB8IHwKIHxfXy8gICAgICAgIFx8X3wK").getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8));
 +    } else {
 +      throw new ShellCommandException(ErrorCode.UNRECOGNIZED_COMMAND, getName());
 +    }
 +    return 0;
 +  }
 +  
 +  @Override
 +  public int numArgs() {
 +    return Shell.NO_FIXED_ARG_LENGTH_CHECK;
 +  }
 +  
 +  @Override
 +  public String getName() {
 +    return "accvmvlo";
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/shell/src/test/java/org/apache/accumulo/shell/ShellUtilTest.java
----------------------------------------------------------------------
diff --cc shell/src/test/java/org/apache/accumulo/shell/ShellUtilTest.java
index 4e99336,0000000..934a41d
mode 100644,000000..100644
--- a/shell/src/test/java/org/apache/accumulo/shell/ShellUtilTest.java
+++ b/shell/src/test/java/org/apache/accumulo/shell/ShellUtilTest.java
@@@ -1,67 -1,0 +1,67 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.shell;
 +
 +import static org.junit.Assert.*;
 +
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.nio.charset.StandardCharsets;
 +import java.util.List;
 +
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.accumulo.shell.ShellUtil;
- import org.apache.commons.codec.binary.Base64;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Rule;
 +import org.junit.Test;
 +import org.junit.rules.TemporaryFolder;
 +
 +import com.google.common.collect.ImmutableList;
 +
 +public class ShellUtilTest {
 +
 +  @Rule
 +  public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
 +
 +  // String with 3 lines, with one empty line
 +  private static final String FILEDATA = "line1\n\nline2";
 +
 +  @Test
 +  public void testWithoutDecode() throws IOException {
 +    File testFile = new File(folder.getRoot(), "testFileNoDecode.txt");
 +    FileUtils.writeStringToFile(testFile, FILEDATA);
 +    List<Text> output = ShellUtil.scanFile(testFile.getAbsolutePath(), false);
 +    assertEquals(ImmutableList.of(new Text("line1"), new Text("line2")), output);
 +  }
 +
 +  @Test
 +  public void testWithDecode() throws IOException {
 +    File testFile = new File(folder.getRoot(), "testFileWithDecode.txt");
 +    FileUtils.writeStringToFile(testFile, FILEDATA);
 +    List<Text> output = ShellUtil.scanFile(testFile.getAbsolutePath(), true);
 +    assertEquals(
 +        ImmutableList.of(new Text(Base64.decodeBase64("line1".getBytes(StandardCharsets.UTF_8))), new Text(Base64.decodeBase64("line2".getBytes(StandardCharsets.UTF_8)))),
 +        output);
 +  }
 +
 +  @Test(expected = FileNotFoundException.class)
 +  public void testWithMissingFile() throws FileNotFoundException {
 +    ShellUtil.scanFile("missingFile.txt", false);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
index 3b9a92a,df4a62c..0f04afc
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
@@@ -35,10 -36,8 +36,9 @@@ import org.apache.accumulo.core.util.Ba
  import org.apache.accumulo.core.util.CachedConfiguration;
  import org.apache.accumulo.core.util.TextUtil;
  import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.test.randomwalk.Environment;
  import org.apache.accumulo.test.randomwalk.State;
  import org.apache.accumulo.test.randomwalk.Test;
- import org.apache.commons.codec.binary.Base64;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;


[2/4] git commit: ACCUMULO-2791 Downgrade commons-codec to match that provided by Hadoop.

Posted by bu...@apache.org.
ACCUMULO-2791 Downgrade commons-codec to match that provided by Hadoop.

* Provide a core.util Base64 class to enforce the non-chunked behavior we rely on
* Changed to use codec 1.4 'shaHex' method


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a73cf851
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a73cf851
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a73cf851

Branch: refs/heads/master
Commit: a73cf8511020f552e90386d829a6dffb5a8a1cad
Parents: 484491d
Author: Sean Busbey <bu...@cloudera.com>
Authored: Fri May 9 11:50:07 2014 -0500
Committer: Sean Busbey <bu...@cloudera.com>
Committed: Fri May 9 16:44:14 2014 -0500

----------------------------------------------------------------------
 .../core/client/mapreduce/RangeInputSplit.java  |  2 +-
 .../mapreduce/lib/impl/ConfiguratorBase.java    |  2 +-
 .../mapreduce/lib/impl/InputConfigurator.java   | 12 ++--
 .../lib/partition/RangePartitioner.java         |  2 +-
 .../iterators/user/IntersectingIterator.java    |  6 +-
 .../accumulo/core/security/Authorizations.java  |  4 +-
 .../accumulo/core/security/Credentials.java     |  2 +-
 .../org/apache/accumulo/core/util/Base64.java   | 75 ++++++++++++++++++++
 .../apache/accumulo/core/util/CreateToken.java  |  2 +-
 .../org/apache/accumulo/core/util/Encoding.java |  9 +--
 .../util/shell/commands/AddSplitsCommand.java   |  2 +-
 .../util/shell/commands/CreateTableCommand.java |  2 +-
 .../util/shell/commands/GetSplitsCommand.java   |  6 +-
 .../core/util/shell/commands/HiddenCommand.java |  2 +-
 .../client/mapred/AccumuloInputFormatTest.java  |  4 +-
 .../mapreduce/AccumuloInputFormatTest.java      |  4 +-
 .../lib/impl/ConfiguratorBaseTest.java          |  2 +-
 .../examples/simple/mapreduce/RowHash.java      |  2 +-
 .../mapreduce/bulk/BulkIngestExample.java       |  4 +-
 pom.xml                                         |  2 +-
 .../apache/accumulo/server/fs/VolumeUtil.java   |  2 +-
 .../master/state/TabletStateChangeIterator.java |  4 +-
 .../server/security/SystemCredentials.java      |  2 +-
 .../accumulo/server/util/DumpZookeeper.java     |  4 +-
 .../accumulo/server/util/RestoreZookeeper.java  |  2 +-
 .../apache/accumulo/master/tableOps/Utils.java  |  6 +-
 .../monitor/servlets/TServersServlet.java       |  4 +-
 .../test/randomwalk/shard/BulkInsert.java       |  4 +-
 .../accumulo/test/functional/CredentialsIT.java |  2 +-
 29 files changed, 124 insertions(+), 52 deletions(-)
----------------------------------------------------------------------
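
The new core/src/main/java/org/apache/accumulo/core/util/Base64.java in the listing above is the linchpin of the change: a thin wrapper that pins non-chunked output no matter which commons-codec version Hadoop supplies. A minimal sketch of its shape (the committed 75-line file also carries the license header and may expose more variants than shown here):

    package org.apache.accumulo.core.util;

    import java.nio.charset.StandardCharsets;

    // Sketch only: delegate to commons-codec, always requesting unchunked output.
    public final class Base64 {
      private Base64() {}

      public static String encodeBase64String(byte[] data) {
        // codec 1.4 chunks encodeBase64String(); the boolean overload does not.
        return new String(org.apache.commons.codec.binary.Base64.encodeBase64(data, false), StandardCharsets.UTF_8);
      }

      public static byte[] decodeBase64(byte[] data) {
        return org.apache.commons.codec.binary.Base64.decodeBase64(data);
      }
    }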


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 47b34e9..06f4081 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@ -42,8 +42,8 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.InputSplit;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
index 33ca5d2..e87d43b 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
@@ -30,7 +30,7 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.util.ArgumentChecker;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
index 2fc606c..1ed23e3 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
@@ -60,9 +60,9 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.Text;
@@ -176,7 +176,7 @@ public class InputConfigurator extends ConfiguratorBase {
       for (Range r : ranges) {
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         r.write(new DataOutputStream(baos));
-        rangeStrings.add(new String(Base64.encodeBase64(baos.toByteArray())));
+        rangeStrings.add(Base64.encodeBase64String(baos.toByteArray()));
       }
       conf.setStrings(enumToConfKey(implementingClass, ScanOpts.RANGES), rangeStrings.toArray(new String[0]));
     } catch (IOException ex) {
@@ -272,9 +272,9 @@ public class InputConfigurator extends ConfiguratorBase {
       if (column.getFirst() == null)
         throw new IllegalArgumentException("Column family can not be null");
 
-      String col = new String(Base64.encodeBase64(TextUtil.getBytes(column.getFirst())), Constants.UTF8);
+      String col = Base64.encodeBase64String(TextUtil.getBytes(column.getFirst()));
       if (column.getSecond() != null)
-        col += ":" + new String(Base64.encodeBase64(TextUtil.getBytes(column.getSecond())), Constants.UTF8);
+        col += ":" + Base64.encodeBase64String(TextUtil.getBytes(column.getSecond()));
       columnStrings.add(col);
     }
 
@@ -339,7 +339,7 @@ public class InputConfigurator extends ConfiguratorBase {
     String newIter;
     try {
       cfg.write(new DataOutputStream(baos));
-      newIter = new String(Base64.encodeBase64(baos.toByteArray()), Constants.UTF8);
+      newIter = Base64.encodeBase64String(baos.toByteArray());
       baos.close();
     } catch (IOException e) {
       throw new IllegalArgumentException("unable to serialize IteratorSetting");
@@ -536,7 +536,7 @@ public class InputConfigurator extends ConfiguratorBase {
     }
 
     String confKey = enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS);
-    conf.set(confKey, new String(Base64.encodeBase64(baos.toByteArray())));
+    conf.set(confKey, Base64.encodeBase64String(baos.toByteArray()));
   }
 
   /**

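For reference, the updated serialization path boils down to the following. A minimal sketch, assuming the org.apache.accumulo.core.util.Base64 wrapper introduced by this commit is on the classpath; the Range endpoints and class name are illustrative:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.util.Base64;

    public class RangeEncodingDemo {
      public static void main(String[] args) throws IOException {
        Range r = new Range("a", "m");
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        r.write(new DataOutputStream(baos));
        // Single-line Base64 suited to a Hadoop Configuration value. The old
        // new String(Base64.encodeBase64(...)) form also used the platform
        // default charset; the wrapper always produces UTF-8.
        System.out.println(Base64.encodeBase64String(baos.toByteArray()));
      }
    }
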
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
index 54730ef..1541fae 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
@@ -28,7 +28,7 @@ import java.util.TreeSet;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.mapreduce.lib.impl.DistributedCacheHelper;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
index c219b5a..8ce0ca8 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
@@ -31,8 +31,8 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
@@ -391,7 +391,7 @@ public class IntersectingIterator implements SortedKeyValueIterator<Key,Value> {
   protected static String encodeColumns(Text[] columns) {
     StringBuilder sb = new StringBuilder();
     for (int i = 0; i < columns.length; i++) {
-      sb.append(new String(Base64.encodeBase64(TextUtil.getBytes(columns[i])), Constants.UTF8));
+      sb.append(Base64.encodeBase64String(TextUtil.getBytes(columns[i])));
       sb.append('\n');
     }
     return sb.toString();
@@ -408,7 +408,7 @@ public class IntersectingIterator implements SortedKeyValueIterator<Key,Value> {
       else
         bytes[i] = 0;
     }
-    return new String(Base64.encodeBase64(bytes), Constants.UTF8);
+    return Base64.encodeBase64String(bytes);
   }
   
   protected static Text[] decodeColumns(String columns) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
index ab3ea68..1abe002 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
@@ -31,8 +31,8 @@ import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.data.ArrayByteSequence;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.util.ArgumentChecker;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.commons.codec.binary.Base64;
 
 /**
  * A collection of authorization strings.
@@ -340,7 +340,7 @@ public class Authorizations implements Iterable<byte[]>, Serializable, Authoriza
     for (byte[] auth : authsList) {
       sb.append(sep);
       sep = ",";
-      sb.append(new String(Base64.encodeBase64(auth), Constants.UTF8));
+      sb.append(Base64.encodeBase64String(auth));
     }
 
     return sb.toString();

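Why Base64 at all here: each authorization may contain arbitrary bytes, including the ',' used as the list separator, so every element is encoded before joining. A sketch of just the changed loop, with made-up auth values:

    import java.nio.charset.StandardCharsets;

    import org.apache.accumulo.core.util.Base64;

    public class AuthSerializeDemo {
      public static void main(String[] args) {
        byte[][] authsList = {"admin".getBytes(StandardCharsets.UTF_8),
            "a,b".getBytes(StandardCharsets.UTF_8)}; // note the embedded comma
        StringBuilder sb = new StringBuilder();
        String sep = "";
        for (byte[] auth : authsList) {
          sb.append(sep);
          sep = ",";
          // non-chunked, so no CRLF ever lands in the serialized form
          sb.append(Base64.encodeBase64String(auth));
        }
        System.out.println(sb.toString()); // YWRtaW4=,YSxi
      }
    }
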
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/Credentials.java b/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
index 9f8b1be..582b4e0 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
@@ -26,7 +26,7 @@ import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 
 /**
  * A wrapper for internal use. This class carries the instance, principal, and authentication token for use in the public API, in a non-serialized form. This is

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/Base64.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Base64.java b/core/src/main/java/org/apache/accumulo/core/util/Base64.java
new file mode 100644
index 0000000..76de4ed
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/util/Base64.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.util;
+
+import org.apache.commons.codec.binary.StringUtils;
+
+/**
+ * A wrapper around commons-codec's Base64 to make sure we get the non-chunked behavior that
+ * became the default in commons-codec version 1.5+ while relying on the commons-codec version 1.4
+ * that Hadoop Client provides.
+ */
+public final class Base64 {
+
+  /**
+   * Private to prevent instantiation.
+   */
+  private Base64() {
+  }
+
+  /**
+   * Serialize to Base64 byte array, non-chunked.
+   */
+  public static byte[] encodeBase64(byte[] data) {
+    return org.apache.commons.codec.binary.Base64.encodeBase64(data, false);
+  }
+
+  /**
+   * Serialize to Base64 as a String, non-chunked.
+   */
+  public static String encodeBase64String(byte[] data) {
+    /* Based on the method of the same name in commons-codec 1.5+. The 1.4 version of encodeBase64String chunks its output, so we call the two-arg encodeBase64 with chunking disabled instead. */
+    return StringUtils.newStringUtf8(org.apache.commons.codec.binary.Base64.encodeBase64(data, false));
+  }
+
+  /**
+   * Serialize to Base64 as a String using the URLSafe alphabet, non-chunked.
+   *
+   * The URLSafe alphabet uses - instead of + and _ instead of /.
+   */
+  public static String encodeBase64URLSafeString(byte[] data) {
+    return org.apache.commons.codec.binary.Base64.encodeBase64URLSafeString(data);
+  }
+
+  /**
+   * Decode, presuming bytes are base64.
+   *
+   * Transparently handles either the standard alphabet or the URL Safe one.
+   */
+  public static byte[] decodeBase64(byte[] base64) {
+    return org.apache.commons.codec.binary.Base64.decodeBase64(base64);
+  }
+
+  /**
+   * Decode, presuming String is base64.
+   *
+   * Transparently handles either the standard alphabet or the URL Safe one.
+   */
+  public static byte[] decodeBase64(String base64String) {
+    return org.apache.commons.codec.binary.Base64.decodeBase64(base64String);
+  }
+}

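The behavioral difference this wrapper pins down can be seen directly against commons-codec 1.4. A standalone sketch (the class name and the 100-byte input are arbitrary):

    import java.nio.charset.StandardCharsets;

    import org.apache.commons.codec.binary.Base64;

    public class ChunkingDemo {
      public static void main(String[] args) {
        byte[] data = new byte[100]; // long enough to exceed one 76-char line
        // The two-arg encodeBase64 behaves the same in codec 1.4 and 1.5+,
        // which is why the wrapper above calls it with isChunked = false.
        String chunked = new String(Base64.encodeBase64(data, true), StandardCharsets.UTF_8);
        String plain = new String(Base64.encodeBase64(data, false), StandardCharsets.UTF_8);
        System.out.println(chunked.contains("\r\n")); // true: CRLF every 76 chars
        System.out.println(plain.contains("\r\n"));   // false: one line
      }
    }
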
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
index cc6762a..9f86db3 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
@@ -32,7 +32,7 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Authe
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.TokenProperty;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 
 import com.beust.jcommander.Parameter;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Encoding.java b/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
index 451d4d6..aff8f62 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
@@ -17,14 +17,13 @@
 package org.apache.accumulo.core.util;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.io.Text;
 
 public class Encoding {
   
   public static String encodeAsBase64FileName(Text data) {
-    String encodedRow = new String(Base64.encodeBase64(TextUtil.getBytes(data)), Constants.UTF8);
-    encodedRow = encodedRow.replace('/', '_').replace('+', '-');
+    String encodedRow = Base64.encodeBase64URLSafeString(TextUtil.getBytes(data));
     
     int index = encodedRow.length() - 1;
     while (index >= 0 && encodedRow.charAt(index) == '=')
@@ -37,9 +36,7 @@ public class Encoding {
   public static byte[] decodeBase64FileName(String node) {
     while (node.length() % 4 != 0)
       node += "=";
-    
-    node = node.replace('_', '/').replace('-', '+');
-    
+    /* decode transparently handles URLSafe encodings */
     return Base64.decodeBase64(node.getBytes(Constants.UTF8));
   }
   

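The net effect on file-name round-tripping, as a sketch. It assumes the wrapper above; the sample row value is invented:

    import java.nio.charset.StandardCharsets;

    import org.apache.accumulo.core.util.Base64;

    public class FileNameDemo {
      public static void main(String[] args) {
        byte[] row = "row/with+chars".getBytes(StandardCharsets.UTF_8);
        // URL-safe output uses '-' for '+' and '_' for '/' and carries no
        // padding, so it works as a file name without the old
        // replace('/', '_').replace('+', '-') dance.
        String name = Base64.encodeBase64URLSafeString(row);
        String padded = name;
        while (padded.length() % 4 != 0) // re-pad exactly as decodeBase64FileName does
          padded += "=";
        byte[] decoded = Base64.decodeBase64(padded.getBytes(StandardCharsets.UTF_8));
        System.out.println(new String(decoded, StandardCharsets.UTF_8)); // row/with+chars
      }
    }
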
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java
index 6bd260c..f06a639 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/AddSplitsCommand.java
@@ -21,13 +21,13 @@ import java.util.TreeSet;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.accumulo.core.util.shell.Shell.Command;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.MissingArgumentException;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 
 public class AddSplitsCommand extends Command {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
index 25b92be..d2c73f7 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
@@ -35,6 +35,7 @@ import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.iterators.IteratorUtil;
 import org.apache.accumulo.core.security.VisibilityConstraint;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.accumulo.core.util.shell.Shell.Command;
 import org.apache.accumulo.core.util.shell.Token;
@@ -42,7 +43,6 @@ import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.OptionGroup;
 import org.apache.commons.cli.Options;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 
 public class CreateTableCommand extends Command {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
index a27fa47..695d1a3 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
@@ -35,6 +35,7 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.util.format.BinaryFormatter;
 import org.apache.accumulo.core.util.shell.Shell;
@@ -45,7 +46,6 @@ import org.apache.accumulo.core.util.shell.Shell.PrintShell;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Text;
 
 public class GetSplitsCommand extends Command {
@@ -104,7 +104,7 @@ public class GetSplitsCommand extends Command {
       return null;
     }
     BinaryFormatter.getlength(text.getLength());
-    return encode ? new String(Base64.encodeBase64(TextUtil.getBytes(text)), Constants.UTF8) : BinaryFormatter.appendText(new StringBuilder(), text).toString();
+    return encode ? Base64.encodeBase64String(TextUtil.getBytes(text)) : BinaryFormatter.appendText(new StringBuilder(), text).toString();
   }
   
   private static String obscuredTabletName(final KeyExtent extent) {
@@ -117,7 +117,7 @@ public class GetSplitsCommand extends Command {
     if (extent.getEndRow() != null && extent.getEndRow().getLength() > 0) {
       digester.update(extent.getEndRow().getBytes(), 0, extent.getEndRow().getLength());
     }
-    return new String(Base64.encodeBase64(digester.digest()), Constants.UTF8);
+    return Base64.encodeBase64String(digester.digest());
   }
   
   @Override

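The obscuring scheme itself is untouched: digest the extent, then Base64 the digest. The hunk does not show the digester's construction, so the SHA-256 choice below is an assumption; only the last line corresponds to the change:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    import org.apache.accumulo.core.util.Base64;

    public class ObscuredNameDemo {
      public static void main(String[] args) throws NoSuchAlgorithmException {
        // SHA-256 is assumed here; the surrounding code, not in this hunk,
        // picks the actual algorithm.
        MessageDigest digester = MessageDigest.getInstance("SHA-256");
        digester.update("2;myEndRow".getBytes(StandardCharsets.UTF_8)); // made-up extent data
        System.out.println(Base64.encodeBase64String(digester.digest()));
      }
    }
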
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java
index c212c75..61f60f8 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HiddenCommand.java
@@ -20,12 +20,12 @@ import java.security.SecureRandom;
 import java.util.Random;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.accumulo.core.util.shell.Shell.Command;
 import org.apache.accumulo.core.util.shell.ShellCommandException;
 import org.apache.accumulo.core.util.shell.ShellCommandException.ErrorCode;
 import org.apache.commons.cli.CommandLine;
-import org.apache.commons.codec.binary.Base64;
 
 public class HiddenCommand extends Command {
   private static Random rand = new SecureRandom();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
index 13490e0..9e6958a 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormatTest.java
@@ -36,7 +36,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.iterators.user.WholeRowIterator;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
@@ -80,7 +80,7 @@ public class AccumuloInputFormatTest {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     is.write(new DataOutputStream(baos));
     String iterators = job.get("AccumuloInputFormat.ScanOpts.Iterators");
-    assertEquals(new String(Base64.encodeBase64(baos.toByteArray())), iterators);
+    assertEquals(Base64.encodeBase64String(baos.toByteArray()), iterators);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
index 2500972..3844cd9 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
@@ -42,9 +42,9 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
 import org.apache.accumulo.core.iterators.user.WholeRowIterator;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
@@ -77,7 +77,7 @@ public class AccumuloInputFormatTest {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     is.write(new DataOutputStream(baos));
     String iterators = conf.get("AccumuloInputFormat.ScanOpts.Iterators");
-    assertEquals(new String(Base64.encodeBase64(baos.toByteArray())), iterators);
+    assertEquals(Base64.encodeBase64String(baos.toByteArray()), iterators);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
index 1983470..d5ebb22 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBaseTest.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
index 1fa9b8f..165b481 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
@@ -25,8 +25,8 @@ import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.MD5Hash;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
index 72bd7eb..6da51a3 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
@@ -27,9 +27,9 @@ import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
 import org.apache.accumulo.core.client.mapreduce.lib.partition.RangePartitioner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.examples.simple.mapreduce.JobUtil;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -133,7 +133,7 @@ public class BulkIngestExample extends Configured implements Tool {
 
       Collection<Text> splits = connector.tableOperations().listSplits(opts.tableName, 100);
       for (Text split : splits)
-        out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));
+        out.println(Base64.encodeBase64String(TextUtil.getBytes(split)));
 
       job.setNumReduceTasks(splits.size() + 1);
       out.close();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 96affb5..5e32a55 100644
--- a/pom.xml
+++ b/pom.xml
@@ -161,7 +161,7 @@
       <dependency>
         <groupId>commons-codec</groupId>
         <artifactId>commons-codec</artifactId>
-        <version>1.7</version>
+        <version>1.4</version>
       </dependency>
       <dependency>
         <groupId>commons-collections</groupId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index 34abb01..436667c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -336,7 +336,7 @@ public class VolumeUtil {
   private static String hash(FileSystem fs, Path dir, String name) throws IOException {
     FSDataInputStream in = fs.open(new Path(dir, name));
     try {
-      return DigestUtils.sha1Hex(in);
+      return DigestUtils.shaHex(in);
     } finally {
       in.close();
     }

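On the shaHex rename: DigestUtils.sha1Hex only exists from commons-codec 1.7 onward, while shaHex (the older name for the same SHA-1 hex digest) has the InputStream overload in 1.4, which is what lets this compile against the jar Hadoop ships. A throwaway sketch:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    import org.apache.commons.codec.digest.DigestUtils;

    public class ShaHexDemo {
      public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream("file contents".getBytes(StandardCharsets.UTF_8));
        try {
          // shaHex and sha1Hex compute the same SHA-1 hex string; only the
          // name (and the codec version that provides it) differs.
          System.out.println(DigestUtils.shaHex(in));
        } finally {
          in.close();
        }
      }
    }
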
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
index 5749523..f4d5591 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
@@ -35,9 +35,9 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SkippingIterator;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -182,7 +182,7 @@ public class TabletStateChangeIterator extends SkippingIterator {
     } catch (Exception ex) {
       throw new RuntimeException(ex);
     }
-    String encoded = new String(Base64.encodeBase64(Arrays.copyOf(buffer.getData(), buffer.getLength())), Constants.UTF8);
+    String encoded = Base64.encodeBase64String(Arrays.copyOf(buffer.getData(), buffer.getLength()));
     cfg.addOption(MERGES_OPTION, encoded);
   }
   

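One detail worth keeping in mind in this hunk: DataOutputBuffer.getData() returns the whole backing array, which is usually longer than the bytes actually written, hence the Arrays.copyOf before encoding. A sketch with a stand-in payload (MERGES_OPTION and the real KeyExtent data are not reproduced):

    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.accumulo.core.util.Base64;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;

    public class MergesOptionDemo {
      public static void main(String[] args) throws IOException {
        DataOutputBuffer buffer = new DataOutputBuffer();
        new Text("merge info stand-in").write(buffer);
        // Trim to getLength(); encoding getData() directly would Base64 the
        // unwritten tail of the backing array as well.
        String encoded = Base64.encodeBase64String(Arrays.copyOf(buffer.getData(), buffer.getLength()));
        System.out.println(encoded);
      }
    }
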
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java b/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
index b5d7aba..767ed25 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
@@ -31,10 +31,10 @@ import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.io.Writable;
 
 /**

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java b/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
index 30aa2eb..504956f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
@@ -21,9 +21,9 @@ import java.io.UnsupportedEncodingException;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
@@ -108,7 +108,7 @@ public class DumpZookeeper {
     for (int i = 0; i < data.length; i++) {
       // does this look like simple ascii?
       if (data[i] < ' ' || data[i] > '~')
-        return new Encoded("base64", new String(Base64.encodeBase64(data), Constants.UTF8));
+        return new Encoded("base64", Base64.encodeBase64String(data));
     }
     return new Encoded(Constants.UTF8.name(), new String(data, Constants.UTF8));
   }

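The heuristic around the changed line, restated on its own: any byte outside the printable ASCII range pushes the node data to Base64, otherwise it is dumped as UTF-8. This standalone version is illustrative; the real method wraps the result in an Encoded value rather than a prefixed string:

    import java.nio.charset.StandardCharsets;

    import org.apache.accumulo.core.util.Base64;

    public class ZkDataDemo {
      static String render(byte[] data) {
        for (byte b : data) {
          // same test as the diff: anything outside ' '..'~' is not simple ascii
          if (b < ' ' || b > '~')
            return "base64: " + Base64.encodeBase64String(data);
        }
        return "utf8: " + new String(data, StandardCharsets.UTF_8);
      }

      public static void main(String[] args) {
        System.out.println(render("plain text".getBytes(StandardCharsets.UTF_8)));
        System.out.println(render(new byte[] {0, 1, 2}));
      }
    }
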
http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java b/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
index 37ef5f1..a08000e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
@@ -25,10 +25,10 @@ import javax.xml.parsers.SAXParserFactory;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
index 577f5d5..c2bb7a9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
@@ -27,6 +27,7 @@ import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
@@ -35,7 +36,6 @@ import org.apache.accumulo.fate.zookeeper.ZooReservation;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.zookeeper.ZooQueueLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
 
@@ -117,7 +117,7 @@ public class Utils {
     Instance instance = HdfsZooInstance.getInstance();
 
     String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
-        + new String(Base64.encodeBase64(directory.getBytes(Constants.UTF8)), Constants.UTF8);
+        + Base64.encodeBase64String(directory.getBytes(Constants.UTF8));
 
     IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
 
@@ -130,7 +130,7 @@ public class Utils {
   public static void unreserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
     Instance instance = HdfsZooInstance.getInstance();
     String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
-        + new String(Base64.encodeBase64(directory.getBytes(Constants.UTF8)), Constants.UTF8);
+        + Base64.encodeBase64String(directory.getBytes(Constants.UTF8));
     ZooReservation.release(ZooReaderWriter.getRetryingInstance(), resvPath, String.format("%016x", tid));
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
index 9f1bd1f..a1ee765 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
@@ -35,6 +35,7 @@ import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.ActionStats;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.Duration;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.monitor.Monitor;
@@ -53,7 +54,6 @@ import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.ActionStatsUpdator;
 import org.apache.accumulo.server.util.TableInfoUtil;
 import org.apache.accumulo.trace.instrument.Tracer;
-import org.apache.commons.codec.binary.Base64;
 
 import com.google.common.net.HostAndPort;
 
@@ -169,7 +169,7 @@ public class TServersServlet extends BasicServlet {
       if (extent.getEndRow() != null && extent.getEndRow().getLength() > 0) {
         digester.update(extent.getEndRow().getBytes(), 0, extent.getEndRow().getLength());
       }
-      String obscuredExtent = new String(Base64.encodeBase64(digester.digest()), Constants.UTF8);
+      String obscuredExtent = Base64.encodeBase64String(digester.digest());
       String displayExtent = String.format("<code>[%s]</code>", obscuredExtent);
       
       TableRow row = perTabletResults.prepareRow();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
index 41acce2..df4a62c 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java
@@ -32,12 +32,12 @@ import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.Base64;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.test.randomwalk.State;
 import org.apache.accumulo.test.randomwalk.Test;
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -169,7 +169,7 @@ public class BulkInsert extends Test {
     
     Collection<Text> splits = conn.tableOperations().listSplits(tableName, maxSplits);
     for (Text split : splits)
-      out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split)), Constants.UTF8));
+      out.println(Base64.encodeBase64String(TextUtil.getBytes(split)));
     
     out.close();
     

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a73cf851/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
index a35a19a..cfc274a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
@@ -36,7 +36,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.commons.codec.binary.Base64;
+import org.apache.accumulo.core.util.Base64;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;


[4/4] git commit: Merge branch '1.6.1-SNAPSHOT'

Posted by bu...@apache.org.
Merge branch '1.6.1-SNAPSHOT'

Conflicts:
	core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
	core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
	core/src/main/java/org/apache/accumulo/core/util/Encoding.java
	mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
	mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
	server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
	server/base/src/main/java/org/apache/accumulo/server/util/DumpZookeeper.java
	server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
	server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
	shell/src/main/java/org/apache/accumulo/shell/commands/AddSplitsCommand.java
	shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java
	shell/src/main/java/org/apache/accumulo/shell/commands/GetSplitsCommand.java
	shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
	test/src/main/java/org/apache/accumulo/test/randomwalk/shard/BulkInsert.java


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/e1862d31
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/e1862d31
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/e1862d31

Branch: refs/heads/master
Commit: e1862d312078271e590f4ed495ef93e5637cce9b
Parents: 4c0f359 a73cf85
Author: Sean Busbey <bu...@cloudera.com>
Authored: Mon May 12 12:50:26 2014 -0500
Committer: Sean Busbey <bu...@cloudera.com>
Committed: Mon May 12 12:50:26 2014 -0500

----------------------------------------------------------------------
 .../iterators/user/IntersectingIterator.java    |  6 +-
 .../accumulo/core/security/Authorizations.java  |  4 +-
 .../accumulo/core/security/Credentials.java     |  2 +-
 .../org/apache/accumulo/core/util/Base64.java   | 75 ++++++++++++++++++++
 .../apache/accumulo/core/util/CreateToken.java  |  2 +-
 .../org/apache/accumulo/core/util/Encoding.java |  9 +--
 .../examples/simple/mapreduce/RowHash.java      |  2 +-
 .../mapreduce/bulk/BulkIngestExample.java       |  4 +-
 .../core/client/mapreduce/RangeInputSplit.java  |  2 +-
 .../mapreduce/lib/impl/ConfiguratorBase.java    |  2 +-
 .../mapreduce/lib/impl/InputConfigurator.java   | 18 ++---
 .../lib/partition/RangePartitioner.java         |  2 +-
 .../client/mapred/AccumuloInputFormatTest.java  |  4 +-
 .../mapreduce/AccumuloInputFormatTest.java      |  4 +-
 .../lib/impl/ConfiguratorBaseTest.java          |  2 +-
 pom.xml                                         |  2 +-
 .../apache/accumulo/server/fs/VolumeUtil.java   |  2 +-
 .../master/state/TabletStateChangeIterator.java |  4 +-
 .../server/security/SystemCredentials.java      |  2 +-
 .../accumulo/server/util/DumpZookeeper.java     |  4 +-
 .../accumulo/server/util/RestoreZookeeper.java  |  2 +-
 .../apache/accumulo/master/tableOps/Utils.java  |  6 +-
 .../monitor/servlets/TServersServlet.java       |  5 +-
 .../org/apache/accumulo/shell/ShellUtil.java    |  2 +-
 .../shell/commands/GetSplitsCommand.java        |  7 +-
 .../accumulo/shell/commands/HiddenCommand.java  |  2 +-
 .../apache/accumulo/shell/ShellUtilTest.java    |  2 +-
 .../test/randomwalk/shard/BulkInsert.java       |  4 +-
 .../accumulo/test/functional/CredentialsIT.java |  2 +-
 29 files changed, 127 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
index d527653,1abe002..d306b43
--- a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
@@@ -30,10 -27,12 +30,10 @@@ import java.util.List
  import java.util.Set;
  import java.util.TreeSet;
  
 -import org.apache.accumulo.core.Constants;
  import org.apache.accumulo.core.data.ArrayByteSequence;
  import org.apache.accumulo.core.data.ByteSequence;
 -import org.apache.accumulo.core.util.ArgumentChecker;
+ import org.apache.accumulo.core.util.Base64;
  import org.apache.accumulo.core.util.ByteBufferUtil;
- import org.apache.commons.codec.binary.Base64;
  
  /**
   * A collection of authorization strings.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/util/Encoding.java
index 7ddb029,aff8f62..761fc1f
--- a/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Encoding.java
@@@ -16,9 -16,8 +16,9 @@@
   */
  package org.apache.accumulo.core.util;
  
 -import org.apache.accumulo.core.Constants;
 +import java.nio.charset.StandardCharsets;
 +
- import org.apache.commons.codec.binary.Base64;
+ import org.apache.accumulo.core.util.Base64;
  import org.apache.hadoop.io.Text;
  
  public class Encoding {
@@@ -38,10 -36,8 +37,8 @@@
    public static byte[] decodeBase64FileName(String node) {
      while (node.length() % 4 != 0)
        node += "=";
-     
-     node = node.replace('_', '/').replace('-', '+');
-     
+     /* decode transparently handles URLSafe encodings */
 -    return Base64.decodeBase64(node.getBytes(Constants.UTF8));
 +    return Base64.decodeBase64(node.getBytes(StandardCharsets.UTF_8));
    }
    
  }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
index 1a43ec2,6da51a3..354396c
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
@@@ -131,9 -131,9 +131,9 @@@ public class BulkIngestExample extends 
        FileSystem fs = FileSystem.get(conf);
        out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.workDir + "/splits.txt"))));
  
 -      Collection<Text> splits = connector.tableOperations().listSplits(opts.tableName, 100);
 +      Collection<Text> splits = connector.tableOperations().listSplits(opts.getTableName(), 100);
        for (Text split : splits)
-         out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));
+         out.println(Base64.encodeBase64String(TextUtil.getBytes(split)));
  
        job.setNumReduceTasks(splits.size() + 1);
        out.close();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
----------------------------------------------------------------------
diff --cc mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 4b5a149,0000000..d2f4724
mode 100644,000000..100644
--- a/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@@ -1,490 -1,0 +1,490 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce;
 +
 +import java.io.DataInput;
 +import java.io.DataOutput;
 +import java.io.IOException;
 +import java.math.BigInteger;
 +import java.nio.charset.StandardCharsets;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.client.ClientConfiguration;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
 +import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.TokenSource;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 +import org.apache.accumulo.core.data.ByteSequence;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.PartialKey;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.security.Authorizations;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.accumulo.core.util.Pair;
- import org.apache.commons.codec.binary.Base64;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.io.Writable;
 +import org.apache.hadoop.mapreduce.InputSplit;
 +import org.apache.log4j.Level;
 +
 +/**
 + * The Class RangeInputSplit. Encapsulates an Accumulo range for use in Map Reduce jobs.
 + */
 +public class RangeInputSplit extends InputSplit implements Writable {
 +  private Range range;
 +  private String[] locations;
 +  private String tableId, tableName, instanceName, zooKeepers, principal;
 +  private TokenSource tokenSource;
 +  private String tokenFile;
 +  private AuthenticationToken token;
 +  private Boolean offline, mockInstance, isolatedScan, localIterators;
 +  private Authorizations auths;
 +  private Set<Pair<Text,Text>> fetchedColumns;
 +  private List<IteratorSetting> iterators;
 +  private Level level;
 +
 +  public RangeInputSplit() {
 +    range = new Range();
 +    locations = new String[0];
 +    tableName = "";
 +    tableId = "";
 +  }
 +
 +  public RangeInputSplit(RangeInputSplit split) throws IOException {
 +    this.setRange(split.getRange());
 +    this.setLocations(split.getLocations());
 +    this.setTableName(split.getTableName());
 +    this.setTableId(split.getTableId());
 +  }
 +
 +  protected RangeInputSplit(String table, String tableId, Range range, String[] locations) {
 +    this.range = range;
 +    setLocations(locations);
 +    this.tableName = table;
 +    this.tableId = tableId;
 +  }
 +
 +  public Range getRange() {
 +    return range;
 +  }
 +
 +  private static byte[] extractBytes(ByteSequence seq, int numBytes) {
 +    byte[] bytes = new byte[numBytes + 1];
 +    bytes[0] = 0;
 +    for (int i = 0; i < numBytes; i++) {
 +      if (i >= seq.length())
 +        bytes[i + 1] = 0;
 +      else
 +        bytes[i + 1] = seq.byteAt(i);
 +    }
 +    return bytes;
 +  }
 +
 +  public static float getProgress(ByteSequence start, ByteSequence end, ByteSequence position) {
 +    int maxDepth = Math.min(Math.max(end.length(), start.length()), position.length());
 +    BigInteger startBI = new BigInteger(extractBytes(start, maxDepth));
 +    BigInteger endBI = new BigInteger(extractBytes(end, maxDepth));
 +    BigInteger positionBI = new BigInteger(extractBytes(position, maxDepth));
 +    return (float) (positionBI.subtract(startBI).doubleValue() / endBI.subtract(startBI).doubleValue());
 +  }
 +
 +  public float getProgress(Key currentKey) {
 +    if (currentKey == null)
 +      return 0f;
 +    if (range.getStartKey() != null && range.getEndKey() != null) {
 +      if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW) != 0) {
 +        // just look at the row progress
 +        return getProgress(range.getStartKey().getRowData(), range.getEndKey().getRowData(), currentKey.getRowData());
 +      } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM) != 0) {
 +        // just look at the column family progress
 +        return getProgress(range.getStartKey().getColumnFamilyData(), range.getEndKey().getColumnFamilyData(), currentKey.getColumnFamilyData());
 +      } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM_COLQUAL) != 0) {
 +        // just look at the column qualifier progress
 +        return getProgress(range.getStartKey().getColumnQualifierData(), range.getEndKey().getColumnQualifierData(), currentKey.getColumnQualifierData());
 +      }
 +    }
 +    // if we can't figure it out, then claim no progress
 +    return 0f;
 +  }
 +
 +  /**
 +   * This implementation of length is only an estimate; it does not provide exact values. Do not write code that relies on this return value.
 +   */
 +  @Override
 +  public long getLength() throws IOException {
 +    Text startRow = range.isInfiniteStartKey() ? new Text(new byte[] {Byte.MIN_VALUE}) : range.getStartKey().getRow();
 +    Text stopRow = range.isInfiniteStopKey() ? new Text(new byte[] {Byte.MAX_VALUE}) : range.getEndKey().getRow();
 +    int maxCommon = Math.min(7, Math.min(startRow.getLength(), stopRow.getLength()));
 +    long diff = 0;
 +
 +    byte[] start = startRow.getBytes();
 +    byte[] stop = stopRow.getBytes();
 +    for (int i = 0; i < maxCommon; ++i) {
 +      diff |= 0xff & (start[i] ^ stop[i]);
 +      diff <<= Byte.SIZE;
 +    }
 +
 +    if (startRow.getLength() != stopRow.getLength())
 +      diff |= 0xff;
 +
 +    return diff + 1;
 +  }
 +
 +  @Override
 +  public String[] getLocations() throws IOException {
 +    return Arrays.copyOf(locations, locations.length);
 +  }
 +
 +  @Override
 +  public void readFields(DataInput in) throws IOException {
 +    range.readFields(in);
 +    tableName = in.readUTF();
 +    tableId = in.readUTF();
 +    int numLocs = in.readInt();
 +    locations = new String[numLocs];
 +    for (int i = 0; i < numLocs; ++i)
 +      locations[i] = in.readUTF();
 +
 +    if (in.readBoolean()) {
 +      isolatedScan = in.readBoolean();
 +    }
 +
 +    if (in.readBoolean()) {
 +      offline = in.readBoolean();
 +    }
 +
 +    if (in.readBoolean()) {
 +      localIterators = in.readBoolean();
 +    }
 +
 +    if (in.readBoolean()) {
 +      mockInstance = in.readBoolean();
 +    }
 +
 +    if (in.readBoolean()) {
 +      int numColumns = in.readInt();
 +      List<String> columns = new ArrayList<String>(numColumns);
 +      for (int i = 0; i < numColumns; i++) {
 +        columns.add(in.readUTF());
 +      }
 +
 +      fetchedColumns = InputConfigurator.deserializeFetchedColumns(columns);
 +    }
 +
 +    if (in.readBoolean()) {
 +      String strAuths = in.readUTF();
 +      auths = new Authorizations(strAuths.getBytes(StandardCharsets.UTF_8));
 +    }
 +
 +    if (in.readBoolean()) {
 +      principal = in.readUTF();
 +    }
 +
 +    if (in.readBoolean()) {
 +      int ordinal = in.readInt();
 +      this.tokenSource = TokenSource.values()[ordinal];
 +
 +      switch (this.tokenSource) {
 +        case INLINE:
 +          String tokenClass = in.readUTF();
 +          byte[] base64TokenBytes = in.readUTF().getBytes(StandardCharsets.UTF_8);
 +          byte[] tokenBytes = Base64.decodeBase64(base64TokenBytes);
 +
 +          this.token = AuthenticationTokenSerializer.deserialize(tokenClass, tokenBytes);
 +          break;
 +
 +        case FILE:
 +          this.tokenFile = in.readUTF();
 +
 +          break;
 +        default:
 +          throw new IOException("Cannot parse unknown TokenSource ordinal");
 +      }
 +    }
 +
 +    if (in.readBoolean()) {
 +      instanceName = in.readUTF();
 +    }
 +
 +    if (in.readBoolean()) {
 +      zooKeepers = in.readUTF();
 +    }
 +
 +    if (in.readBoolean()) {
 +      level = Level.toLevel(in.readInt());
 +    }
 +  }
 +
 +  @Override
 +  public void write(DataOutput out) throws IOException {
 +    range.write(out);
 +    out.writeUTF(tableName);
 +    out.writeUTF(tableId);
 +    out.writeInt(locations.length);
 +    for (int i = 0; i < locations.length; ++i)
 +      out.writeUTF(locations[i]);
 +
 +    out.writeBoolean(null != isolatedScan);
 +    if (null != isolatedScan) {
 +      out.writeBoolean(isolatedScan);
 +    }
 +
 +    out.writeBoolean(null != offline);
 +    if (null != offline) {
 +      out.writeBoolean(offline);
 +    }
 +
 +    out.writeBoolean(null != localIterators);
 +    if (null != localIterators) {
 +      out.writeBoolean(localIterators);
 +    }
 +
 +    out.writeBoolean(null != mockInstance);
 +    if (null != mockInstance) {
 +      out.writeBoolean(mockInstance);
 +    }
 +
 +    out.writeBoolean(null != fetchedColumns);
 +    if (null != fetchedColumns) {
 +      String[] cols = InputConfigurator.serializeColumns(fetchedColumns);
 +      out.writeInt(cols.length);
 +      for (String col : cols) {
 +        out.writeUTF(col);
 +      }
 +    }
 +
 +    out.writeBoolean(null != auths);
 +    if (null != auths) {
 +      out.writeUTF(auths.serialize());
 +    }
 +
 +    out.writeBoolean(null != principal);
 +    if (null != principal) {
 +      out.writeUTF(principal);
 +    }
 +
 +    out.writeBoolean(null != tokenSource);
 +    if (null != tokenSource) {
 +      out.writeInt(tokenSource.ordinal());
 +
 +      if (null != token && null != tokenFile) {
 +        throw new IOException("Cannot use both inline AuthenticationToken and file-based AuthenticationToken");
 +      } else if (null != token) {
 +        out.writeUTF(token.getClass().getCanonicalName());
 +        out.writeUTF(Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(token)));
 +      } else {
 +        out.writeUTF(tokenFile);
 +      }
 +    }
 +
 +    out.writeBoolean(null != instanceName);
 +    if (null != instanceName) {
 +      out.writeUTF(instanceName);
 +    }
 +
 +    out.writeBoolean(null != zooKeepers);
 +    if (null != zooKeepers) {
 +      out.writeUTF(zooKeepers);
 +    }
 +
 +    out.writeBoolean(null != level);
 +    if (null != level) {
 +      out.writeInt(level.toInt());
 +    }
 +  }
 +
 +  @Override
 +  public String toString() {
 +    StringBuilder sb = new StringBuilder(256);
 +    sb.append("Range: ").append(range);
 +    sb.append(" Locations: ").append(Arrays.asList(locations));
 +    sb.append(" Table: ").append(tableName);
 +    sb.append(" TableID: ").append(tableId);
 +    sb.append(" InstanceName: ").append(instanceName);
 +    sb.append(" zooKeepers: ").append(zooKeepers);
 +    sb.append(" principal: ").append(principal);
 +    sb.append(" tokenSource: ").append(tokenSource);
 +    sb.append(" authenticationToken: ").append(token);
 +    sb.append(" authenticationTokenFile: ").append(tokenFile);
 +    sb.append(" Authorizations: ").append(auths);
 +    sb.append(" offlineScan: ").append(offline);
 +    sb.append(" mockInstance: ").append(mockInstance);
 +    sb.append(" isolatedScan: ").append(isolatedScan);
 +    sb.append(" localIterators: ").append(localIterators);
 +    sb.append(" fetchColumns: ").append(fetchedColumns);
 +    sb.append(" iterators: ").append(iterators);
 +    sb.append(" logLevel: ").append(level);
 +    return sb.toString();
 +  }
 +
 +  public String getTableName() {
 +    return tableName;
 +  }
 +
 +  public void setTableName(String table) {
 +    this.tableName = table;
 +  }
 +
 +  public void setTableId(String tableId) {
 +    this.tableId = tableId;
 +  }
 +
 +  public String getTableId() {
 +    return tableId;
 +  }
 +
 +  public Instance getInstance() {
 +    if (null == instanceName) {
 +      return null;
 +    }
 +
 +    if (isMockInstance()) {
 +      return new MockInstance(getInstanceName());
 +    }
 +
 +    if (null == zooKeepers) {
 +      return null;
 +    }
 +
 +    return new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(getInstanceName()).withZkHosts(getZooKeepers()));
 +  }
 +
 +  public String getInstanceName() {
 +    return instanceName;
 +  }
 +
 +  public void setInstanceName(String instanceName) {
 +    this.instanceName = instanceName;
 +  }
 +
 +  public String getZooKeepers() {
 +    return zooKeepers;
 +  }
 +
 +  public void setZooKeepers(String zooKeepers) {
 +    this.zooKeepers = zooKeepers;
 +  }
 +
 +  public String getPrincipal() {
 +    return principal;
 +  }
 +
 +  public void setPrincipal(String principal) {
 +    this.principal = principal;
 +  }
 +
 +  public AuthenticationToken getToken() {
 +    return token;
 +  }
 +
 +  public void setToken(AuthenticationToken token) {
 +    this.tokenSource = TokenSource.INLINE;
 +    this.token = token;
 +  }
 +
 +  public void setToken(String tokenFile) {
 +    this.tokenSource = TokenSource.FILE;
 +    this.tokenFile = tokenFile;
 +  }
 +
 +  public Boolean isOffline() {
 +    return offline;
 +  }
 +
 +  public void setOffline(Boolean offline) {
 +    this.offline = offline;
 +  }
 +
 +  public void setLocations(String[] locations) {
 +    this.locations = Arrays.copyOf(locations, locations.length);
 +  }
 +
 +  public Boolean isMockInstance() {
 +    return mockInstance;
 +  }
 +
 +  public void setMockInstance(Boolean mockInstance) {
 +    this.mockInstance = mockInstance;
 +  }
 +
 +  public Boolean isIsolatedScan() {
 +    return isolatedScan;
 +  }
 +
 +  public void setIsolatedScan(Boolean isolatedScan) {
 +    this.isolatedScan = isolatedScan;
 +  }
 +
 +  public Authorizations getAuths() {
 +    return auths;
 +  }
 +
 +  public void setAuths(Authorizations auths) {
 +    this.auths = auths;
 +  }
 +
 +  public void setRange(Range range) {
 +    this.range = range;
 +  }
 +
 +  public Boolean usesLocalIterators() {
 +    return localIterators;
 +  }
 +
 +  public void setUsesLocalIterators(Boolean localIterators) {
 +    this.localIterators = localIterators;
 +  }
 +
 +  public Set<Pair<Text,Text>> getFetchedColumns() {
 +    return fetchedColumns;
 +  }
 +
 +  public void setFetchedColumns(Collection<Pair<Text,Text>> fetchedColumns) {
 +    this.fetchedColumns = new HashSet<Pair<Text,Text>>();
 +    for (Pair<Text,Text> columns : fetchedColumns) {
 +      this.fetchedColumns.add(columns);
 +    }
 +  }
 +
 +  public void setFetchedColumns(Set<Pair<Text,Text>> fetchedColumns) {
 +    this.fetchedColumns = fetchedColumns;
 +  }
 +
 +  public List<IteratorSetting> getIterators() {
 +    return iterators;
 +  }
 +
 +  public void setIterators(List<IteratorSetting> iterators) {
 +    this.iterators = iterators;
 +  }
 +
 +  public Level getLogLevel() {
 +    return level;
 +  }
 +
 +  public void setLogLevel(Level level) {
 +    this.level = level;
 +  }
 +}
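
A note on the Writable plumbing above: each optional field of RangeInputSplit is written behind a boolean presence flag, so readFields only consumes the fields that write actually emitted, and an inline token travels as a Base64 string. A minimal sketch of that round trip, assuming accumulo-core 1.6 on the classpath (the class name RangeInputSplitRoundTrip is illustrative and not part of this patch):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;

    import org.apache.accumulo.core.client.mapreduce.RangeInputSplit;

    // Sketch only, not part of this commit: round-trips a RangeInputSplit
    // through its Writable methods to show the presence-flag pattern.
    public class RangeInputSplitRoundTrip {
      public static void main(String[] args) throws Exception {
        RangeInputSplit split = new RangeInputSplit();
        split.setInstanceName("myInstance"); // optional field: flag + value

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        split.write(new DataOutputStream(baos));

        RangeInputSplit copy = new RangeInputSplit();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(baos.toByteArray())));

        // copy.getInstanceName() is "myInstance"; fields never set here
        // (zooKeepers, auths, token, ...) remain null because their
        // presence flag was written as false.
      }
    }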

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
----------------------------------------------------------------------
diff --cc mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
index 4610556,0000000..cf131bd
mode 100644,000000..100644
--- a/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
+++ b/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
@@@ -1,369 -1,0 +1,369 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce.lib.impl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +
 +import java.io.IOException;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.nio.charset.StandardCharsets;
 +
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.ClientConfiguration;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 +import org.apache.accumulo.core.security.Credentials;
- import org.apache.commons.codec.binary.Base64;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FSDataInputStream;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.util.StringUtils;
 +import org.apache.log4j.Level;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * @since 1.6.0
 + */
 +public class ConfiguratorBase {
 +
 +  /**
 +   * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum ConnectorInfo {
 +    IS_CONFIGURED, PRINCIPAL, TOKEN,
 +  }
 +
 +  public static enum TokenSource {
 +    FILE, INLINE;
 +
 +    private String prefix;
 +
 +    private TokenSource() {
 +      prefix = name().toLowerCase() + ":";
 +    }
 +
 +    public String prefix() {
 +      return prefix;
 +    }
 +  }
 +
 +  /**
 +   * Configuration keys for {@link Instance}, {@link ZooKeeperInstance}, and {@link MockInstance}.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum InstanceOpts {
 +    TYPE, NAME, ZOO_KEEPERS, CLIENT_CONFIG;
 +  }
 +
 +  /**
 +   * Configuration keys for general configuration options.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum GeneralOpts {
 +    LOG_LEVEL
 +  }
 +
 +  /**
 +   * Provides a configuration key for a given feature enum, prefixed by the implementingClass.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param e
 +   *          the enum used to provide the unique part of the configuration key
 +   * @return the configuration key
 +   * @since 1.6.0
 +   */
 +  protected static String enumToConfKey(Class<?> implementingClass, Enum<?> e) {
 +    return implementingClass.getSimpleName() + "." + e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase());
 +  }
 +
 +  /**
 +   * Sets the connector information needed to communicate with Accumulo in this job.
 +   * 
 +   * <p>
 +   * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all MapReduce tasks. It is Base64 encoded to provide a charset-safe
 +   * conversion to a string, and is not intended to be secure.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param principal
 +   *          a valid Accumulo user name
 +   * @param token
 +   *          the user's authentication token
 +   * @since 1.6.0
 +   */
 +  public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, AuthenticationToken token)
 +      throws AccumuloSecurityException {
 +    if (isConnectorInfoSet(implementingClass, conf))
 +      throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName() + " can only be set once per job");
 +
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(token != null, "token is null");
 +    conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN),
 +        TokenSource.INLINE.prefix() + token.getClass().getName() + ":" + Base64.encodeBase64String(AuthenticationTokenSerializer.serialize(token)));
 +  }
 +
 +  /**
 +   * Sets the connector information needed to communicate with Accumulo in this job.
 +   * 
 +   * <p>
 +   * Pulls a token file, containing the authentication token, into the Distributed Cache in an attempt to be more secure than storing the token in the
 +   * Configuration. The token file can be created with "bin/accumulo create-token".
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param principal
 +   *          a valid Accumulo user name
 +   * @param tokenFile
 +   *          the path to the token file in DFS
 +   * @since 1.6.0
 +   */
 +  public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, String tokenFile) throws AccumuloSecurityException {
 +    if (isConnectorInfoSet(implementingClass, conf))
 +      throw new IllegalStateException("Connector info for " + implementingClass.getSimpleName() + " can only be set once per job");
 +
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(tokenFile != null, "tokenFile is null");
 +
 +    try {
 +      DistributedCacheHelper.addCacheFile(new URI(tokenFile), conf);
 +    } catch (URISyntaxException e) {
 +      throw new IllegalStateException("Unable to add tokenFile \"" + tokenFile + "\" to distributed cache.");
 +    }
 +
 +    conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
 +    conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), TokenSource.FILE.prefix() + tokenFile);
 +  }
 +
 +  /**
 +   * Determines if the connector info has already been set for this instance.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the connector info has already been set, false otherwise
 +   * @since 1.6.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static Boolean isConnectorInfoSet(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), false);
 +  }
 +
 +  /**
 +   * Gets the user name from the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the principal
 +   * @since 1.6.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static String getPrincipal(Class<?> implementingClass, Configuration conf) {
 +    return conf.get(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL));
 +  }
 +
 +  /**
 +   * Gets the authenticated token from either the specified token file or directly from the configuration, whichever was used when the job was configured.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the principal's authentication token
 +   * @since 1.6.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   * @see #setConnectorInfo(Class, Configuration, String, String)
 +   */
 +  public static AuthenticationToken getAuthenticationToken(Class<?> implementingClass, Configuration conf) {
 +    String token = conf.get(enumToConfKey(implementingClass, ConnectorInfo.TOKEN));
 +    if (token == null || token.isEmpty())
 +      return null;
 +    if (token.startsWith(TokenSource.INLINE.prefix())) {
 +      String[] args = token.substring(TokenSource.INLINE.prefix().length()).split(":", 2);
 +      if (args.length == 2)
 +        return AuthenticationTokenSerializer.deserialize(args[0], Base64.decodeBase64(args[1].getBytes(StandardCharsets.UTF_8)));
 +    } else if (token.startsWith(TokenSource.FILE.prefix())) {
 +      String tokenFileName = token.substring(TokenSource.FILE.prefix().length());
 +      return getTokenFromFile(conf, getPrincipal(implementingClass, conf), tokenFileName);
 +    }
 +
 +    throw new IllegalStateException("Token was not properly serialized into the configuration");
 +  }
 +
 +  /**
 +   * Reads from the token file in the distributed cache. Currently, the token file stores data separated by colons, e.g. principal:token_class:token
 +   * 
 +   * @param conf
 +   *          the Hadoop context for the configured job
 +   * @return the principal's authentication token, read from the token file
 +   * @since 1.6.0
 +   * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
 +   */
 +  public static AuthenticationToken getTokenFromFile(Configuration conf, String principal, String tokenFile) {
 +    FSDataInputStream in = null;
 +    try {
 +      URI[] uris = DistributedCacheHelper.getCacheFiles(conf);
 +      Path path = null;
 +      for (URI u : uris) {
 +        if (u.toString().equals(tokenFile)) {
 +          path = new Path(u);
 +        }
 +      }
 +      if (path == null) {
 +        throw new IllegalArgumentException("Couldn't find password file called \"" + tokenFile + "\" in cache.");
 +      }
 +      FileSystem fs = FileSystem.get(conf);
 +      in = fs.open(path);
 +    } catch (IOException e) {
 +      throw new IllegalArgumentException("Couldn't open password file called \"" + tokenFile + "\".");
 +    }
 +    try (java.util.Scanner fileScanner = new java.util.Scanner(in)) {
 +      while (fileScanner.hasNextLine()) {
 +        Credentials creds = Credentials.deserialize(fileScanner.nextLine());
 +        if (principal.equals(creds.getPrincipal())) {
 +          return creds.getToken();
 +        }
 +      }
 +      throw new IllegalArgumentException("Couldn't find token for user \"" + principal + "\" in file \"" + tokenFile + "\"");
 +    }
 +  }
 +
 +  /**
 +   * Configures a {@link ZooKeeperInstance} for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param clientConfig
 +   *          client configuration for specifying connection timeouts, SSL connection options, etc.
 +   * @since 1.6.0
 +   */
 +  public static void setZooKeeperInstance(Class<?> implementingClass, Configuration conf, ClientConfiguration clientConfig) {
 +    String key = enumToConfKey(implementingClass, InstanceOpts.TYPE);
 +    if (!conf.get(key, "").isEmpty())
 +      throw new IllegalStateException("Instance info can only be set once per job; it has already been configured with " + conf.get(key));
 +    conf.set(key, "ZooKeeperInstance");
 +    if (clientConfig != null) {
 +      conf.set(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG), clientConfig.serialize());
 +    }
 +  }
 +
 +  /**
 +   * Configures a {@link MockInstance} for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param instanceName
 +   *          the Accumulo instance name
 +   * @since 1.6.0
 +   */
 +  public static void setMockInstance(Class<?> implementingClass, Configuration conf, String instanceName) {
 +    String key = enumToConfKey(implementingClass, InstanceOpts.TYPE);
 +    if (!conf.get(key, "").isEmpty())
 +      throw new IllegalStateException("Instance info can only be set once per job; it has already been configured with " + conf.get(key));
 +    conf.set(key, "MockInstance");
 +
 +    checkArgument(instanceName != null, "instanceName is null");
 +    conf.set(enumToConfKey(implementingClass, InstanceOpts.NAME), instanceName);
 +  }
 +
 +  /**
 +   * Initializes an Accumulo {@link Instance} based on the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return an Accumulo instance
 +   * @since 1.6.0
 +   * @see #setZooKeeperInstance(Class, Configuration, ClientConfiguration)
 +   * @see #setMockInstance(Class, Configuration, String)
 +   */
 +  public static Instance getInstance(Class<?> implementingClass, Configuration conf) {
 +    String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE), "");
 +    if ("MockInstance".equals(instanceType))
 +      return new MockInstance(conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME)));
 +    else if ("ZooKeeperInstance".equals(instanceType)) {
 +      String clientConfigString = conf.get(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG));
 +      if (clientConfigString == null) {
 +        String instanceName = conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME));
 +        String zookeepers = conf.get(enumToConfKey(implementingClass, InstanceOpts.ZOO_KEEPERS));
 +        return new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeepers));
 +      } else {
 +        return new ZooKeeperInstance(ClientConfiguration.deserialize(clientConfigString));
 +      }
 +    } else if (instanceType.isEmpty())
 +      throw new IllegalStateException("Instance has not been configured for " + implementingClass.getSimpleName());
 +    else
 +      throw new IllegalStateException("Unrecognized instance type " + instanceType);
 +  }
 +
 +  /**
 +   * Sets the log level for this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param level
 +   *          the logging level
 +   * @since 1.6.0
 +   */
 +  public static void setLogLevel(Class<?> implementingClass, Configuration conf, Level level) {
 +    checkArgument(level != null, "level is null");
 +    Logger.getLogger(implementingClass).setLevel(level);
 +    conf.setInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), level.toInt());
 +  }
 +
 +  /**
 +   * Gets the log level from this configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the log level
 +   * @since 1.6.0
 +   * @see #setLogLevel(Class, Configuration, Level)
 +   */
 +  public static Level getLogLevel(Class<?> implementingClass, Configuration conf) {
 +    return Level.toLevel(conf.getInt(enumToConfKey(implementingClass, GeneralOpts.LOG_LEVEL), Level.INFO.toInt()));
 +  }
 +
 +}
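
The setConnectorInfo/getAuthenticationToken pair above stores an inline token in the job configuration as "inline:<tokenClass>:<Base64(serialized token)>" and decodes it again on the task side. A minimal sketch of that round trip, assuming accumulo-core 1.6 on the classpath (the class name TokenRoundTrip is illustrative; jobs normally reach this code through AccumuloInputFormat.setConnectorInfo):

    import org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase;
    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.hadoop.conf.Configuration;

    // Sketch only, not part of this commit: shows the inline token encoding
    // round trip through the methods added above.
    public class TokenRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Stored under a key prefixed by the configuring class; the Base64
        // step only makes the serialized bytes safe to keep as a String,
        // it is not encryption.
        ConfiguratorBase.setConnectorInfo(TokenRoundTrip.class, conf, "root",
            new PasswordToken("secret"));
        AuthenticationToken token =
            ConfiguratorBase.getAuthenticationToken(TokenRoundTrip.class, conf);
        // token is a PasswordToken equal to the original.
      }
    }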

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
----------------------------------------------------------------------
diff --cc mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
index 69bbef2,0000000..339c9a8
mode 100644,000000..100644
--- a/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
+++ b/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
@@@ -1,796 -1,0 +1,796 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce.lib.impl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataInputStream;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +import java.nio.charset.StandardCharsets;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.StringTokenizer;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.ClientSideIteratorScanner;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.IsolatedScanner;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.RowIterator;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.impl.Tables;
 +import org.apache.accumulo.core.client.impl.TabletLocator;
 +import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
 +import org.apache.accumulo.core.client.mock.impl.MockTabletLocator;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.PartialKey;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.TablePermission;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.TextUtil;
- import org.apache.commons.codec.binary.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.MapWritable;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.io.Writable;
 +import org.apache.hadoop.util.StringUtils;
 +
 +import com.google.common.collect.Maps;
 +
 +/**
 + * @since 1.6.0
 + */
 +public class InputConfigurator extends ConfiguratorBase {
 +
 +  /**
 +   * Configuration keys for {@link Scanner}.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum ScanOpts {
 +    TABLE_NAME, AUTHORIZATIONS, RANGES, COLUMNS, ITERATORS, TABLE_CONFIGS
 +  }
 +
 +  /**
 +   * Configuration keys for various features.
 +   * 
 +   * @since 1.6.0
 +   */
 +  public static enum Features {
 +    AUTO_ADJUST_RANGES, SCAN_ISOLATION, USE_LOCAL_ITERATORS, SCAN_OFFLINE
 +  }
 +
 +  /**
 +   * Sets the name of the input table, over which this job will scan.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param tableName
 +   *          the name of the input table to scan over
 +   * @since 1.6.0
 +   */
 +  public static void setInputTableName(Class<?> implementingClass, Configuration conf, String tableName) {
 +    checkArgument(tableName != null, "tableName is null");
 +    conf.set(enumToConfKey(implementingClass, ScanOpts.TABLE_NAME), tableName);
 +  }
 +
 +  /**
 +   * Gets the name of the input table, over which this job will scan.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the name of the input table
 +   * @since 1.6.0
 +   */
 +  public static String getInputTableName(Class<?> implementingClass, Configuration conf) {
 +    return conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_NAME));
 +  }
 +
 +  /**
 +   * Sets the {@link Authorizations} used to scan. Must be a subset of the user's authorizations. Defaults to the empty set.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param auths
 +   *          the user's authorizations
 +   * @since 1.6.0
 +   */
 +  public static void setScanAuthorizations(Class<?> implementingClass, Configuration conf, Authorizations auths) {
 +    if (auths != null && !auths.isEmpty())
 +      conf.set(enumToConfKey(implementingClass, ScanOpts.AUTHORIZATIONS), auths.serialize());
 +  }
 +
 +  /**
 +   * Gets the authorizations to set for the scans from the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the Accumulo scan authorizations
 +   * @since 1.6.0
 +   * @see #setScanAuthorizations(Class, Configuration, Authorizations)
 +   */
 +  public static Authorizations getScanAuthorizations(Class<?> implementingClass, Configuration conf) {
 +    String authString = conf.get(enumToConfKey(implementingClass, ScanOpts.AUTHORIZATIONS));
 +    return authString == null ? Authorizations.EMPTY : new Authorizations(authString.getBytes(StandardCharsets.UTF_8));
 +  }
 +
 +  /**
 +   * Sets the input ranges to scan on all input tables for this job. If not set, the entire table will be scanned.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param ranges
 +   *          the ranges that will be mapped over
 +   * @throws IllegalArgumentException
 +   *           if the ranges cannot be encoded into base 64
 +   * @since 1.6.0
 +   */
 +  public static void setRanges(Class<?> implementingClass, Configuration conf, Collection<Range> ranges) {
 +    checkArgument(ranges != null, "ranges is null");
 +
 +    ArrayList<String> rangeStrings = new ArrayList<String>(ranges.size());
 +    try {
 +      for (Range r : ranges) {
 +        ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +        r.write(new DataOutputStream(baos));
-         rangeStrings.add(new String(Base64.encodeBase64(baos.toByteArray())));
++        rangeStrings.add(Base64.encodeBase64String(baos.toByteArray()));
 +      }
 +      conf.setStrings(enumToConfKey(implementingClass, ScanOpts.RANGES), rangeStrings.toArray(new String[0]));
 +    } catch (IOException ex) {
 +      throw new IllegalArgumentException("Unable to encode ranges to Base64", ex);
 +    }
 +  }
 +
 +  /**
 +   * Gets the ranges to scan over from a job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return the ranges
 +   * @throws IOException
 +   *           if the ranges have been encoded improperly
 +   * @since 1.6.0
 +   * @see #setRanges(Class, Configuration, Collection)
 +   */
 +  public static List<Range> getRanges(Class<?> implementingClass, Configuration conf) throws IOException {
 +
 +    Collection<String> encodedRanges = conf.getStringCollection(enumToConfKey(implementingClass, ScanOpts.RANGES));
 +    List<Range> ranges = new ArrayList<Range>();
 +    for (String rangeString : encodedRanges) {
-       ByteArrayInputStream bais = new ByteArrayInputStream(Base64.decodeBase64(rangeString.getBytes()));
++      ByteArrayInputStream bais = new ByteArrayInputStream(Base64.decodeBase64(rangeString.getBytes(StandardCharsets.UTF_8)));
 +      Range range = new Range();
 +      range.readFields(new DataInputStream(bais));
 +      ranges.add(range);
 +    }
 +    return ranges;
 +  }
 +
 +  /**
 +   * Gets a list of the iterator settings (for iterators to apply to a scanner) from this configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return a list of iterators
 +   * @since 1.6.0
 +   * @see #addIterator(Class, Configuration, IteratorSetting)
 +   */
 +  public static List<IteratorSetting> getIterators(Class<?> implementingClass, Configuration conf) {
 +    String iterators = conf.get(enumToConfKey(implementingClass, ScanOpts.ITERATORS));
 +
 +    // If no iterators are present, return an empty list
 +    if (iterators == null || iterators.isEmpty())
 +      return new ArrayList<IteratorSetting>();
 +
 +    // Compose the set of iterators encoded in the job configuration
 +    StringTokenizer tokens = new StringTokenizer(iterators, StringUtils.COMMA_STR);
 +    List<IteratorSetting> list = new ArrayList<IteratorSetting>();
 +    try {
 +      while (tokens.hasMoreTokens()) {
 +        String itstring = tokens.nextToken();
-         ByteArrayInputStream bais = new ByteArrayInputStream(Base64.decodeBase64(itstring.getBytes()));
++        ByteArrayInputStream bais = new ByteArrayInputStream(Base64.decodeBase64(itstring.getBytes(StandardCharsets.UTF_8)));
 +        list.add(new IteratorSetting(new DataInputStream(bais)));
 +        bais.close();
 +      }
 +    } catch (IOException e) {
 +      throw new IllegalArgumentException("couldn't decode iterator settings");
 +    }
 +    return list;
 +  }
 +
 +  /**
 +   * Restricts the columns that will be mapped over for the single input table on this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param columnFamilyColumnQualifierPairs
 +   *          a pair of {@link Text} objects corresponding to column family and column qualifier. If the column qualifier is null, the entire column family is
 +   *          selected. An empty set is the default and is equivalent to scanning all columns.
 +   * @throws IllegalArgumentException
 +   *           if the column family is null
 +   * @since 1.6.0
 +   */
 +  public static void fetchColumns(Class<?> implementingClass, Configuration conf, Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
 +    checkArgument(columnFamilyColumnQualifierPairs != null, "columnFamilyColumnQualifierPairs is null");
 +    String[] columnStrings = serializeColumns(columnFamilyColumnQualifierPairs);
 +    conf.setStrings(enumToConfKey(implementingClass, ScanOpts.COLUMNS), columnStrings);
 +  }
 +
 +  public static String[] serializeColumns(Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
 +    checkArgument(columnFamilyColumnQualifierPairs != null, "columnFamilyColumnQualifierPairs is null");
 +    ArrayList<String> columnStrings = new ArrayList<String>(columnFamilyColumnQualifierPairs.size());
 +    for (Pair<Text,Text> column : columnFamilyColumnQualifierPairs) {
 +
 +      if (column.getFirst() == null)
 +        throw new IllegalArgumentException("Column family can not be null");
 +
-       String col = new String(Base64.encodeBase64(TextUtil.getBytes(column.getFirst())), StandardCharsets.UTF_8);
++      String col = Base64.encodeBase64String(TextUtil.getBytes(column.getFirst()));
 +      if (column.getSecond() != null)
-         col += ":" + new String(Base64.encodeBase64(TextUtil.getBytes(column.getSecond())), StandardCharsets.UTF_8);
++        col += ":" + Base64.encodeBase64String(TextUtil.getBytes(column.getSecond()));
 +      columnStrings.add(col);
 +    }
 +
 +    return columnStrings.toArray(new String[0]);
 +  }
 +
 +  /**
 +   * Gets the columns to be mapped over from this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return a set of columns
 +   * @since 1.6.0
 +   * @see #fetchColumns(Class, Configuration, Collection)
 +   */
 +  public static Set<Pair<Text,Text>> getFetchedColumns(Class<?> implementingClass, Configuration conf) {
 +    checkArgument(conf != null, "conf is null");
 +    String confValue = conf.get(enumToConfKey(implementingClass, ScanOpts.COLUMNS));
 +    List<String> serialized = new ArrayList<String>();
 +    if (confValue != null) {
 +      // Split and include any trailing empty strings to allow empty column families
 +      for (String val : confValue.split(",", -1)) {
 +        serialized.add(val);
 +      }
 +    }
 +    return deserializeFetchedColumns(serialized);
 +  }
 +
 +  public static Set<Pair<Text,Text>> deserializeFetchedColumns(Collection<String> serialized) {
 +    Set<Pair<Text,Text>> columns = new HashSet<Pair<Text,Text>>();
 +
 +    if (null == serialized) {
 +      return columns;
 +    }
 +
 +    for (String col : serialized) {
 +      int idx = col.indexOf(":");
 +      Text cf = new Text(idx < 0 ? Base64.decodeBase64(col.getBytes(StandardCharsets.UTF_8)) : Base64.decodeBase64(col.substring(0, idx).getBytes(
 +          StandardCharsets.UTF_8)));
 +      Text cq = idx < 0 ? null : new Text(Base64.decodeBase64(col.substring(idx + 1).getBytes(StandardCharsets.UTF_8)));
 +      columns.add(new Pair<Text,Text>(cf, cq));
 +    }
 +    return columns;
 +  }
 +
 +  /**
 +   * Encode an iterator on the input for the single input table associated with this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param cfg
 +   *          the configuration of the iterator
 +   * @throws IllegalArgumentException
 +   *           if the iterator can't be serialized into the configuration
 +   * @since 1.6.0
 +   */
 +  public static void addIterator(Class<?> implementingClass, Configuration conf, IteratorSetting cfg) {
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    String newIter;
 +    try {
 +      cfg.write(new DataOutputStream(baos));
-       newIter = new String(Base64.encodeBase64(baos.toByteArray()), StandardCharsets.UTF_8);
++      newIter = Base64.encodeBase64String(baos.toByteArray());
 +      baos.close();
 +    } catch (IOException e) {
 +      throw new IllegalArgumentException("unable to serialize IteratorSetting");
 +    }
 +
 +    String confKey = enumToConfKey(implementingClass, ScanOpts.ITERATORS);
 +    String iterators = conf.get(confKey);
 +    // No iterators specified yet, create a new string
 +    if (iterators == null || iterators.isEmpty()) {
 +      iterators = newIter;
 +    } else {
 +      // append the next iterator & reset
 +      iterators = iterators.concat(StringUtils.COMMA_STR + newIter);
 +    }
 +    // Store the iterators w/ the job
 +    conf.set(confKey, iterators);
 +  }
 +
 +  /**
 +   * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries.
 +   * Disabling this feature will cause exactly one Map task to be created for each specified range.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>enabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @see #setRanges(Class, Configuration, Collection)
 +   * @since 1.6.0
 +   */
 +  public static void setAutoAdjustRanges(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.AUTO_ADJUST_RANGES), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether a configuration has auto-adjust ranges enabled.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return false if the feature is disabled, true otherwise
 +   * @since 1.6.0
 +   * @see #setAutoAdjustRanges(Class, Configuration, boolean)
 +   */
 +  public static Boolean getAutoAdjustRanges(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.AUTO_ADJUST_RANGES), true);
 +  }
 +
 +  /**
 +   * Controls the use of the {@link IsolatedScanner} in this job.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.6.0
 +   */
 +  public static void setScanIsolation(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.SCAN_ISOLATION), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether a configuration has isolation enabled.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.6.0
 +   * @see #setScanIsolation(Class, Configuration, boolean)
 +   */
 +  public static Boolean isIsolated(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.SCAN_ISOLATION), false);
 +  }
 +
 +  /**
 +   * Controls the use of the {@link ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack to be constructed within the Map
 +   * task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be available on the classpath for the task.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.6.0
 +   */
 +  public static void setLocalIterators(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.USE_LOCAL_ITERATORS), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether a configuration uses local iterators.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.6.0
 +   * @see #setLocalIterators(Class, Configuration, boolean)
 +   */
 +  public static Boolean usesLocalIterators(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.USE_LOCAL_ITERATORS), false);
 +  }
 +
 +  /**
 +   * <p>
 +   * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the
 +   * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will
 +   * fail.
 +   * 
 +   * <p>
 +   * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS.
 +   * 
 +   * <p>
 +   * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be
 +   * on the mapper's classpath.
 +   * 
 +   * <p>
 +   * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map
 +   * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The
 +   * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file.
 +   * 
 +   * <p>
 +   * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support
 +   * speculative execution better. When reading an online table, speculative execution can put more load on an already slow tablet server.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.6.0
 +   */
 +  public static void setOfflineTableScan(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
 +    conf.setBoolean(enumToConfKey(implementingClass, Features.SCAN_OFFLINE), enableFeature);
 +  }
 +
 +  /**
 +   * Determines whether a configuration has the offline table scan feature enabled.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.6.0
 +   * @see #setOfflineTableScan(Class, Configuration, boolean)
 +   */
 +  public static Boolean isOfflineScan(Class<?> implementingClass, Configuration conf) {
 +    return conf.getBoolean(enumToConfKey(implementingClass, Features.SCAN_OFFLINE), false);
 +  }
 +
 +  /**
 +   * Sets configurations for multiple tables at a time.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param configs
 +   *          an array of {@link InputTableConfig} objects to associate with the job
 +   * @since 1.6.0
 +   */
 +  public static void setInputTableConfigs(Class<?> implementingClass, Configuration conf, Map<String,InputTableConfig> configs) {
 +    MapWritable mapWritable = new MapWritable();
 +    for (Map.Entry<String,InputTableConfig> tableConfig : configs.entrySet())
 +      mapWritable.put(new Text(tableConfig.getKey()), tableConfig.getValue());
 +
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    try {
 +      mapWritable.write(new DataOutputStream(baos));
 +    } catch (IOException e) {
 +      throw new IllegalStateException("Table configuration could not be serialized.");
 +    }
 +
 +    String confKey = enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS);
-     conf.set(confKey, new String(Base64.encodeBase64(baos.toByteArray())));
++    conf.set(confKey, Base64.encodeBase64String(baos.toByteArray()));
 +  }
 +
 +  /**
 +   * Returns all {@link InputTableConfig} objects associated with this job.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @return all of the table query configs for the job
 +   * @since 1.6.0
 +   */
 +  public static Map<String,InputTableConfig> getInputTableConfigs(Class<?> implementingClass, Configuration conf) {
 +    Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
 +    Map.Entry<String,InputTableConfig> defaultConfig = getDefaultInputTableConfig(implementingClass, conf);
 +    if (defaultConfig != null)
 +      configs.put(defaultConfig.getKey(), defaultConfig.getValue());
 +    String configString = conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS));
 +    MapWritable mapWritable = new MapWritable();
 +    if (configString != null) {
 +      try {
-         byte[] bytes = Base64.decodeBase64(configString.getBytes());
++        byte[] bytes = Base64.decodeBase64(configString.getBytes(StandardCharsets.UTF_8));
 +        ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
 +        mapWritable.readFields(new DataInputStream(bais));
 +        bais.close();
 +      } catch (IOException e) {
 +        throw new IllegalStateException("The table query configurations could not be deserialized from the given configuration");
 +      }
 +    }
 +    for (Map.Entry<Writable,Writable> entry : mapWritable.entrySet())
 +      configs.put(((Text) entry.getKey()).toString(), (InputTableConfig) entry.getValue());
 +
 +    return configs;
 +  }
 +
 +  /**
 +   * Returns the {@link InputTableConfig} for the given table
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param tableName
 +   *          the table name for which to fetch the table query config
 +   * @return the table query config for the given table name (if it exists) and null if it does not
 +   * @since 1.6.0
 +   */
 +  public static InputTableConfig getInputTableConfig(Class<?> implementingClass, Configuration conf, String tableName) {
 +    Map<String,InputTableConfig> queryConfigs = getInputTableConfigs(implementingClass, conf);
 +    return queryConfigs.get(tableName);
 +  }
 +
 +  /**
 +   * Initializes an Accumulo {@link TabletLocator} based on the configuration.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @param tableId
 +   *          The table id for which to initialize the {@link TabletLocator}
 +   * @return an Accumulo tablet locator
 +   * @throws TableNotFoundException
 +   *           if the table name set on the configuration doesn't exist
 +   * @since 1.6.0
 +   */
 +  public static TabletLocator getTabletLocator(Class<?> implementingClass, Configuration conf, String tableId) throws TableNotFoundException {
 +    String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE));
 +    if ("MockInstance".equals(instanceType))
 +      return new MockTabletLocator();
 +    Instance instance = getInstance(implementingClass, conf);
 +    return TabletLocator.getLocator(instance, new Text(tableId));
 +  }
 +
 +  // InputFormat doesn't have the equivalent of OutputFormat's checkOutputSpecs(JobContext job)
 +  /**
 +   * Check whether a configuration is fully configured to be used with an Accumulo {@link org.apache.hadoop.mapreduce.InputFormat}.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object to configure
 +   * @throws IOException
 +   *           if the context is improperly configured
 +   * @since 1.6.0
 +   */
 +  public static void validateOptions(Class<?> implementingClass, Configuration conf) throws IOException {
 +
 +    Map<String,InputTableConfig> inputTableConfigs = getInputTableConfigs(implementingClass, conf);
 +    if (!isConnectorInfoSet(implementingClass, conf))
 +      throw new IOException("Input info has not been set.");
 +    String instanceKey = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE));
 +    if (!"MockInstance".equals(instanceKey) && !"ZooKeeperInstance".equals(instanceKey))
 +      throw new IOException("Instance info has not been set.");
 +    // validate that we can connect as configured
 +    try {
 +      String principal = getPrincipal(implementingClass, conf);
 +      AuthenticationToken token = getAuthenticationToken(implementingClass, conf);
 +      Connector c = getInstance(implementingClass, conf).getConnector(principal, token);
 +      if (!c.securityOperations().authenticateUser(principal, token))
 +        throw new IOException("Unable to authenticate user");
 +
 +      if (inputTableConfigs.isEmpty())
 +        throw new IOException("No table set.");
 +
 +      for (Map.Entry<String,InputTableConfig> tableConfig : inputTableConfigs.entrySet()) {
 +        if (!c.securityOperations().hasTablePermission(principal, tableConfig.getKey(), TablePermission.READ))
 +          throw new IOException("Unable to access table " + tableConfig.getKey());
 +      }
 +      for (Map.Entry<String,InputTableConfig> tableConfigEntry : inputTableConfigs.entrySet()) {
 +        InputTableConfig tableConfig = tableConfigEntry.getValue();
 +        if (!tableConfig.shouldUseLocalIterators()) {
 +          if (tableConfig.getIterators() != null) {
 +            for (IteratorSetting iter : tableConfig.getIterators()) {
 +              if (!c.tableOperations().testClassLoad(tableConfigEntry.getKey(), iter.getIteratorClass(), SortedKeyValueIterator.class.getName()))
 +                throw new AccumuloException("Servers are unable to load " + iter.getIteratorClass() + " as a " + SortedKeyValueIterator.class.getName());
 +            }
 +          }
 +        }
 +      }
 +    } catch (AccumuloException e) {
 +      throw new IOException(e);
 +    } catch (AccumuloSecurityException e) {
 +      throw new IOException(e);
 +    } catch (TableNotFoundException e) {
 +      throw new IOException(e);
 +    }
 +  }
 +
 +  /**
 +   * Returns the {@link org.apache.accumulo.core.client.mapreduce.InputTableConfig} for the configuration based on the properties set using the single-table
 +   * input methods.
 +   * 
 +   * @param implementingClass
 +   *          the class whose name will be used as a prefix for the property configuration key
 +   * @param conf
 +   *          the Hadoop configuration object from which to retrieve the configuration
 +   * @return the config object built from the single input table properties set on the job
 +   * @since 1.6.0
 +   */
 +  protected static Map.Entry<String,InputTableConfig> getDefaultInputTableConfig(Class<?> implementingClass, Configuration conf) {
 +    String tableName = getInputTableName(implementingClass, conf);
 +    if (tableName != null) {
 +      InputTableConfig queryConfig = new InputTableConfig();
 +      List<IteratorSetting> itrs = getIterators(implementingClass, conf);
 +      if (itrs != null)
 +        queryConfig.setIterators(itrs);
 +      Set<Pair<Text,Text>> columns = getFetchedColumns(implementingClass, conf);
 +      if (columns != null)
 +        queryConfig.fetchColumns(columns);
 +      List<Range> ranges = null;
 +      try {
 +        ranges = getRanges(implementingClass, conf);
 +      } catch (IOException e) {
 +        throw new RuntimeException(e);
 +      }
 +      if (ranges != null)
 +        queryConfig.setRanges(ranges);
 +
 +      queryConfig.setAutoAdjustRanges(getAutoAdjustRanges(implementingClass, conf)).setUseIsolatedScanners(isIsolated(implementingClass, conf))
 +          .setUseLocalIterators(usesLocalIterators(implementingClass, conf)).setOfflineScan(isOfflineScan(implementingClass, conf));
 +      return Maps.immutableEntry(tableName, queryConfig);
 +    }
 +    return null;
 +  }
 +
 +  public static Map<String,Map<KeyExtent,List<Range>>> binOffline(String tableId, List<Range> ranges, Instance instance, Connector conn)
 +      throws AccumuloException, TableNotFoundException {
 +    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<String,Map<KeyExtent,List<Range>>>();
 +
 +    if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
 +      Tables.clearCache(instance);
 +      if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
 +        throw new AccumuloException("Table is online tableId:" + tableId + " cannot scan table in offline mode ");
 +      }
 +    }
 +
 +    for (Range range : ranges) {
 +      Text startRow;
 +
 +      if (range.getStartKey() != null)
 +        startRow = range.getStartKey().getRow();
 +      else
 +        startRow = new Text();
 +
 +      Range metadataRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
 +      Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +      MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
 +      scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LastLocationColumnFamily.NAME);
 +      scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
 +      scanner.fetchColumnFamily(MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME);
 +      scanner.setRange(metadataRange);
 +
 +      RowIterator rowIter = new RowIterator(scanner);
 +      KeyExtent lastExtent = null;
 +      while (rowIter.hasNext()) {
 +        Iterator<Map.Entry<Key,Value>> row = rowIter.next();
 +        String last = "";
 +        KeyExtent extent = null;
 +        String location = null;
 +
 +        while (row.hasNext()) {
 +          Map.Entry<Key,Value> entry = row.next();
 +          Key key = entry.getKey();
 +
 +          if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.LastLocationColumnFamily.NAME)) {
 +            last = entry.getValue().toString();
 +          }
 +
 +          if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME)
 +              || key.getColumnFamily().equals(MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME)) {
 +            location = entry.getValue().toString();
 +          }
 +
 +          if (MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
 +            extent = new KeyExtent(key.getRow(), entry.getValue());
 +          }
 +
 +        }
 +
 +        if (location != null)
 +          return null;
 +
 +        if (!extent.getTableId().toString().equals(tableId)) {
 +          throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
 +        }
 +
 +        if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
 +          throw new AccumuloException(" " + lastExtent + " is not previous extent " + extent);
 +        }
 +
 +        Map<KeyExtent,List<Range>> tabletRanges = binnedRanges.get(last);
 +        if (tabletRanges == null) {
 +          tabletRanges = new HashMap<KeyExtent,List<Range>>();
 +          binnedRanges.put(last, tabletRanges);
 +        }
 +
 +        List<Range> rangeList = tabletRanges.get(extent);
 +        if (rangeList == null) {
 +          rangeList = new ArrayList<Range>();
 +          tabletRanges.put(extent, rangeList);
 +        }
 +
 +        rangeList.add(range);
 +
 +        if (extent.getEndRow() == null || range.afterEndKey(new Key(extent.getEndRow()).followingKey(PartialKey.ROW))) {
 +          break;
 +        }
 +
 +        lastExtent = extent;
 +      }
 +
 +    }
 +    return binnedRanges;
 +  }
 +}
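
For context on the multi-table input path above, a minimal usage sketch follows (not part of this commit). It assumes Hadoop 2's Job.getInstance and the 1.6.0 mapreduce API, where AccumuloMultiTableInputFormat.setInputTableConfigs(...) serializes the per-table configs into the job's Configuration and the getInputTableConfigs(...) method in the diff above reads them back; the table names and range are illustrative only.

  import java.util.Collections;
  import java.util.HashMap;
  import java.util.Map;

  import org.apache.accumulo.core.client.mapreduce.AccumuloMultiTableInputFormat;
  import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
  import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
  import org.apache.accumulo.core.data.Range;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;

  public class MultiTableConfigSketch {
    public static void main(String[] args) throws Exception {
      Job job = Job.getInstance(new Configuration());

      // Build per-table scan options; the setters used in
      // getDefaultInputTableConfig(...) above are fluent.
      Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
      InputTableConfig t1 = new InputTableConfig();
      t1.setRanges(Collections.singletonList(new Range("a", "z")));
      configs.put("table1", t1);
      configs.put("table2", new InputTableConfig().setAutoAdjustRanges(false));

      // Serializes each config (a Base64-encoded MapWritable, per the
      // deserialization code above) into the job's Configuration.
      AccumuloMultiTableInputFormat.setInputTableConfigs(job, configs);

      // Framework-side code (e.g. getSplits) retrieves the configs back:
      Map<String,InputTableConfig> readBack =
          InputConfigurator.getInputTableConfigs(AccumuloMultiTableInputFormat.class, job.getConfiguration());
      System.out.println(readBack.keySet()); // contains table1 and table2
    }
  }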

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e1862d31/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
----------------------------------------------------------------------
diff --cc mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
index 1b7501c,0000000..2950091
mode 100644,000000..100644
--- a/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
+++ b/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
@@@ -1,135 -1,0 +1,135 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce.lib.partition;
 +
 +import java.io.BufferedReader;
 +import java.io.FileInputStream;
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.io.InputStreamReader;
 +import java.net.URI;
 +import java.nio.charset.StandardCharsets;
 +import java.util.Arrays;
 +import java.util.Scanner;
 +import java.util.TreeSet;
 +
 +import org.apache.accumulo.core.client.mapreduce.lib.impl.DistributedCacheHelper;
- import org.apache.commons.codec.binary.Base64;
++import org.apache.accumulo.core.util.Base64;
 +import org.apache.hadoop.conf.Configurable;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.io.Writable;
 +import org.apache.hadoop.mapreduce.Job;
 +import org.apache.hadoop.mapreduce.Partitioner;
 +
 +/**
 + * Hadoop partitioner that uses ranges, and optionally sub-bins based on hashing.
 + */
 +public class RangePartitioner extends Partitioner<Text,Writable> implements Configurable {
 +  private static final String PREFIX = RangePartitioner.class.getName();
 +  private static final String CUTFILE_KEY = PREFIX + ".cutFile";
 +  private static final String NUM_SUBBINS = PREFIX + ".subBins";
 +
 +  private Configuration conf;
 +
 +  @Override
 +  public int getPartition(Text key, Writable value, int numPartitions) {
 +    try {
 +      return findPartition(key, getCutPoints(), getNumSubBins());
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    }
 +  }
 +
 +  int findPartition(Text key, Text[] array, int numSubBins) {
 +    // find the bin for the range, and guarantee it is positive
 +    int index = Arrays.binarySearch(array, key);
 +    index = index < 0 ? (index + 1) * -1 : index;
 +
 +    // both branches give the same result when numSubBins == 1, but this check
 +    // avoids hashing when it isn't needed, for speed
 +    if (numSubBins < 2)
 +      return index;
 +    return (key.toString().hashCode() & Integer.MAX_VALUE) % numSubBins + index * numSubBins;
 +  }
 +
 +  private int _numSubBins = 0;
 +
 +  private synchronized int getNumSubBins() {
 +    if (_numSubBins < 1) {
 +      // get number of sub-bins and guarantee it is positive
 +      _numSubBins = Math.max(1, getConf().getInt(NUM_SUBBINS, 1));
 +    }
 +    return _numSubBins;
 +  }
 +
 +  private Text[] cutPointArray = null;
 +
 +  private synchronized Text[] getCutPoints() throws IOException {
 +    if (cutPointArray == null) {
 +      String cutFileName = conf.get(CUTFILE_KEY);
 +      Path[] cf = DistributedCacheHelper.getLocalCacheFiles(conf);
 +
 +      if (cf != null) {
 +        for (Path path : cf) {
 +          if (path.toUri().getPath().endsWith(cutFileName.substring(cutFileName.lastIndexOf('/')))) {
 +            TreeSet<Text> cutPoints = new TreeSet<Text>();
 +            Scanner in = new Scanner(new BufferedReader(new InputStreamReader(new FileInputStream(path.toString()), StandardCharsets.UTF_8)));
 +            try {
 +              while (in.hasNextLine())
 +                cutPoints.add(new Text(Base64.decodeBase64(in.nextLine().getBytes(StandardCharsets.UTF_8))));
 +            } finally {
 +              in.close();
 +            }
 +            cutPointArray = cutPoints.toArray(new Text[cutPoints.size()]);
 +            break;
 +          }
 +        }
 +      }
 +      if (cutPointArray == null)
 +        throw new FileNotFoundException(cutFileName + " not found in distributed cache");
 +    }
 +    return cutPointArray;
 +  }
 +
 +  @Override
 +  public Configuration getConf() {
 +    return conf;
 +  }
 +
 +  @Override
 +  public void setConf(Configuration conf) {
 +    this.conf = conf;
 +  }
 +
 +  /**
 +   * Sets the HDFS file to use: a newline-separated list of Base64-encoded split points that define the range boundaries used for partitioning
 +   */
 +  public static void setSplitFile(Job job, String file) {
 +    URI uri = new Path(file).toUri();
 +    DistributedCacheHelper.addCacheFile(uri, job.getConfiguration());
 +    job.getConfiguration().set(CUTFILE_KEY, uri.getPath());
 +  }
 +
 +  /**
 +   * Sets the number of random sub-bins per range
 +   */
 +  public static void setNumSubBins(Job job, int num) {
 +    job.getConfiguration().setInt(NUM_SUBBINS, num);
 +  }
 +}
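
Similarly, a brief job-setup sketch for the partitioner above (again not part of this commit; the HDFS path, split count, and sub-bin count are illustrative, and Hadoop 2's Job.getInstance is assumed). The split file holds one Base64-encoded split point per line, as setSplitFile's javadoc describes.

  import org.apache.accumulo.core.client.mapreduce.lib.partition.RangePartitioner;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;

  public class RangePartitionerSketch {
    public static void main(String[] args) throws Exception {
      Job job = Job.getInstance(new Configuration());
      job.setMapOutputKeyClass(Text.class); // RangePartitioner partitions on Text keys
      job.setPartitionerClass(RangePartitioner.class);

      // Hypothetical HDFS file holding, say, 9 Base64-encoded split points.
      RangePartitioner.setSplitFile(job, "/tmp/splits.txt");

      // Optional: hash each range into 4 sub-bins. findPartition(...) then
      // returns values in [0, (numSplits + 1) * numSubBins), so size the
      // reducer count to match: (9 + 1) * 4 = 40.
      RangePartitioner.setNumSubBins(job, 4);
      job.setNumReduceTasks(40);
    }
  }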