Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/12/03 22:18:48 UTC

[10/14] Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cccdb8cb/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
index 5e246c4,0000000..1404a25
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
@@@ -1,1324 -1,0 +1,1322 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce;
 +
 +import java.io.IOException;
 +import java.io.UnsupportedEncodingException;
 +import java.lang.reflect.Method;
 +import java.net.InetAddress;
 +import java.net.URLDecoder;
 +import java.net.URLEncoder;
 +import java.nio.ByteBuffer;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +import java.util.StringTokenizer;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.ClientSideIteratorScanner;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.IsolatedScanner;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.RowIterator;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableDeletedException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.TableOfflineException;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.impl.OfflineScanner;
 +import org.apache.accumulo.core.client.impl.Tables;
 +import org.apache.accumulo.core.client.impl.TabletLocator;
 +import org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.PartialKey;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.user.VersioningIterator;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.CredentialHelper;
 +import org.apache.accumulo.core.security.thrift.TCredentials;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.mapreduce.InputFormat;
 +import org.apache.hadoop.mapreduce.InputSplit;
 +import org.apache.hadoop.mapreduce.Job;
 +import org.apache.hadoop.mapreduce.JobContext;
 +import org.apache.hadoop.mapreduce.RecordReader;
 +import org.apache.hadoop.mapreduce.TaskAttemptContext;
 +import org.apache.log4j.Level;
 +import org.apache.log4j.Logger;
 +
 +/**
 + * This abstract {@link InputFormat} class allows MapReduce jobs to use Accumulo as the source of K,V pairs.
 + * <p>
 + * Subclasses must implement a {@link #createRecordReader(InputSplit, TaskAttemptContext)} to provide a {@link RecordReader} for K,V.
 + * <p>
 + * A static base class, RecordReaderBase, is provided to retrieve Accumulo {@link Key}/{@link Value} pairs, but one must implement its
 + * {@link RecordReaderBase#nextKeyValue()} to transform them to the desired generic types K,V.
 + * <p>
 + * See {@link AccumuloInputFormat} for an example implementation.
 + */
 +public abstract class InputFormatBase<K,V> extends InputFormat<K,V> {
-   
++
 +  private static final Class<?> CLASS = AccumuloInputFormat.class;
 +  protected static final Logger log = Logger.getLogger(CLASS);
-   
++
 +  /**
 +   * Sets the connector information needed to communicate with Accumulo in this job.
 +   * 
 +   * <p>
 +   * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all MapReduce tasks. It is BASE64 encoded to provide a charset safe
 +   * conversion to a string, and is not intended to be secure.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param principal
 +   *          a valid Accumulo user name (the user must be able to read the input table)
 +   * @param token
 +   *          the user's authentication token
 +   * @throws AccumuloSecurityException
 +   * @since 1.5.0
 +   */
 +  public static void setConnectorInfo(Job job, String principal, AuthenticationToken token) throws AccumuloSecurityException {
 +    InputConfigurator.setConnectorInfo(CLASS, job.getConfiguration(), principal, token);
 +  }
-   
++
 +  /**
 +   * Determines if the connector has been configured.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return true if the connector has been configured, false otherwise
 +   * @since 1.5.0
 +   * @see #setConnectorInfo(Job, String, AuthenticationToken)
 +   */
 +  protected static Boolean isConnectorInfoSet(JobContext context) {
 +    return InputConfigurator.isConnectorInfoSet(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Gets the user name from the configuration.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return the user name
 +   * @since 1.5.0
 +   * @see #setConnectorInfo(Job, String, AuthenticationToken)
 +   */
 +  protected static String getPrincipal(JobContext context) {
 +    return InputConfigurator.getPrincipal(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Gets the serialized token class from the configuration.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return the serialized token class name
 +   * @since 1.5.0
 +   * @see #setConnectorInfo(Job, String, AuthenticationToken)
 +   */
 +  protected static String getTokenClass(JobContext context) {
 +    return InputConfigurator.getTokenClass(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Gets the serialized token from the configuration. WARNING: The serialized token is stored in the Configuration and shared with all MapReduce tasks; it is
 +   * BASE64 encoded to provide a charset safe conversion to a string, and is not intended to be secure.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return the BASE64-decoded token bytes
 +   * @since 1.5.0
 +   * @see #setConnectorInfo(Job, String, AuthenticationToken)
 +   */
 +  protected static byte[] getToken(JobContext context) {
 +    return InputConfigurator.getToken(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Configures a {@link ZooKeeperInstance} for this job.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param instanceName
 +   *          the Accumulo instance name
 +   * @param zooKeepers
 +   *          a comma-separated list of zookeeper servers
 +   * @since 1.5.0
 +   */
 +  public static void setZooKeeperInstance(Job job, String instanceName, String zooKeepers) {
 +    InputConfigurator.setZooKeeperInstance(CLASS, job.getConfiguration(), instanceName, zooKeepers);
 +  }
-   
++
 +  /**
 +   * Configures a {@link MockInstance} for this job.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param instanceName
 +   *          the Accumulo instance name
 +   * @since 1.5.0
 +   */
 +  public static void setMockInstance(Job job, String instanceName) {
 +    InputConfigurator.setMockInstance(CLASS, job.getConfiguration(), instanceName);
 +  }
-   
++
 +  /**
 +   * Initializes an Accumulo {@link Instance} based on the configuration.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return an Accumulo instance
 +   * @since 1.5.0
 +   * @see #setZooKeeperInstance(Job, String, String)
 +   * @see #setMockInstance(Job, String)
 +   */
 +  protected static Instance getInstance(JobContext context) {
 +    return InputConfigurator.getInstance(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Sets the log level for this job.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param level
 +   *          the logging level
 +   * @since 1.5.0
 +   */
 +  public static void setLogLevel(Job job, Level level) {
 +    InputConfigurator.setLogLevel(CLASS, job.getConfiguration(), level);
 +  }
-   
++
 +  /**
 +   * Gets the log level from this configuration.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return the log level
 +   * @since 1.5.0
 +   * @see #setLogLevel(Job, Level)
 +   */
 +  protected static Level getLogLevel(JobContext context) {
 +    return InputConfigurator.getLogLevel(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Sets the name of the input table over which this job will scan.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param tableName
 +   *          the table from which this job will read
 +   * @since 1.5.0
 +   */
 +  public static void setInputTableName(Job job, String tableName) {
 +    InputConfigurator.setInputTableName(CLASS, job.getConfiguration(), tableName);
 +  }
-   
++
 +  /**
 +   * Gets the table name from the configuration.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return the table name
 +   * @since 1.5.0
 +   * @see #setInputTableName(Job, String)
 +   */
 +  protected static String getInputTableName(JobContext context) {
 +    return InputConfigurator.getInputTableName(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Sets the {@link Authorizations} used to scan. Must be a subset of the user's authorizations. Defaults to the empty set.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param auths
 +   *          the user's authorizations
 +   * @since 1.5.0
 +   */
 +  public static void setScanAuthorizations(Job job, Authorizations auths) {
 +    InputConfigurator.setScanAuthorizations(CLASS, job.getConfiguration(), auths);
 +  }
-   
++
 +  /**
 +   * Gets the authorizations to set for the scans from the configuration.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return the Accumulo scan authorizations
 +   * @since 1.5.0
 +   * @see #setScanAuthorizations(Job, Authorizations)
 +   */
 +  protected static Authorizations getScanAuthorizations(JobContext context) {
 +    return InputConfigurator.getScanAuthorizations(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Sets the input ranges to scan for this job. If not set, the entire table will be scanned.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param ranges
 +   *          the ranges that will be mapped over
 +   * @since 1.5.0
 +   */
 +  public static void setRanges(Job job, Collection<Range> ranges) {
 +    InputConfigurator.setRanges(CLASS, job.getConfiguration(), ranges);
 +  }
-   
++
 +  /**
 +   * Gets the ranges to scan over from a job.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return the ranges
 +   * @throws IOException
 +   *           if the ranges have been encoded improperly
 +   * @since 1.5.0
 +   * @see #setRanges(Job, Collection)
 +   */
 +  protected static List<Range> getRanges(JobContext context) throws IOException {
 +    return InputConfigurator.getRanges(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Restricts the columns that will be mapped over for this job.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param columnFamilyColumnQualifierPairs
 +   *          pairs of {@link Text} objects corresponding to column family and column qualifier. If the column qualifier is null, the entire column family is
 +   *          selected. An empty set is the default and is equivalent to scanning all columns.
 +   * @since 1.5.0
 +   */
 +  public static void fetchColumns(Job job, Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
 +    InputConfigurator.fetchColumns(CLASS, job.getConfiguration(), columnFamilyColumnQualifierPairs);
 +  }
-   
++
 +  /**
 +   * Gets the columns to be mapped over from this job.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return a set of columns
 +   * @since 1.5.0
 +   * @see #fetchColumns(Job, Collection)
 +   */
 +  protected static Set<Pair<Text,Text>> getFetchedColumns(JobContext context) {
 +    return InputConfigurator.getFetchedColumns(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Encode an iterator on the input for this job.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param cfg
 +   *          the configuration of the iterator
 +   * @since 1.5.0
 +   */
 +  public static void addIterator(Job job, IteratorSetting cfg) {
 +    InputConfigurator.addIterator(CLASS, job.getConfiguration(), cfg);
 +  }
-   
++
 +  /**
 +   * Gets a list of the iterator settings (for iterators to apply to a scanner) from this configuration.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return a list of iterators
 +   * @since 1.5.0
 +   * @see #addIterator(Job, IteratorSetting)
 +   */
 +  protected static List<IteratorSetting> getIterators(JobContext context) {
 +    return InputConfigurator.getIterators(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries.
 +   * Disabling this feature will cause exactly one Map task to be created for each specified range.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>enabled</b>.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @see #setRanges(Job, Collection)
 +   * @since 1.5.0
 +   */
 +  public static void setAutoAdjustRanges(Job job, boolean enableFeature) {
 +    InputConfigurator.setAutoAdjustRanges(CLASS, job.getConfiguration(), enableFeature);
 +  }
-   
++
 +  /**
 +   * Determines whether a configuration has auto-adjust ranges enabled.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return false if the feature is disabled, true otherwise
 +   * @since 1.5.0
 +   * @see #setAutoAdjustRanges(Job, boolean)
 +   */
 +  protected static boolean getAutoAdjustRanges(JobContext context) {
 +    return InputConfigurator.getAutoAdjustRanges(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Controls the use of the {@link IsolatedScanner} in this job.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.5.0
 +   */
 +  public static void setScanIsolation(Job job, boolean enableFeature) {
 +    InputConfigurator.setScanIsolation(CLASS, job.getConfiguration(), enableFeature);
 +  }
-   
++
 +  /**
 +   * Determines whether a configuration has isolation enabled.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.5.0
 +   * @see #setScanIsolation(Job, boolean)
 +   */
 +  protected static boolean isIsolated(JobContext context) {
 +    return InputConfigurator.isIsolated(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Controls the use of the {@link ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack to be constructed within the Map
 +   * task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be available on the classpath for the task.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.5.0
 +   */
 +  public static void setLocalIterators(Job job, boolean enableFeature) {
 +    InputConfigurator.setLocalIterators(CLASS, job.getConfiguration(), enableFeature);
 +  }
-   
++
 +  /**
 +   * Determines whether a configuration uses local iterators.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.5.0
 +   * @see #setLocalIterators(Job, boolean)
 +   */
 +  protected static boolean usesLocalIterators(JobContext context) {
 +    return InputConfigurator.usesLocalIterators(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * <p>
 +   * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the
 +   * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will
 +   * fail.
 +   * 
 +   * <p>
 +   * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS.
 +   * 
 +   * <p>
 +   * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be
 +   * on the mapper's classpath. The accumulo-site.xml may need to be on the mapper's classpath if HDFS or the Accumulo directory in HDFS are non-standard.
 +   * 
 +   * <p>
 +   * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map
 +   * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The
 +   * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file.
 +   * 
 +   * <p>
 +   * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support
 +   * speculative execution better. When reading an online table, speculative execution can put more load on an already slow tablet server.
 +   * 
 +   * <p>
 +   * By default, this feature is <b>disabled</b>.
 +   * 
 +   * @param job
 +   *          the Hadoop job instance to be configured
 +   * @param enableFeature
 +   *          the feature is enabled if true, disabled otherwise
 +   * @since 1.5.0
 +   */
 +  public static void setOfflineTableScan(Job job, boolean enableFeature) {
 +    InputConfigurator.setOfflineTableScan(CLASS, job.getConfiguration(), enableFeature);
 +  }
-   
++
 +  /**
 +   * Determines whether a configuration has the offline table scan feature enabled.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return true if the feature is enabled, false otherwise
 +   * @since 1.5.0
 +   * @see #setOfflineTableScan(Job, boolean)
 +   */
 +  protected static boolean isOfflineScan(JobContext context) {
 +    return InputConfigurator.isOfflineScan(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * Initializes an Accumulo {@link TabletLocator} based on the configuration.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @return an Accumulo tablet locator
 +   * @throws TableNotFoundException
 +   *           if the table name set on the configuration doesn't exist
 +   * @since 1.5.0
 +   */
 +  protected static TabletLocator getTabletLocator(JobContext context) throws TableNotFoundException {
 +    return InputConfigurator.getTabletLocator(CLASS, getConfiguration(context));
 +  }
-   
++
 +  // InputFormat doesn't have the equivalent of OutputFormat's checkOutputSpecs(JobContext job)
 +  /**
 +   * Check whether a configuration is fully configured to be used with an Accumulo {@link org.apache.hadoop.mapreduce.InputFormat}.
 +   * 
 +   * @param context
 +   *          the Hadoop context for the configured job
 +   * @throws IOException
 +   *           if the context is improperly configured
 +   * @since 1.5.0
 +   */
 +  protected static void validateOptions(JobContext context) throws IOException {
 +    InputConfigurator.validateOptions(CLASS, getConfiguration(context));
 +  }
-   
++
 +  /**
 +   * An abstract base class to be used to create {@link RecordReader} instances that convert from Accumulo {@link Key}/{@link Value} pairs to the user's K/V
 +   * types.
 +   * 
 +   * Subclasses must implement {@link #nextKeyValue()} and use it to update the following variables:
 +   * <ul>
 +   * <li>K {@link #currentK}</li>
 +   * <li>V {@link #currentV}</li>
 +   * <li>Key {@link #currentKey} (used for progress reporting)</li>
 +   * <li>int {@link #numKeysRead} (used for progress reporting)</li>
 +   * </ul>
 +   */
 +  protected abstract static class RecordReaderBase<K,V> extends RecordReader<K,V> {
 +    protected long numKeysRead;
 +    protected Iterator<Entry<Key,Value>> scannerIterator;
 +    protected RangeInputSplit split;
-     
++
 +    /**
 +     * Apply the given iterators to the scanner.
 +     * 
-      * @param context
-      *          the Hadoop context for the configured job
 +     * @param iterators
 +     *          the iterator settings to apply
 +     * @param scanner
 +     *          the scanner to configure
 +     */
 +    protected void setupIterators(List<IteratorSetting> iterators, Scanner scanner) {
 +      for (IteratorSetting iterator : iterators) {
 +        scanner.addScanIterator(iterator);
 +      }
 +    }
-     
++
 +    /**
 +     * Initialize a scanner over the given input split using this task attempt configuration.
 +     */
 +    @Override
 +    public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
 +      Scanner scanner;
 +      split = (RangeInputSplit) inSplit;
 +      log.debug("Initializing input split: " + split.getRange());
 +
 +      Instance instance = split.getInstance();
 +      if (null == instance) {
 +        instance = getInstance(attempt);
 +      }
 +
 +      String principal = split.getPrincipal();
 +      if (null == principal) {
 +        principal = getPrincipal(attempt);
 +      }
 +
 +      AuthenticationToken token = split.getToken();
 +      if (null == token) {
 +        String tokenClass = getTokenClass(attempt);
 +        byte[] tokenBytes = getToken(attempt);
 +        try {
 +          token = CredentialHelper.extractToken(tokenClass, tokenBytes);
 +        } catch (AccumuloSecurityException e) {
 +          throw new IOException(e);
 +        }
 +      }
 +
 +      Authorizations authorizations = split.getAuths();
 +      if (null == authorizations) {
 +        authorizations = getScanAuthorizations(attempt);
 +      }
 +
 +      String table = split.getTable();
 +      if (null == table) {
 +        table = getInputTableName(attempt);
 +      }
-       
++
 +      Boolean isOffline = split.isOffline();
 +      if (null == isOffline) {
 +        isOffline = isOfflineScan(attempt);
 +      }
 +
 +      Boolean isIsolated = split.isIsolatedScan();
 +      if (null == isIsolated) {
 +        isIsolated = isIsolated(attempt);
 +      }
 +
 +      Boolean usesLocalIterators = split.usesLocalIterators();
 +      if (null == usesLocalIterators) {
 +        usesLocalIterators = usesLocalIterators(attempt);
 +      }
-       
++
 +      List<IteratorSetting> iterators = split.getIterators();
 +      if (null == iterators) {
 +        iterators = getIterators(attempt);
 +      }
-       
++
 +      Set<Pair<Text,Text>> columns = split.getFetchedColumns();
 +      if (null == columns) {
 +        columns = getFetchedColumns(attempt);
 +      }
-       
++
 +      try {
 +        log.debug("Creating connector with user: " + principal);
 +        Connector conn = instance.getConnector(principal, token);
 +        log.debug("Creating scanner for table: " + table);
 +        log.debug("Authorizations are: " + authorizations);
 +        if (isOffline) {
 +          String tokenClass = token.getClass().getCanonicalName();
 +          ByteBuffer tokenBuffer = ByteBuffer.wrap(CredentialHelper.toBytes(token));
-           scanner = new OfflineScanner(instance, new TCredentials(principal, tokenClass, tokenBuffer, instance.getInstanceID()), Tables.getTableId(
-               instance, table), authorizations);
++          scanner = new OfflineScanner(instance, new TCredentials(principal, tokenClass, tokenBuffer, instance.getInstanceID()), Tables.getTableId(instance,
++              table), authorizations);
 +        } else {
 +          scanner = conn.createScanner(table, authorizations);
 +        }
 +        if (isIsolated) {
 +          log.info("Creating isolated scanner");
 +          scanner = new IsolatedScanner(scanner);
 +        }
 +        if (usesLocalIterators) {
 +          log.info("Using local iterators");
 +          scanner = new ClientSideIteratorScanner(scanner);
 +        }
 +        setupIterators(iterators, scanner);
 +      } catch (Exception e) {
 +        throw new IOException(e);
 +      }
-       
++
 +      // setup a scanner within the bounds of this split
 +      for (Pair<Text,Text> c : columns) {
 +        if (c.getSecond() != null) {
 +          log.debug("Fetching column " + c.getFirst() + ":" + c.getSecond());
 +          scanner.fetchColumn(c.getFirst(), c.getSecond());
 +        } else {
 +          log.debug("Fetching column family " + c.getFirst());
 +          scanner.fetchColumnFamily(c.getFirst());
 +        }
 +      }
-       
++
 +      scanner.setRange(split.getRange());
-       
++
 +      numKeysRead = 0;
-       
++
 +      // do this last after setting all scanner options
 +      scannerIterator = scanner.iterator();
 +    }
-     
++
 +    @Override
 +    public void close() {}
-     
++
 +    @Override
 +    public float getProgress() throws IOException {
 +      if (numKeysRead > 0 && currentKey == null)
 +        return 1.0f;
 +      return split.getProgress(currentKey);
 +    }
-     
++
 +    protected K currentK = null;
 +    protected V currentV = null;
 +    protected Key currentKey = null;
 +    protected Value currentValue = null;
-     
++
 +    @Override
 +    public K getCurrentKey() throws IOException, InterruptedException {
 +      return currentK;
 +    }
-     
++
 +    @Override
 +    public V getCurrentValue() throws IOException, InterruptedException {
 +      return currentV;
 +    }
 +  }
-   
++
 +  Map<String,Map<KeyExtent,List<Range>>> binOfflineTable(JobContext context, String tableName, List<Range> ranges) throws TableNotFoundException,
 +      AccumuloException, AccumuloSecurityException {
-     
++
 +    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<String,Map<KeyExtent,List<Range>>>();
-     
++
 +    Instance instance = getInstance(context);
 +    Connector conn = instance.getConnector(getPrincipal(context), CredentialHelper.extractToken(getTokenClass(context), getToken(context)));
 +    String tableId = Tables.getTableId(instance, tableName);
-     
++
 +    if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
 +      Tables.clearCache(instance);
 +      if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
 +        throw new AccumuloException("Table is online: " + tableName + " (" + tableId + "); cannot scan table in offline mode");
 +      }
 +    }
-     
++
 +    for (Range range : ranges) {
 +      Text startRow;
-       
++
 +      if (range.getStartKey() != null)
 +        startRow = range.getStartKey().getRow();
 +      else
 +        startRow = new Text();
-       
++
 +      Range metadataRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
 +      Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
 +      Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
 +      scanner.fetchColumnFamily(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY);
 +      scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
 +      scanner.fetchColumnFamily(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY);
 +      scanner.setRange(metadataRange);
-       
++
 +      RowIterator rowIter = new RowIterator(scanner);
-       
++
 +      KeyExtent lastExtent = null;
-       
++
 +      while (rowIter.hasNext()) {
 +        Iterator<Entry<Key,Value>> row = rowIter.next();
 +        String last = "";
 +        KeyExtent extent = null;
 +        String location = null;
-         
++
 +        while (row.hasNext()) {
 +          Entry<Key,Value> entry = row.next();
 +          Key key = entry.getKey();
-           
++
 +          if (key.getColumnFamily().equals(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY)) {
 +            last = entry.getValue().toString();
 +          }
-           
++
 +          if (key.getColumnFamily().equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)
 +              || key.getColumnFamily().equals(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY)) {
 +            location = entry.getValue().toString();
 +          }
-           
++
 +          if (Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key)) {
 +            extent = new KeyExtent(key.getRow(), entry.getValue());
 +          }
-           
++
 +        }
-         
++
 +        if (location != null)
 +          return null;
-         
++
 +        if (!extent.getTableId().toString().equals(tableId)) {
 +          throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
 +        }
-         
++
 +        if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
 +          throw new AccumuloException(" " + lastExtent + " is not previous extent " + extent);
 +        }
-         
++
 +        Map<KeyExtent,List<Range>> tabletRanges = binnedRanges.get(last);
 +        if (tabletRanges == null) {
 +          tabletRanges = new HashMap<KeyExtent,List<Range>>();
 +          binnedRanges.put(last, tabletRanges);
 +        }
-         
++
 +        List<Range> rangeList = tabletRanges.get(extent);
 +        if (rangeList == null) {
 +          rangeList = new ArrayList<Range>();
 +          tabletRanges.put(extent, rangeList);
 +        }
-         
++
 +        rangeList.add(range);
-         
++
 +        if (extent.getEndRow() == null || range.afterEndKey(new Key(extent.getEndRow()).followingKey(PartialKey.ROW))) {
 +          break;
 +        }
-         
++
 +        lastExtent = extent;
 +      }
-       
++
 +    }
-     
++
 +    return binnedRanges;
 +  }
-   
++
 +  /**
 +   * Read the metadata table to get tablets and match up ranges to them.
 +   */
 +  @Override
 +  public List<InputSplit> getSplits(JobContext context) throws IOException {
 +    Level logLevel = getLogLevel(context);
 +    log.setLevel(logLevel);
-     
++
 +    validateOptions(context);
-     
++
 +    String tableName = getInputTableName(context);
 +    boolean autoAdjust = getAutoAdjustRanges(context);
 +    List<Range> ranges = autoAdjust ? Range.mergeOverlapping(getRanges(context)) : getRanges(context);
 +    Instance instance = getInstance(context);
 +    boolean offline = isOfflineScan(context);
 +    boolean isolated = isIsolated(context);
 +    boolean localIterators = usesLocalIterators(context);
 +    boolean mockInstance = (null != instance && MockInstance.class.equals(instance.getClass()));
 +    Set<Pair<Text,Text>> fetchedColumns = getFetchedColumns(context);
 +    Authorizations auths = getScanAuthorizations(context);
 +    String principal = getPrincipal(context);
 +    String tokenClass = getTokenClass(context);
 +    byte[] tokenBytes = getToken(context);
-     
++
 +    AuthenticationToken token;
 +    try {
-        token = CredentialHelper.extractToken(tokenClass, tokenBytes);
++      token = CredentialHelper.extractToken(tokenClass, tokenBytes);
 +    } catch (AccumuloSecurityException e) {
 +      throw new IOException(e);
 +    }
-     
++
 +    List<IteratorSetting> iterators = getIterators(context);
-     
++
 +    if (ranges.isEmpty()) {
 +      ranges = new ArrayList<Range>(1);
 +      ranges.add(new Range());
 +    }
-     
++
 +    // get the metadata information for these ranges
 +    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<String,Map<KeyExtent,List<Range>>>();
 +    TabletLocator tl;
 +    try {
 +      if (isOfflineScan(context)) {
 +        binnedRanges = binOfflineTable(context, tableName, ranges);
 +        while (binnedRanges == null) {
 +          // Some tablets were still online, try again
 +          UtilWaitThread.sleep(100 + (int) (Math.random() * 100)); // sleep randomly between 100 and 200 ms
 +          binnedRanges = binOfflineTable(context, tableName, ranges);
 +        }
 +      } else {
 +        String tableId = null;
 +        tl = getTabletLocator(context);
 +        // it's possible that the cache could contain complete, but old information about a table's tablets... so clear it
 +        tl.invalidateCache();
 +        while (!tl.binRanges(ranges, binnedRanges, new TCredentials(principal, tokenClass, ByteBuffer.wrap(tokenBytes), instance.getInstanceID())).isEmpty()) {
 +          if (!(instance instanceof MockInstance)) {
 +            if (tableId == null)
 +              tableId = Tables.getTableId(instance, tableName);
 +            if (!Tables.exists(instance, tableId))
 +              throw new TableDeletedException(tableId);
 +            if (Tables.getTableState(instance, tableId) == TableState.OFFLINE)
 +              throw new TableOfflineException(instance, tableId);
 +          }
 +          binnedRanges.clear();
 +          log.warn("Unable to locate bins for specified ranges. Retrying.");
 +          UtilWaitThread.sleep(100 + (int) (Math.random() * 100)); // sleep randomly between 100 and 200 ms
 +          tl.invalidateCache();
 +        }
 +      }
 +    } catch (Exception e) {
 +      throw new IOException(e);
 +    }
-     
++
 +    ArrayList<InputSplit> splits = new ArrayList<InputSplit>(ranges.size());
 +    HashMap<Range,ArrayList<String>> splitsToAdd = null;
-     
++
 +    if (!autoAdjust)
 +      splitsToAdd = new HashMap<Range,ArrayList<String>>();
-     
++
 +    HashMap<String,String> hostNameCache = new HashMap<String,String>();
-     
++
 +    for (Entry<String,Map<KeyExtent,List<Range>>> tserverBin : binnedRanges.entrySet()) {
 +      String ip = tserverBin.getKey().split(":", 2)[0];
 +      String location = hostNameCache.get(ip);
 +      if (location == null) {
 +        InetAddress inetAddress = InetAddress.getByName(ip);
 +        location = inetAddress.getHostName();
 +        hostNameCache.put(ip, location);
 +      }
-       
++
 +      for (Entry<KeyExtent,List<Range>> extentRanges : tserverBin.getValue().entrySet()) {
 +        Range ke = extentRanges.getKey().toDataRange();
 +        for (Range r : extentRanges.getValue()) {
 +          if (autoAdjust) {
 +            // divide ranges into smaller ranges, based on the tablets
 +            splits.add(new RangeInputSplit(ke.clip(r), new String[] {location}));
 +          } else {
 +            // don't divide ranges
 +            ArrayList<String> locations = splitsToAdd.get(r);
 +            if (locations == null)
 +              locations = new ArrayList<String>(1);
 +            locations.add(location);
 +            splitsToAdd.put(r, locations);
 +          }
 +        }
 +      }
 +    }
-     
++
 +    if (!autoAdjust)
 +      for (Entry<Range,ArrayList<String>> entry : splitsToAdd.entrySet())
 +        splits.add(new RangeInputSplit(entry.getKey(), entry.getValue().toArray(new String[0])));
-     
++
 +    for (InputSplit inputSplit : splits) {
 +      RangeInputSplit split = (RangeInputSplit) inputSplit;
 +
 +      split.setTable(tableName);
 +      split.setOffline(offline);
 +      split.setIsolatedScan(isolated);
 +      split.setUsesLocalIterators(localIterators);
 +      split.setMockInstance(mockInstance);
 +      split.setFetchedColumns(fetchedColumns);
 +      split.setPrincipal(principal);
 +      split.setToken(token);
 +      split.setInstanceName(instance.getInstanceName());
 +      split.setZooKeepers(instance.getZooKeepers());
 +      split.setAuths(auths);
 +      split.setIterators(iterators);
 +      split.setLogLevel(logLevel);
 +    }
-     
++
 +    return splits;
 +  }
 +
 +  // ----------------------------------------------------------------------------------------------------
 +  // Everything below this line is deprecated and should go away in future versions
 +  // ----------------------------------------------------------------------------------------------------
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setScanIsolation(Job, boolean)} instead.
 +   */
 +  @Deprecated
 +  public static void setIsolated(Configuration conf, boolean enable) {
 +    InputConfigurator.setScanIsolation(CLASS, conf, enable);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setLocalIterators(Job, boolean)} instead.
 +   */
 +  @Deprecated
 +  public static void setLocalIterators(Configuration conf, boolean enable) {
 +    InputConfigurator.setLocalIterators(CLASS, conf, enable);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setConnectorInfo(Job, String, AuthenticationToken)}, {@link #setInputTableName(Job, String)}, and
 +   *             {@link #setScanAuthorizations(Job, Authorizations)} instead.
 +   */
 +  @Deprecated
 +  public static void setInputInfo(Configuration conf, String user, byte[] passwd, String table, Authorizations auths) {
 +    try {
 +      InputConfigurator.setConnectorInfo(CLASS, conf, user, new PasswordToken(passwd));
 +    } catch (AccumuloSecurityException e) {
 +      throw new RuntimeException(e);
 +    }
 +    InputConfigurator.setInputTableName(CLASS, conf, table);
 +    InputConfigurator.setScanAuthorizations(CLASS, conf, auths);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setZooKeeperInstance(Job, String, String)} instead.
 +   */
 +  @Deprecated
 +  public static void setZooKeeperInstance(Configuration conf, String instanceName, String zooKeepers) {
 +    InputConfigurator.setZooKeeperInstance(CLASS, conf, instanceName, zooKeepers);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setMockInstance(Job, String)} instead.
 +   */
 +  @Deprecated
 +  public static void setMockInstance(Configuration conf, String instanceName) {
 +    InputConfigurator.setMockInstance(CLASS, conf, instanceName);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setRanges(Job, Collection)} instead.
 +   */
 +  @Deprecated
 +  public static void setRanges(Configuration conf, Collection<Range> ranges) {
 +    InputConfigurator.setRanges(CLASS, conf, ranges);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setAutoAdjustRanges(Job, boolean)} instead.
 +   */
 +  @Deprecated
 +  public static void disableAutoAdjustRanges(Configuration conf) {
 +    InputConfigurator.setAutoAdjustRanges(CLASS, conf, false);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #addIterator(Job, IteratorSetting)} to add the {@link VersioningIterator} instead.
 +   */
 +  @Deprecated
 +  public static void setMaxVersions(Configuration conf, int maxVersions) throws IOException {
 +    IteratorSetting vers = new IteratorSetting(1, "vers", VersioningIterator.class);
 +    try {
 +      VersioningIterator.setMaxVersions(vers, maxVersions);
 +    } catch (IllegalArgumentException e) {
 +      throw new IOException(e);
 +    }
 +    InputConfigurator.addIterator(CLASS, conf, vers);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setOfflineTableScan(Job, boolean)} instead.
 +   */
 +  @Deprecated
 +  public static void setScanOffline(Configuration conf, boolean scanOff) {
 +    InputConfigurator.setOfflineTableScan(CLASS, conf, scanOff);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #fetchColumns(Job, Collection)} instead.
 +   */
 +  @Deprecated
 +  public static void fetchColumns(Configuration conf, Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
 +    InputConfigurator.fetchColumns(CLASS, conf, columnFamilyColumnQualifierPairs);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #setLogLevel(Job, Level)} instead.
 +   */
 +  @Deprecated
 +  public static void setLogLevel(Configuration conf, Level level) {
 +    InputConfigurator.setLogLevel(CLASS, conf, level);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #addIterator(Job, IteratorSetting)} instead.
 +   */
 +  @Deprecated
 +  public static void addIterator(Configuration conf, IteratorSetting cfg) {
 +    InputConfigurator.addIterator(CLASS, conf, cfg);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #isIsolated(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static boolean isIsolated(Configuration conf) {
 +    return InputConfigurator.isIsolated(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #usesLocalIterators(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static boolean usesLocalIterators(Configuration conf) {
 +    return InputConfigurator.usesLocalIterators(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getPrincipal(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static String getPrincipal(Configuration conf) {
 +    return InputConfigurator.getPrincipal(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getToken(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static byte[] getToken(Configuration conf) {
 +    return InputConfigurator.getToken(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getInputTableName(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static String getTablename(Configuration conf) {
 +    return InputConfigurator.getInputTableName(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getScanAuthorizations(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static Authorizations getAuthorizations(Configuration conf) {
 +    return InputConfigurator.getScanAuthorizations(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getInstance(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static Instance getInstance(Configuration conf) {
 +    return InputConfigurator.getInstance(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getTabletLocator(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static TabletLocator getTabletLocator(Configuration conf) throws TableNotFoundException {
 +    return InputConfigurator.getTabletLocator(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getRanges(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static List<Range> getRanges(Configuration conf) throws IOException {
 +    return InputConfigurator.getRanges(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getFetchedColumns(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static Set<Pair<Text,Text>> getFetchedColumns(Configuration conf) {
 +    return InputConfigurator.getFetchedColumns(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getAutoAdjustRanges(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static boolean getAutoAdjustRanges(Configuration conf) {
 +    return InputConfigurator.getAutoAdjustRanges(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getLogLevel(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static Level getLogLevel(Configuration conf) {
 +    return InputConfigurator.getLogLevel(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #validateOptions(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static void validateOptions(Configuration conf) throws IOException {
 +    InputConfigurator.validateOptions(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #addIterator(Job, IteratorSetting)} to add the {@link VersioningIterator} instead.
 +   */
 +  @Deprecated
 +  protected static int getMaxVersions(Configuration conf) {
 +    // This is so convoluted because the only reason to get the number of maxVersions is to construct the same type of IteratorSetting object we have to
 +    // deconstruct to get at this option in the first place, but to preserve correct behavior, this appears necessary.
 +    List<IteratorSetting> iteratorSettings = InputConfigurator.getIterators(CLASS, conf);
 +    for (IteratorSetting setting : iteratorSettings) {
 +      if ("vers".equals(setting.getName()) && 1 == setting.getPriority() && VersioningIterator.class.getName().equals(setting.getIteratorClass())) {
 +        if (setting.getOptions().containsKey("maxVersions"))
 +          return Integer.parseInt(setting.getOptions().get("maxVersions"));
 +        else
 +          return -1;
 +      }
 +    }
 +    return -1;
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #isOfflineScan(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static boolean isOfflineScan(Configuration conf) {
 +    return InputConfigurator.isOfflineScan(CLASS, conf);
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getIterators(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static List<AccumuloIterator> getIterators(Configuration conf) {
 +    List<IteratorSetting> iteratorSettings = InputConfigurator.getIterators(CLASS, conf);
 +    List<AccumuloIterator> deprecatedIterators = new ArrayList<AccumuloIterator>(iteratorSettings.size());
 +    for (IteratorSetting setting : iteratorSettings) {
 +      AccumuloIterator deprecatedIter = new AccumuloIterator(new String(setting.getPriority() + AccumuloIterator.FIELD_SEP + setting.getIteratorClass()
 +          + AccumuloIterator.FIELD_SEP + setting.getName()));
 +      deprecatedIterators.add(deprecatedIter);
 +    }
 +    return deprecatedIterators;
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link #getIterators(JobContext)} instead.
 +   */
 +  @Deprecated
 +  protected static List<AccumuloIteratorOption> getIteratorOptions(Configuration conf) {
 +    List<IteratorSetting> iteratorSettings = InputConfigurator.getIterators(CLASS, conf);
 +    List<AccumuloIteratorOption> deprecatedIteratorOptions = new ArrayList<AccumuloIteratorOption>(iteratorSettings.size());
 +    for (IteratorSetting setting : iteratorSettings) {
 +      for (Entry<String,String> opt : setting.getOptions().entrySet()) {
 +        String deprecatedOption;
 +        try {
 +          deprecatedOption = new String(setting.getName() + AccumuloIteratorOption.FIELD_SEP + URLEncoder.encode(opt.getKey(), "UTF-8")
 +              + AccumuloIteratorOption.FIELD_SEP + URLEncoder.encode(opt.getValue(), "UTF-8"));
 +        } catch (UnsupportedEncodingException e) {
 +          throw new RuntimeException(e);
 +        }
 +        deprecatedIteratorOptions.add(new AccumuloIteratorOption(deprecatedOption));
 +      }
 +    }
 +    return deprecatedIteratorOptions;
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link IteratorSetting} instead.
 +   */
 +  @Deprecated
 +  static class AccumuloIterator {
-     
++
 +    private static final String FIELD_SEP = ":";
-     
++
 +    private int priority;
 +    private String iteratorClass;
 +    private String iteratorName;
-     
++
 +    public AccumuloIterator(int priority, String iteratorClass, String iteratorName) {
 +      this.priority = priority;
 +      this.iteratorClass = iteratorClass;
 +      this.iteratorName = iteratorName;
 +    }
-     
++
 +    // Parses out a setting given a string supplied from an earlier toString() call
 +    public AccumuloIterator(String iteratorSetting) {
 +      // Parse the string to expand the iterator
 +      StringTokenizer tokenizer = new StringTokenizer(iteratorSetting, FIELD_SEP);
 +      priority = Integer.parseInt(tokenizer.nextToken());
 +      iteratorClass = tokenizer.nextToken();
 +      iteratorName = tokenizer.nextToken();
 +    }
-     
++
 +    public int getPriority() {
 +      return priority;
 +    }
-     
++
 +    public String getIteratorClass() {
 +      return iteratorClass;
 +    }
-     
++
 +    public String getIteratorName() {
 +      return iteratorName;
 +    }
-     
++
 +    @Override
 +    public String toString() {
 +      return new String(priority + FIELD_SEP + iteratorClass + FIELD_SEP + iteratorName);
 +    }
-     
++
 +  }
-   
++
 +  /**
 +   * @deprecated since 1.5.0; Use {@link IteratorSetting} instead.
 +   */
 +  @Deprecated
 +  static class AccumuloIteratorOption {
 +    private static final String FIELD_SEP = ":";
-     
++
 +    private String iteratorName;
 +    private String key;
 +    private String value;
-     
++
 +    public AccumuloIteratorOption(String iteratorName, String key, String value) {
 +      this.iteratorName = iteratorName;
 +      this.key = key;
 +      this.value = value;
 +    }
-     
++
 +    // Parses out an option given a string supplied from an earlier toString() call
 +    public AccumuloIteratorOption(String iteratorOption) {
 +      StringTokenizer tokenizer = new StringTokenizer(iteratorOption, FIELD_SEP);
 +      this.iteratorName = tokenizer.nextToken();
 +      try {
 +        this.key = URLDecoder.decode(tokenizer.nextToken(), "UTF-8");
 +        this.value = URLDecoder.decode(tokenizer.nextToken(), "UTF-8");
 +      } catch (UnsupportedEncodingException e) {
 +        throw new RuntimeException(e);
 +      }
 +    }
-     
++
 +    public String getIteratorName() {
 +      return iteratorName;
 +    }
-     
++
 +    public String getKey() {
 +      return key;
 +    }
-     
++
 +    public String getValue() {
 +      return value;
 +    }
-     
++
 +    @Override
 +    public String toString() {
 +      try {
 +        return new String(iteratorName + FIELD_SEP + URLEncoder.encode(key, "UTF-8") + FIELD_SEP + URLEncoder.encode(value, "UTF-8"));
 +      } catch (UnsupportedEncodingException e) {
 +        throw new RuntimeException(e);
 +      }
 +    }
-     
++
 +  }
-   
++
 +  // use reflection to pull the Configuration out of the JobContext for Hadoop 1 and Hadoop 2 compatibility
 +  static Configuration getConfiguration(JobContext context) {
 +    try {
 +      Class<?> c = InputFormatBase.class.getClassLoader().loadClass("org.apache.hadoop.mapreduce.JobContext");
 +      Method m = c.getMethod("getConfiguration");
 +      Object o = m.invoke(context, new Object[0]);
 +      return (Configuration) o;
 +    } catch (Exception e) {
 +      throw new RuntimeException(e);
 +    }
 +  }
-   
++
 +}
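
For reference, the configuration methods in InputFormatBase above are normally called together from a MapReduce driver before job submission. The following is a minimal sketch, not part of this commit; the instance name "myinstance", quorum "zk1:2181", user "reader", and table "mytable" are placeholder values:

    import java.util.Collections;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.iterators.user.VersioningIterator;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.core.util.Pair;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;

    public class ScanDriver {
      public static void main(String[] args) throws Exception {
        Job job = new Job(new Configuration(), "accumulo-scan");
        job.setInputFormatClass(AccumuloInputFormat.class);

        // The token is BASE64-serialized into the shared job configuration;
        // see the setConnectorInfo warning above.
        AccumuloInputFormat.setConnectorInfo(job, "reader", new PasswordToken("secret"));
        AccumuloInputFormat.setZooKeeperInstance(job, "myinstance", "zk1:2181");
        AccumuloInputFormat.setInputTableName(job, "mytable");
        AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("public"));

        // Optional: restrict the scan to one row range, one column family,
        // and apply a scan-time iterator.
        AccumuloInputFormat.setRanges(job, Collections.singleton(new Range("a", "m")));
        AccumuloInputFormat.fetchColumns(job,
            Collections.singleton(new Pair<Text,Text>(new Text("cf"), (Text) null)));
        AccumuloInputFormat.addIterator(job, new IteratorSetting(50, "vers", VersioningIterator.class));

        // Mapper, output format, and job.waitForCompletion(true) omitted.
      }
    }

If setOfflineTableScan(job, true) is used instead of an online scan, the clone-and-offline workflow described in its javadoc usually precedes submission: conn.tableOperations().clone(...) on the source table, followed by conn.tableOperations().offline(...) on the clone, which then serves as the input table.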

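Similarly, the RecordReaderBase contract documented above (implement nextKeyValue() and update currentK, currentV, currentKey, and numKeysRead) reduces to a few lines in the plain Key/Value case. A sketch of a hypothetical subclass, close to what AccumuloInputFormat's record reader does:

    // Nested inside a subclass of InputFormatBase<Key,Value>, so the
    // enclosing file's imports (java.util.Map, Key, Value) are in scope:
    protected static class KeyValueRecordReader extends RecordReaderBase<Key,Value> {
      @Override
      public boolean nextKeyValue() throws IOException, InterruptedException {
        if (scannerIterator.hasNext()) {
          ++numKeysRead; // drives progress reporting, together with currentKey
          Map.Entry<Key,Value> entry = scannerIterator.next();
          currentK = currentKey = entry.getKey();
          currentV = currentValue = entry.getValue();
          return true;
        }
        return false;
      }
    }
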
http://git-wip-us.apache.org/repos/asf/accumulo/blob/cccdb8cb/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 8ff3f0e,0000000..0718505
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@@ -1,428 -1,0 +1,432 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce;
 +
 +import java.io.DataInput;
 +import java.io.DataOutput;
 +import java.io.IOException;
 +import java.math.BigInteger;
 +import java.nio.charset.Charset;
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.data.ByteSequence;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.PartialKey;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.CredentialHelper;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.io.Writable;
 +import org.apache.hadoop.mapreduce.InputSplit;
- import org.apache.hadoop.util.StringUtils;
 +import org.apache.log4j.Level;
 +
 +/**
 + * Encapsulates an Accumulo range for use in MapReduce jobs.
 + */
 +public class RangeInputSplit extends InputSplit implements Writable {
 +  private Range range;
 +  private String[] locations;
 +  private String table, instanceName, zooKeepers, principal;
 +  private AuthenticationToken token;
 +  private Boolean offline, mockInstance, isolatedScan, localIterators;
 +  private Authorizations auths;
 +  private Set<Pair<Text,Text>> fetchedColumns;
 +  private List<IteratorSetting> iterators;
 +  private Level level;
 +
 +  public RangeInputSplit() {
 +    range = new Range();
 +    locations = new String[0];
 +  }
 +
 +  public RangeInputSplit(Range range, String[] locations) {
 +    this.range = range;
 +    this.locations = locations;
 +  }
 +
 +  public Range getRange() {
 +    return range;
 +  }
 +
 +  private static byte[] extractBytes(ByteSequence seq, int numBytes) {
 +    byte[] bytes = new byte[numBytes + 1];
 +    bytes[0] = 0;
 +    for (int i = 0; i < numBytes; i++) {
 +      if (i >= seq.length())
 +        bytes[i + 1] = 0;
 +      else
 +        bytes[i + 1] = seq.byteAt(i);
 +    }
 +    return bytes;
 +  }
 +
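++  // Maps position between start and end onto [0, 1] by treating the byte
++  // sequences as big-endian integers: for start "a", end "c", position "b"
++  // this yields (0x62 - 0x61) / (0x63 - 0x61) = 0.5.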
 +  public static float getProgress(ByteSequence start, ByteSequence end, ByteSequence position) {
 +    int maxDepth = Math.min(Math.max(end.length(), start.length()), position.length());
 +    BigInteger startBI = new BigInteger(extractBytes(start, maxDepth));
 +    BigInteger endBI = new BigInteger(extractBytes(end, maxDepth));
 +    BigInteger positionBI = new BigInteger(extractBytes(position, maxDepth));
 +    return (float) (positionBI.subtract(startBI).doubleValue() / endBI.subtract(startBI).doubleValue());
 +  }
 +
 +  public float getProgress(Key currentKey) {
 +    if (currentKey == null)
 +      return 0f;
 +    if (range.getStartKey() != null && range.getEndKey() != null) {
 +      if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW) != 0) {
 +        // just look at the row progress
 +        return getProgress(range.getStartKey().getRowData(), range.getEndKey().getRowData(), currentKey.getRowData());
 +      } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM) != 0) {
 +        // just look at the column family progress
 +        return getProgress(range.getStartKey().getColumnFamilyData(), range.getEndKey().getColumnFamilyData(), currentKey.getColumnFamilyData());
 +      } else if (range.getStartKey().compareTo(range.getEndKey(), PartialKey.ROW_COLFAM_COLQUAL) != 0) {
 +        // just look at the column qualifier progress
 +        return getProgress(range.getStartKey().getColumnQualifierData(), range.getEndKey().getColumnQualifierData(), currentKey.getColumnQualifierData());
 +      }
 +    }
 +    // if we can't figure it out, then claim no progress
 +    return 0f;
 +  }
 +
 +  /**
-    * This implementation of length is only an estimate, it does not provide exact values. Do not have your code rely on this return value.
++   * This implementation of length is only an estimate; it does not provide exact values. Do not rely on this return value. For example, for single-byte
++   * rows "a" and "z" it returns ((0x61 ^ 0x7a) << 8) + 1 = 6913, an order-of-magnitude signal rather than a byte count.
 +   */
++  @Override
 +  public long getLength() throws IOException {
 +    Text startRow = range.isInfiniteStartKey() ? new Text(new byte[] {Byte.MIN_VALUE}) : range.getStartKey().getRow();
 +    Text stopRow = range.isInfiniteStopKey() ? new Text(new byte[] {Byte.MAX_VALUE}) : range.getEndKey().getRow();
 +    int maxCommon = Math.min(7, Math.min(startRow.getLength(), stopRow.getLength()));
 +    long diff = 0;
 +
 +    byte[] start = startRow.getBytes();
 +    byte[] stop = stopRow.getBytes();
 +    for (int i = 0; i < maxCommon; ++i) {
 +      diff |= 0xff & (start[i] ^ stop[i]);
 +      diff <<= Byte.SIZE;
 +    }
 +
 +    if (startRow.getLength() != stopRow.getLength())
 +      diff |= 0xff;
 +
 +    return diff + 1;
 +  }
 +
++  @Override
 +  public String[] getLocations() throws IOException {
 +    return locations;
 +  }
 +
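++  // Wire format: the Range, then the location strings, then each optional
++  // field preceded by a boolean presence flag, so unset (null) fields cost
++  // only a single byte.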
++  @Override
 +  public void readFields(DataInput in) throws IOException {
 +    range.readFields(in);
 +    int numLocs = in.readInt();
 +    locations = new String[numLocs];
 +    for (int i = 0; i < numLocs; ++i)
 +      locations[i] = in.readUTF();
-     
++
 +    if (in.readBoolean()) {
 +      isolatedScan = in.readBoolean();
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      offline = in.readBoolean();
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      localIterators = in.readBoolean();
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      mockInstance = in.readBoolean();
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      int numColumns = in.readInt();
 +      List<String> columns = new ArrayList<String>(numColumns);
 +      for (int i = 0; i < numColumns; i++) {
 +        columns.add(in.readUTF());
 +      }
-       
++
 +      fetchedColumns = InputConfigurator.deserializeFetchedColumns(columns);
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      String strAuths = in.readUTF();
 +      auths = new Authorizations(strAuths.getBytes(Charset.forName("UTF-8")));
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      principal = in.readUTF();
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      String tokenClass = in.readUTF();
 +      byte[] base64TokenBytes = in.readUTF().getBytes(Charset.forName("UTF-8"));
 +      byte[] tokenBytes = Base64.decodeBase64(base64TokenBytes);
-       
++
 +      try {
 +        token = CredentialHelper.extractToken(tokenClass, tokenBytes);
 +      } catch (AccumuloSecurityException e) {
 +        throw new IOException(e);
 +      }
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      instanceName = in.readUTF();
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      zooKeepers = in.readUTF();
 +    }
-     
++
 +    if (in.readBoolean()) {
 +      level = Level.toLevel(in.readInt());
 +    }
 +  }
 +
++  @Override
 +  public void write(DataOutput out) throws IOException {
 +    range.write(out);
 +    out.writeInt(locations.length);
 +    for (int i = 0; i < locations.length; ++i)
 +      out.writeUTF(locations[i]);
-     
++
 +    out.writeBoolean(null != isolatedScan);
 +    if (null != isolatedScan) {
 +      out.writeBoolean(isolatedScan);
 +    }
-     
++
 +    out.writeBoolean(null != offline);
 +    if (null != offline) {
 +      out.writeBoolean(offline);
 +    }
-     
++
 +    out.writeBoolean(null != localIterators);
 +    if (null != localIterators) {
 +      out.writeBoolean(localIterators);
 +    }
-     
++
 +    out.writeBoolean(null != mockInstance);
 +    if (null != mockInstance) {
 +      out.writeBoolean(mockInstance);
 +    }
-     
++
 +    out.writeBoolean(null != fetchedColumns);
 +    if (null != fetchedColumns) {
 +      String[] cols = InputConfigurator.serializeColumns(fetchedColumns);
 +      out.writeInt(cols.length);
 +      for (String col : cols) {
 +        out.writeUTF(col);
 +      }
 +    }
-     
++
 +    out.writeBoolean(null != auths);
 +    if (null != auths) {
 +      out.writeUTF(auths.serialize());
 +    }
-     
++
 +    out.writeBoolean(null != principal);
 +    if (null != principal) {
 +      out.writeUTF(principal);
 +    }
-     
++
 +    out.writeBoolean(null != token);
 +    if (null != token) {
 +      out.writeUTF(token.getClass().getCanonicalName());
 +      try {
 +        out.writeUTF(CredentialHelper.tokenAsBase64(token));
 +      } catch (AccumuloSecurityException e) {
 +        throw new IOException(e);
 +      }
 +    }
-     
++
 +    out.writeBoolean(null != instanceName);
 +    if (null != instanceName) {
 +      out.writeUTF(instanceName);
 +    }
-     
++
 +    out.writeBoolean(null != zooKeepers);
 +    if (null != zooKeepers) {
 +      out.writeUTF(zooKeepers);
 +    }
-     
++
 +    out.writeBoolean(null != level);
 +    if (null != level) {
 +      out.writeInt(level.toInt());
 +    }
 +  }
 +
 +  @Override
 +  public String toString() {
 +    StringBuilder sb = new StringBuilder(256);
 +    sb.append("Range: ").append(range);
 +    sb.append(" Locations: ").append(locations);
 +    sb.append(" Table: ").append(table);
 +    sb.append(" InstanceName: ").append(instanceName);
 +    sb.append(" zooKeepers: ").append(zooKeepers);
 +    sb.append(" principal: ").append(principal);
 +    sb.append(" authenticationToken: ").append(token);
 +    sb.append(" Authorizations: ").append(auths);
 +    sb.append(" offlineScan: ").append(offline);
 +    sb.append(" mockInstance: ").append(mockInstance);
 +    sb.append(" isolatedScan: ").append(isolatedScan);
 +    sb.append(" localIterators: ").append(localIterators);
 +    sb.append(" fetchColumns: ").append(fetchedColumns);
 +    sb.append(" iterators: ").append(iterators);
 +    sb.append(" logLevel: ").append(level);
 +    return sb.toString();
 +  }
 +
 +  public String getTable() {
 +    return table;
 +  }
 +
 +  public void setTable(String table) {
 +    this.table = table;
 +  }
-   
++
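++  // Reconstructs an Instance from the serialized state: a MockInstance when
++  // the mock flag is set, a ZooKeeperInstance when both the instance name
++  // and ZooKeeper quorum are present, and null otherwise.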
 +  public Instance getInstance() {
 +    if (null == instanceName) {
 +      return null;
 +    }
-     
-     if (isMockInstance()) {  
++
++    if (isMockInstance()) {
 +      return new MockInstance(getInstanceName());
 +    }
-     
++
 +    if (null == zooKeepers) {
 +      return null;
 +    }
-     
++
 +    return new ZooKeeperInstance(getInstanceName(), getZooKeepers());
 +  }
 +
 +  public String getInstanceName() {
 +    return instanceName;
 +  }
 +
 +  public void setInstanceName(String instanceName) {
 +    this.instanceName = instanceName;
 +  }
 +
 +  public String getZooKeepers() {
 +    return zooKeepers;
 +  }
 +
 +  public void setZooKeepers(String zooKeepers) {
 +    this.zooKeepers = zooKeepers;
 +  }
 +
 +  public String getPrincipal() {
 +    return principal;
 +  }
 +
 +  public void setPrincipal(String principal) {
 +    this.principal = principal;
 +  }
-   
++
 +  public AuthenticationToken getToken() {
 +    return token;
 +  }
-   
++
 +  public void setToken(AuthenticationToken token) {
-     this.token = token;;
++    this.token = token;
 +  }
 +
 +  public Boolean isOffline() {
 +    return offline;
 +  }
 +
 +  public void setOffline(Boolean offline) {
 +    this.offline = offline;
 +  }
 +
 +  public void setLocations(String[] locations) {
 +    this.locations = locations;
 +  }
 +
 +  public Boolean isMockInstance() {
 +    return mockInstance;
 +  }
 +
 +  public void setMockInstance(Boolean mockInstance) {
 +    this.mockInstance = mockInstance;
 +  }
 +
 +  public Boolean isIsolatedScan() {
 +    return isolatedScan;
 +  }
 +
 +  public void setIsolatedScan(Boolean isolatedScan) {
 +    this.isolatedScan = isolatedScan;
 +  }
 +
 +  public Authorizations getAuths() {
 +    return auths;
 +  }
 +
 +  public void setAuths(Authorizations auths) {
 +    this.auths = auths;
 +  }
 +
 +  public void setRange(Range range) {
 +    this.range = range;
 +  }
 +
 +  public Boolean usesLocalIterators() {
 +    return localIterators;
 +  }
 +
 +  public void setUsesLocalIterators(Boolean localIterators) {
 +    this.localIterators = localIterators;
 +  }
 +
 +  public Set<Pair<Text,Text>> getFetchedColumns() {
 +    return fetchedColumns;
 +  }
 +
 +  public void setFetchedColumns(Set<Pair<Text,Text>> fetchedColumns) {
 +    this.fetchedColumns = fetchedColumns;
 +  }
 +
 +  public List<IteratorSetting> getIterators() {
 +    return iterators;
 +  }
 +
 +  public void setIterators(List<IteratorSetting> iterators) {
 +    this.iterators = iterators;
 +  }
 +
 +  public Level getLogLevel() {
 +    return level;
 +  }
-   
++
 +  public void setLogLevel(Level level) {
 +    this.level = level;
 +  }
 +}
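
For reference, the Writable round trip implemented above can be exercised on its own. Below is a minimal sketch, assuming the 1.5.1 API in this diff; the instance name, quorum, and principal are hypothetical placeholders. Note that write() and readFields() cover the range, locations, connection and scan state, but not the table name or iterator list; the record reader falls back to the job configuration for those (see the delegation tests in AccumuloInputFormatTest below).

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.accumulo.core.client.mapreduce.RangeInputSplit;
    import org.apache.accumulo.core.data.Range;
    import org.apache.log4j.Level;

    public class RangeInputSplitRoundTrip {
      public static void main(String[] args) throws IOException {
        RangeInputSplit split = new RangeInputSplit(new Range("a", "z"), new String[] {"tserver1"});
        split.setInstanceName("myInstance"); // hypothetical
        split.setZooKeepers("zoo1:2181");    // hypothetical
        split.setPrincipal("root");
        split.setLogLevel(Level.WARN);

        // serialize, then deserialize into a fresh split
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        split.write(new DataOutputStream(baos));

        RangeInputSplit copy = new RangeInputSplit();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));

        // The range, locations, instance name, principal and log level survive
        // the round trip; getTable() and getIterators() return null because
        // they are not part of the serialized form.
        System.out.println(copy);
      }
    }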

http://git-wip-us.apache.org/repos/asf/accumulo/blob/cccdb8cb/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
index 93dba65,0000000..d826895
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
@@@ -1,446 -1,0 +1,446 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
- import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.List;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.user.RegExFilter;
 +import org.apache.accumulo.core.iterators.user.WholeRowIterator;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.conf.Configured;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.mapreduce.InputFormat;
 +import org.apache.hadoop.mapreduce.InputSplit;
 +import org.apache.hadoop.mapreduce.Job;
- import org.apache.hadoop.mapreduce.JobContext;
 +import org.apache.hadoop.mapreduce.Mapper;
- import org.apache.hadoop.mapreduce.RecordReader;
- import org.apache.hadoop.mapreduce.TaskAttemptContext;
- import org.apache.hadoop.mapreduce.TaskAttemptID;
 +import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 +import org.apache.hadoop.util.Tool;
 +import org.apache.hadoop.util.ToolRunner;
 +import org.apache.log4j.Level;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
 +public class AccumuloInputFormatTest {
 +
 +  private static final String PREFIX = AccumuloInputFormatTest.class.getSimpleName();
 +
 +  /**
 +   * Test basic setting & getting of max versions.
 +   * 
 +   * @throws IOException
 +   *           Signals that an I/O exception has occurred.
 +   */
 +  @Deprecated
 +  @Test
 +  public void testMaxVersions() throws IOException {
 +    Job job = new Job();
 +    AccumuloInputFormat.setMaxVersions(job.getConfiguration(), 1);
 +    int version = AccumuloInputFormat.getMaxVersions(job.getConfiguration());
 +    assertEquals(1, version);
 +  }
 +
 +  /**
 +   * Test max versions with an invalid value.
 +   * 
 +   * @throws IOException
 +   *           Signals that an I/O exception has occurred.
 +   */
 +  @Deprecated
 +  @Test(expected = IOException.class)
 +  public void testMaxVersionsLessThan1() throws IOException {
 +    Job job = new Job();
 +    AccumuloInputFormat.setMaxVersions(job.getConfiguration(), 0);
 +  }
 +
 +  /**
 +   * Test no max version configured.
 +   * 
 +   * @throws IOException
 +   */
 +  @Deprecated
 +  @Test
 +  public void testNoMaxVersion() throws IOException {
 +    Job job = new Job();
 +    assertEquals(-1, AccumuloInputFormat.getMaxVersions(job.getConfiguration()));
 +  }
 +
 +  /**
 +   * Check that the iterator configuration is getting stored in the Job conf correctly.
 +   * 
 +   * @throws IOException
 +   */
 +  @Test
 +  public void testSetIterator() throws IOException {
 +    Job job = new Job();
 +
 +    IteratorSetting is = new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator");
 +    AccumuloInputFormat.addIterator(job, is);
 +    Configuration conf = job.getConfiguration();
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    is.write(new DataOutputStream(baos));
 +    String iterators = conf.get("AccumuloInputFormat.ScanOpts.Iterators");
 +    assertEquals(new String(Base64.encodeBase64(baos.toByteArray())), iterators);
 +  }
 +
 +  @Test
 +  public void testAddIterator() throws IOException {
 +    Job job = new Job();
 +
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", WholeRowIterator.class));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
 +    IteratorSetting iter = new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator");
 +    iter.addOption("v1", "1");
 +    iter.addOption("junk", "\0omg:!\\xyzzy");
 +    AccumuloInputFormat.addIterator(job, iter);
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +
 +    // Check the list size
 +    assertTrue(list.size() == 3);
 +
 +    // Walk the list and make sure our settings are correct
 +    IteratorSetting setting = list.get(0);
 +    assertEquals(1, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.user.WholeRowIterator", setting.getIteratorClass());
 +    assertEquals("WholeRow", setting.getName());
 +    assertEquals(0, setting.getOptions().size());
 +
 +    setting = list.get(1);
 +    assertEquals(2, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
 +    assertEquals("Versions", setting.getName());
 +    assertEquals(0, setting.getOptions().size());
 +
 +    setting = list.get(2);
 +    assertEquals(3, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
 +    assertEquals("Count", setting.getName());
 +    assertEquals(2, setting.getOptions().size());
 +    assertEquals("1", setting.getOptions().get("v1"));
 +    assertEquals("\0omg:!\\xyzzy", setting.getOptions().get("junk"));
 +  }
 +
 +  /**
 +   * Test adding iterator options where the keys and values contain both the FIELD_SEPARATOR character (':') and ITERATOR_SEPARATOR (',') characters. There
 +   * should be no exceptions thrown when trying to parse these types of option entries.
 +   * 
-    * This test makes sure that the expected raw values, as appears in the Job, are equal to what's expected.
++   * This test makes sure that the raw values, as they appear in the Job configuration, are what's expected.
 +   */
 +  @Test
 +  public void testIteratorOptionEncoding() throws Throwable {
 +    String key = "colon:delimited:key";
 +    String value = "comma,delimited,value";
 +    IteratorSetting someSetting = new IteratorSetting(1, "iterator", "Iterator.class");
 +    someSetting.addOption(key, value);
 +    Job job = new Job();
 +    AccumuloInputFormat.addIterator(job, someSetting);
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +    assertEquals(1, list.size());
 +    assertEquals(1, list.get(0).getOptions().size());
 +    assertEquals(list.get(0).getOptions().get(key), value);
 +
 +    someSetting.addOption(key + "2", value);
 +    someSetting.setPriority(2);
 +    someSetting.setName("it2");
 +    AccumuloInputFormat.addIterator(job, someSetting);
 +    list = AccumuloInputFormat.getIterators(job);
 +    assertEquals(2, list.size());
 +    assertEquals(1, list.get(0).getOptions().size());
 +    assertEquals(list.get(0).getOptions().get(key), value);
 +    assertEquals(2, list.get(1).getOptions().size());
 +    assertEquals(list.get(1).getOptions().get(key), value);
 +    assertEquals(list.get(1).getOptions().get(key + "2"), value);
 +  }
 +
 +  /**
-    * Test getting iterator settings for multiple iterators set
++   * Test getting iterator settings when multiple iterators are configured
 +   * 
 +   * @throws IOException
 +   */
 +  @Test
 +  public void testGetIteratorSettings() throws IOException {
 +    Job job = new Job();
 +
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator"));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator"));
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +
 +    // Check the list size
 +    assertTrue(list.size() == 3);
 +
 +    // Walk the list and make sure our settings are correct
 +    IteratorSetting setting = list.get(0);
 +    assertEquals(1, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.WholeRowIterator", setting.getIteratorClass());
 +    assertEquals("WholeRow", setting.getName());
 +
 +    setting = list.get(1);
 +    assertEquals(2, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
 +    assertEquals("Versions", setting.getName());
 +
 +    setting = list.get(2);
 +    assertEquals(3, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
 +    assertEquals("Count", setting.getName());
 +
 +  }
 +
 +  @Test
 +  public void testSetRegex() throws IOException {
 +    Job job = new Job();
 +
 +    String regex = ">\"*%<>\'\\";
 +
 +    IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
 +    RegExFilter.setRegexs(is, regex, null, null, null, false);
 +    AccumuloInputFormat.addIterator(job, is);
 +
 +    assertTrue(regex.equals(AccumuloInputFormat.getIterators(job).get(0).getName()));
 +  }
 +
 +  private static AssertionError e1 = null;
 +  private static AssertionError e2 = null;
 +
 +  private static class MRTester extends Configured implements Tool {
 +    private static class TestMapper extends Mapper<Key,Value,Key,Value> {
 +      Key key = null;
 +      int count = 0;
 +
 +      @Override
 +      protected void map(Key k, Value v, Context context) throws IOException, InterruptedException {
 +        try {
 +          if (key != null)
 +            assertEquals(key.getRow().toString(), new String(v.get()));
 +          assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
 +          assertEquals(new String(v.get()), String.format("%09x", count));
 +        } catch (AssertionError e) {
 +          e1 = e;
 +        }
 +        key = new Key(k);
 +        count++;
 +      }
 +
 +      @Override
 +      protected void cleanup(Context context) throws IOException, InterruptedException {
 +        try {
 +          assertEquals(100, count);
 +        } catch (AssertionError e) {
 +          e2 = e;
 +        }
 +      }
 +    }
 +
 +    @Override
 +    public int run(String[] args) throws Exception {
 +
 +      if (args.length != 5) {
 +        throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <user> <pass> <table> <instanceName> <inputFormatClass>");
 +      }
 +
 +      String user = args[0];
 +      String pass = args[1];
 +      String table = args[2];
 +      String instanceName = args[3];
 +      String inputFormatClassName = args[4];
-       Class<? extends InputFormat> inputFormatClass = (Class<? extends InputFormat>) Class.forName(inputFormatClassName);
++      @SuppressWarnings("unchecked")
++      Class<? extends InputFormat<?,?>> inputFormatClass = (Class<? extends InputFormat<?,?>>) Class.forName(inputFormatClassName);
 +
 +      Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
 +      job.setJarByClass(this.getClass());
 +
 +      job.setInputFormatClass(inputFormatClass);
 +
 +      AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
 +      AccumuloInputFormat.setInputTableName(job, table);
 +      AccumuloInputFormat.setMockInstance(job, instanceName);
 +
 +      job.setMapperClass(TestMapper.class);
 +      job.setMapOutputKeyClass(Key.class);
 +      job.setMapOutputValueClass(Value.class);
 +      job.setOutputFormatClass(NullOutputFormat.class);
 +
 +      job.setNumReduceTasks(0);
 +
 +      job.waitForCompletion(true);
 +
 +      return job.isSuccessful() ? 0 : 1;
 +    }
 +
 +    public static int main(String[] args) throws Exception {
 +      return ToolRunner.run(CachedConfiguration.getInstance(), new MRTester(), args);
 +    }
 +  }
 +
 +  @Test
 +  public void testMap() throws Exception {
 +    final String INSTANCE_NAME = PREFIX + "_mapreduce_instance";
 +    final String TEST_TABLE_1 = PREFIX + "_mapreduce_table_1";
 +
 +    MockInstance mockInstance = new MockInstance(INSTANCE_NAME);
 +    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
 +    c.tableOperations().create(TEST_TABLE_1);
 +    BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    Assert.assertEquals(0, MRTester.main(new String[] {"root", "", TEST_TABLE_1, INSTANCE_NAME, AccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +
 +  @Test
 +  public void testCorrectRangeInputSplits() throws Exception {
 +    Job job = new Job(new Configuration(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
 +
 +    String username = "user", table = "table", instance = "instance";
 +    PasswordToken password = new PasswordToken("password");
 +    Authorizations auths = new Authorizations("foo");
 +    Collection<Pair<Text,Text>> fetchColumns = Collections.singleton(new Pair<Text,Text>(new Text("foo"), new Text("bar")));
 +    boolean isolated = true, localIters = true;
 +    Level level = Level.WARN;
 +
 +    Instance inst = new MockInstance(instance);
 +    Connector connector = inst.getConnector(username, password);
 +    connector.tableOperations().create(table);
 +
 +    AccumuloInputFormat.setConnectorInfo(job, username, password);
 +    AccumuloInputFormat.setInputTableName(job, table);
 +    AccumuloInputFormat.setScanAuthorizations(job, auths);
 +    AccumuloInputFormat.setMockInstance(job, instance);
 +    AccumuloInputFormat.setScanIsolation(job, isolated);
 +    AccumuloInputFormat.setLocalIterators(job, localIters);
 +    AccumuloInputFormat.fetchColumns(job, fetchColumns);
 +    AccumuloInputFormat.setLogLevel(job, level);
 +
 +    AccumuloInputFormat aif = new AccumuloInputFormat();
 +
 +    List<InputSplit> splits = aif.getSplits(job);
 +
 +    Assert.assertEquals(1, splits.size());
 +
 +    InputSplit split = splits.get(0);
 +
 +    Assert.assertEquals(RangeInputSplit.class, split.getClass());
 +
 +    RangeInputSplit risplit = (RangeInputSplit) split;
 +
 +    Assert.assertEquals(username, risplit.getPrincipal());
 +    Assert.assertEquals(table, risplit.getTable());
 +    Assert.assertEquals(password, risplit.getToken());
 +    Assert.assertEquals(auths, risplit.getAuths());
 +    Assert.assertEquals(instance, risplit.getInstanceName());
 +    Assert.assertEquals(isolated, risplit.isIsolatedScan());
 +    Assert.assertEquals(localIters, risplit.usesLocalIterators());
 +    Assert.assertEquals(fetchColumns, risplit.getFetchedColumns());
 +    Assert.assertEquals(level, risplit.getLogLevel());
 +  }
 +
 +  static class TestMapper extends Mapper<Key,Value,Key,Value> {
 +    Key key = null;
 +    int count = 0;
 +
 +    @Override
 +    protected void map(Key k, Value v, Context context) throws IOException, InterruptedException {
 +      if (key != null)
 +        assertEquals(key.getRow().toString(), new String(v.get()));
 +      assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
 +      assertEquals(new String(v.get()), String.format("%09x", count));
 +      key = new Key(k);
 +      count++;
 +    }
 +  }
 +
 +  @Test
 +  public void testPartialInputSplitDelegationToConfiguration() throws Exception {
 +    String user = "testPartialInputSplitUser";
 +    PasswordToken password = new PasswordToken("");
 +
 +    MockInstance mockInstance = new MockInstance("testPartialInputSplitDelegationToConfiguration");
 +    Connector c = mockInstance.getConnector(user, password);
 +    c.tableOperations().create("testtable");
 +    BatchWriter bw = c.createBatchWriter("testtable", new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
-     Assert.assertEquals(0, MRTester.main(new String[] {user, "", "testtable", "testPartialInputSplitDelegationToConfiguration",
-         EmptySplitsAccumuloInputFormat.class.getCanonicalName()}));
++    Assert.assertEquals(
++        0,
++        MRTester.main(new String[] {user, "", "testtable", "testPartialInputSplitDelegationToConfiguration",
++            EmptySplitsAccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +
 +  @Test
 +  public void testPartialFailedInputSplitDelegationToConfiguration() throws Exception {
 +    String user = "testPartialFailedInputSplit";
 +    PasswordToken password = new PasswordToken("");
 +
 +    MockInstance mockInstance = new MockInstance("testPartialFailedInputSplitDelegationToConfiguration");
 +    Connector c = mockInstance.getConnector(user, password);
 +    c.tableOperations().create("testtable");
 +    BatchWriter bw = c.createBatchWriter("testtable", new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
-     // We should fail before we even get into the Mapper because we can't make the RecordReader 
-     Assert.assertEquals(1, MRTester.main(new String[] {user, "", "testtable", "testPartialFailedInputSplitDelegationToConfiguration",
-         BadPasswordSplitsAccumuloInputFormat.class.getCanonicalName()}));
++    // We should fail before we even get into the Mapper because we can't make the RecordReader
++    Assert.assertEquals(
++        1,
++        MRTester.main(new String[] {user, "", "testtable", "testPartialFailedInputSplitDelegationToConfiguration",
++            BadPasswordSplitsAccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +}
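
The two delegation tests above depend on EmptySplitsAccumuloInputFormat and BadPasswordSplitsAccumuloInputFormat, test-only subclasses that are not part of this hunk. As a rough sketch of the shape such a helper can take (the real classes may differ), an input format that hands back splits stripped of their optional fields, forcing the RecordReader to fall back to the job Configuration, might look like this:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
    import org.apache.accumulo.core.client.mapreduce.RangeInputSplit;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.JobContext;

    // Hypothetical test helper: each returned split carries only its Range and
    // locations, so connection and scan settings must come from the
    // Configuration rather than from the split itself.
    public class EmptySplitsAccumuloInputFormat extends AccumuloInputFormat {
      @Override
      public List<InputSplit> getSplits(JobContext context) throws IOException {
        List<InputSplit> splits = super.getSplits(context);
        List<InputSplit> emptied = new ArrayList<InputSplit>(splits.size());
        for (InputSplit split : splits) {
          RangeInputSplit rangeSplit = (RangeInputSplit) split;
          emptied.add(new RangeInputSplit(rangeSplit.getRange(), rangeSplit.getLocations()));
        }
        return emptied;
      }
    }

The BadPasswordSplits variant would presumably populate each split with an incorrect token instead, so that constructing the scanner fails before the Mapper ever runs, which is why the test above asserts an exit code of 1.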