Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/10/25 01:04:41 UTC

[4/4] git commit: ACCUMULO-391 Use more accurate "InputTableConfig" term

ACCUMULO-391 Use more accurate "InputTableConfig" term


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/61353d1e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/61353d1e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/61353d1e

Branch: refs/heads/master
Commit: 61353d1e3f838f566aa7006e19e0af1ccd02d18a
Parents: a5cf860
Author: Christopher Tubbs <ct...@apache.org>
Authored: Thu Oct 24 19:03:45 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Thu Oct 24 19:03:45 2013 -0400

----------------------------------------------------------------------
 .../core/client/mapred/AbstractInputFormat.java |   29 +-
 .../mapred/AccumuloMultiTableInputFormat.java   |   10 +-
 .../client/mapreduce/AbstractInputFormat.java   |   24 +-
 .../AccumuloMultiTableInputFormat.java          |   10 +-
 .../core/client/mapreduce/BatchScanConfig.java  |  367 ------
 .../core/client/mapreduce/InputTableConfig.java |  370 ++++++
 .../mapreduce/lib/util/InputConfigurator.java   |   87 +-
 .../AccumuloMultiTableInputFormatTest.java      |   24 +-
 .../AccumuloMultiTableInputFormatTest.java      |   22 +-
 .../core/conf/TableQueryConfigTest.java         |   20 +-
 .../accumulo_user_manual/chapters/analytics.tex |    8 +-
 .../server/monitor/servlets/trace/Basic.java    |   15 +-
 .../server/security/SecurityOperation.java      |  198 +--
 .../server/tabletserver/TabletServer.java       | 1223 +++++++++---------
 .../TabletServerResourceManager.java            |  252 ++--
 .../compaction/CompactionStrategy.java          |   30 +-
 .../apache/accumulo/test/TableOperationsIT.java |   31 +-
 17 files changed, 1362 insertions(+), 1358 deletions(-)
----------------------------------------------------------------------
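
For orientation, a minimal sketch of the renamed class as it is used after this commit. Every method shown exists in the new InputTableConfig file below; the ranges, columns, and iterator values are illustrative only:

    import java.util.Collections;

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.util.Pair;
    import org.apache.hadoop.io.Text;

    public class InputTableConfigSketch {
      public static void main(String[] args) {
        // Each setter returns the config itself, so per-table options chain fluently.
        InputTableConfig config = new InputTableConfig()
            .setRanges(Collections.singletonList(new Range("a", "z")))
            .fetchColumns(Collections.singleton(new Pair<Text,Text>(new Text("cf"), new Text("cq"))))
            .setIterators(Collections.singletonList(new IteratorSetting(50, "iter1", "iterclass1")))
            .setAutoAdjustRanges(true)  // default: merge overlapping ranges at split time
            .setOfflineScan(false);     // default: scan the online table
        System.out.println(config.getRanges());
      }
    }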


http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
index eaf99cb..856936e 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
@@ -39,7 +39,7 @@ import org.apache.accumulo.core.client.impl.OfflineScanner;
 import org.apache.accumulo.core.client.impl.ScannerImpl;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.TabletLocator;
-import org.apache.accumulo.core.client.mapreduce.BatchScanConfig;
+import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
 import org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator;
 import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
@@ -292,19 +292,19 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
   }
 
   /**
-   * Fetches all {@link BatchScanConfig}s that have been set on the given Hadoop job.
+   * Fetches all {@link InputTableConfig}s that have been set on the given Hadoop job.
    * 
    * @param job
    *          the Hadoop job instance to be configured
-   * @return the {@link BatchScanConfig} objects set on the job
+   * @return the {@link InputTableConfig} objects set on the job
    * @since 1.6.0
    */
-  public static Map<String,BatchScanConfig> getBatchScanConfigs(JobConf job) {
-    return InputConfigurator.getBatchScanConfigs(CLASS, job);
+  public static Map<String,InputTableConfig> getInputTableConfigs(JobConf job) {
+    return InputConfigurator.getInputTableConfigs(CLASS, job);
   }
 
   /**
-   * Fetches a {@link org.apache.accumulo.core.client.mapreduce.BatchScanConfig} that has been set on the configuration for a specific table.
+   * Fetches an {@link InputTableConfig} that has been set on the configuration for a specific table.
    * 
    * <p>
    * null is returned in the event that the table doesn't exist.
@@ -313,11 +313,11 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
    *          the Hadoop job instance to be configured
    * @param tableName
    *          the table name for which to grab the config object
-   * @return the {@link org.apache.accumulo.core.client.mapreduce.BatchScanConfig} for the given table
+   * @return the {@link InputTableConfig} for the given table
    * @since 1.6.0
    */
-  public static BatchScanConfig getBatchScanConfig(JobConf job, String tableName) {
-    return InputConfigurator.getBatchScanConfig(CLASS, job, tableName);
+  public static InputTableConfig getInputTableConfig(JobConf job, String tableName) {
+    return InputConfigurator.getInputTableConfig(CLASS, job, tableName);
   }
 
   /**
@@ -362,8 +362,7 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
 
       // in case the table name changed, we can still use the previous name for terms of configuration,
       // but the scanner will use the table id resolved at job setup time
-      BatchScanConfig tableConfig = getBatchScanConfig(job, split.getTableName());
-
+      InputTableConfig tableConfig = getInputTableConfig(job, split.getTableName());
 
       try {
         log.debug("Creating connector with user: " + principal);
@@ -432,7 +431,7 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
 
     Instance instance = getInstance(job);
     Connector conn = instance.getConnector(getPrincipal(job), getAuthenticationToken(job));
-    
+
     return InputConfigurator.binOffline(tableId, ranges, instance, conn);
   }
 
@@ -445,10 +444,10 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
     validateOptions(job);
 
     LinkedList<InputSplit> splits = new LinkedList<InputSplit>();
-    Map<String,BatchScanConfig> tableConfigs = getBatchScanConfigs(job);
-    for (Map.Entry<String,BatchScanConfig> tableConfigEntry : tableConfigs.entrySet()) {
+    Map<String,InputTableConfig> tableConfigs = getInputTableConfigs(job);
+    for (Map.Entry<String,InputTableConfig> tableConfigEntry : tableConfigs.entrySet()) {
       String tableName = tableConfigEntry.getKey();
-      BatchScanConfig tableConfig = tableConfigEntry.getValue();
+      InputTableConfig tableConfig = tableConfigEntry.getValue();
       boolean autoAdjust = tableConfig.shouldAutoAdjustRanges();
       String tableId = null;
       List<Range> ranges = autoAdjust ? Range.mergeOverlapping(tableConfig.getRanges()) : tableConfig.getRanges();
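
The getSplits() hunk above keeps the existing per-table range handling: when shouldAutoAdjustRanges() is true (the default), overlapping ranges are merged before splits are computed, while disabling it yields exactly one split per configured range. A small illustration of the merge step, with made-up ranges:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.accumulo.core.data.Range;

    public class MergeOverlappingSketch {
      public static void main(String[] args) {
        List<Range> ranges = Arrays.asList(new Range("a", "f"), new Range("d", "k"));
        // The two overlapping ranges collapse into a single range spanning "a" through "k".
        List<Range> merged = Range.mergeOverlapping(ranges);
        System.out.println(merged); // one merged range
      }
    }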

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
index ed51866..61838db 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.client.mapred;
 import java.io.IOException;
 import java.util.Map;
 
-import org.apache.accumulo.core.client.mapreduce.BatchScanConfig;
+import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
 import org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
@@ -40,7 +40,7 @@ import org.apache.hadoop.mapred.Reporter;
  * <li>{@link AccumuloInputFormat#setConnectorInfo(JobConf, String, String)}
  * <li>{@link AccumuloInputFormat#setScanAuthorizations(JobConf, org.apache.accumulo.core.security.Authorizations)}
  * <li>{@link AccumuloInputFormat#setZooKeeperInstance(JobConf, String, String)} OR {@link AccumuloInputFormat#setMockInstance(JobConf, String)}
- * <li>{@link AccumuloMultiTableInputFormat#setBatchScanConfigs(org.apache.hadoop.mapred.JobConf, java.util.Map)}
+ * <li>{@link AccumuloMultiTableInputFormat#setInputTableConfigs(org.apache.hadoop.mapred.JobConf, java.util.Map)}
  * </ul>
  * 
  * Other static methods are optional.
@@ -49,7 +49,7 @@ import org.apache.hadoop.mapred.Reporter;
 public class AccumuloMultiTableInputFormat extends AbstractInputFormat<Key,Value> {
 
   /**
-   * Sets the {@link BatchScanConfig} objects on the given Hadoop configuration
+   * Sets the {@link InputTableConfig} objects on the given Hadoop configuration
    * 
    * @param job
    *          the Hadoop job instance to be configured
@@ -57,8 +57,8 @@ public class AccumuloMultiTableInputFormat extends AbstractInputFormat<Key,Value
    *          the table query configs to be set on the configuration.
    * @since 1.6.0
    */
-  public static void setBatchScanConfigs(JobConf job, Map<String,BatchScanConfig> configs) {
-    InputConfigurator.setBatchScanConfigs(CLASS, job, configs);
+  public static void setInputTableConfigs(JobConf job, Map<String,InputTableConfig> configs) {
+    InputConfigurator.setInputTableConfigs(CLASS, job, configs);
   }
 
   @Override
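
Taken together with the class javadoc above, a mapred job is now configured along these lines (connection details and table names are placeholders; the pattern matches the updated test further down):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.accumulo.core.client.mapred.AccumuloMultiTableInputFormat;
    import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.mapred.JobConf;

    public class MapredMultiTableSetup {
      public static void main(String[] args) throws Exception {
        JobConf job = new JobConf();
        AccumuloMultiTableInputFormat.setConnectorInfo(job, "user", new PasswordToken("pass"));
        AccumuloMultiTableInputFormat.setScanAuthorizations(job, new Authorizations());
        AccumuloMultiTableInputFormat.setZooKeeperInstance(job, "myInstance", "zoo1:2181");

        // One InputTableConfig per input table, keyed by table name.
        Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
        configs.put("table1", new InputTableConfig());
        configs.put("table2", new InputTableConfig());
        AccumuloMultiTableInputFormat.setInputTableConfigs(job, configs);
      }
    }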

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
index d426caf..626a785 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
@@ -273,19 +273,19 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
   }
 
   /**
-   * Fetches all {@link BatchScanConfig}s that have been set on the given job.
+   * Fetches all {@link InputTableConfig}s that have been set on the given job.
    * 
    * @param context
    *          the Hadoop job instance to be configured
-   * @return the {@link BatchScanConfig} objects for the job
+   * @return the {@link InputTableConfig} objects for the job
    * @since 1.6.0
    */
-  protected static Map<String,BatchScanConfig> getBatchScanConfigs(JobContext context) {
-    return InputConfigurator.getBatchScanConfigs(CLASS, getConfiguration(context));
+  protected static Map<String,InputTableConfig> getInputTableConfigs(JobContext context) {
+    return InputConfigurator.getInputTableConfigs(CLASS, getConfiguration(context));
   }
 
   /**
-   * Fetches a {@link BatchScanConfig} that has been set on the configuration for a specific table.
+   * Fetches an {@link InputTableConfig} that has been set on the configuration for a specific table.
    * 
    * <p>
    * null is returned in the event that the table doesn't exist.
@@ -294,11 +294,11 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
    *          the Hadoop job instance to be configured
    * @param tableName
    *          the table name for which to grab the config object
-   * @return the {@link BatchScanConfig} for the given table
+   * @return the {@link InputTableConfig} for the given table
    * @since 1.6.0
    */
-  protected static BatchScanConfig getBatchScanConfig(JobContext context, String tableName) {
-    return InputConfigurator.getBatchScanConfig(CLASS, getConfiguration(context), tableName);
+  protected static InputTableConfig getInputTableConfig(JobContext context, String tableName) {
+    return InputConfigurator.getInputTableConfig(CLASS, getConfiguration(context), tableName);
   }
 
   /**
@@ -377,7 +377,7 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
 
       // in case the table name changed, we can still use the previous name for terms of configuration,
       // but the scanner will use the table id resolved at job setup time
-      BatchScanConfig tableConfig = getBatchScanConfig(attempt, split.getTableName());
+      InputTableConfig tableConfig = getInputTableConfig(attempt, split.getTableName());
 
 
       try {
@@ -471,11 +471,11 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
     validateOptions(context);
 
     LinkedList<InputSplit> splits = new LinkedList<InputSplit>();
-    Map<String,BatchScanConfig> tableConfigs = getBatchScanConfigs(context);
-    for (Map.Entry<String,BatchScanConfig> tableConfigEntry : tableConfigs.entrySet()) {
+    Map<String,InputTableConfig> tableConfigs = getInputTableConfigs(context);
+    for (Map.Entry<String,InputTableConfig> tableConfigEntry : tableConfigs.entrySet()) {
 
       String tableName = tableConfigEntry.getKey();
-      BatchScanConfig tableConfig = tableConfigEntry.getValue();
+      InputTableConfig tableConfig = tableConfigEntry.getValue();
 
       boolean autoAdjust = tableConfig.shouldAutoAdjustRanges();
       String tableId = null;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java
index 06bcd01..bd15447 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormat.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
  * <li>{@link AccumuloMultiTableInputFormat#setConnectorInfo(Job, String, org.apache.accumulo.core.client.security.tokens.AuthenticationToken)}
  * <li>{@link AccumuloMultiTableInputFormat#setScanAuthorizations(Job, org.apache.accumulo.core.security.Authorizations)}
  * <li>{@link AccumuloMultiTableInputFormat#setZooKeeperInstance(Job, String, String)} OR {@link AccumuloInputFormat#setMockInstance(Job, String)}
- * <li>{@link AccumuloMultiTableInputFormat#setBatchScanConfigs(org.apache.hadoop.mapreduce.Job, Map<String,BatchScanConfig>)}
+ * <li>{@link AccumuloMultiTableInputFormat#setInputTableConfigs(Job, Map)}
  * </ul>
  * 
  * Other static methods are optional.
@@ -51,7 +51,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 public class AccumuloMultiTableInputFormat extends AbstractInputFormat<Key,Value> {
 
   /**
-   * Sets the {@link BatchScanConfig} objects on the given Hadoop configuration
+   * Sets the {@link InputTableConfig} objects on the given Hadoop configuration
    * 
    * @param job
    *          the Hadoop job instance to be configured
@@ -59,9 +59,9 @@ public class AccumuloMultiTableInputFormat extends AbstractInputFormat<Key,Value
    *          the table query configs to be set on the configuration.
    * @since 1.6.0
    */
-  public static void setBatchScanConfigs(Job job, Map<String,BatchScanConfig> configs) {
+  public static void setInputTableConfigs(Job job, Map<String,InputTableConfig> configs) {
     checkNotNull(configs);
-    InputConfigurator.setBatchScanConfigs(CLASS, getConfiguration(job), configs);
+    InputConfigurator.setInputTableConfigs(CLASS, getConfiguration(job), configs);
   }
 
   @Override
@@ -84,7 +84,7 @@ public class AccumuloMultiTableInputFormat extends AbstractInputFormat<Key,Value
 
       @Override
       protected void setupIterators(TaskAttemptContext context, Scanner scanner, String tableName) {
-        List<IteratorSetting> iterators = getBatchScanConfig(context, tableName).getIterators();
+        List<IteratorSetting> iterators = getInputTableConfig(context, tableName).getIterators();
         for (IteratorSetting setting : iterators) {
           scanner.addScanIterator(setting);
         }
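
The new-API variant mirrors the mapred setup against a Job; a sketch using the required calls from the javadoc above (Job.getInstance() assumes the Hadoop 2 API):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.accumulo.core.client.mapreduce.AccumuloMultiTableInputFormat;
    import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.mapreduce.Job;

    public class MapreduceMultiTableSetup {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance();
        job.setInputFormatClass(AccumuloMultiTableInputFormat.class);
        AccumuloMultiTableInputFormat.setConnectorInfo(job, "user", new PasswordToken("pass"));
        AccumuloMultiTableInputFormat.setScanAuthorizations(job, new Authorizations());
        AccumuloMultiTableInputFormat.setZooKeeperInstance(job, "myInstance", "zoo1:2181");

        // Per-table options (ranges, iterators, isolation) travel with each config;
        // iterators set here are applied by the setupIterators override above.
        Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
        configs.put("table1", new InputTableConfig().setUseIsolatedScanners(true));
        configs.put("table2", new InputTableConfig());
        AccumuloMultiTableInputFormat.setInputTableConfigs(job, configs);
      }
    }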

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/main/java/org/apache/accumulo/core/client/mapreduce/BatchScanConfig.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/BatchScanConfig.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/BatchScanConfig.java
deleted file mode 100644
index 470b460..0000000
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/BatchScanConfig.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.client.mapreduce;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-
-/**
- * This class to holds a batch scan configuration for a table. It contains all the properties needed to specify how rows should be returned from the table.
- */
-public class BatchScanConfig implements Writable {
-
-  private List<IteratorSetting> iterators;
-  private List<Range> ranges;
-  private Collection<Pair<Text,Text>> columns;
-
-  private boolean autoAdjustRanges = true;
-  private boolean useLocalIterators = false;
-  private boolean useIsolatedScanners = false;
-  private boolean offlineScan = false;
-
-  public BatchScanConfig() {}
-  
-  /**
-   * Creates a batch scan config object out of a previously serialized batch scan config object.
-   * @param input
-   *          the data input of the serialized batch scan config
-   * @throws IOException
-   */
-  public BatchScanConfig(DataInput input) throws IOException {
-    readFields(input);
-  }
-
-  /**
-   * Sets the input ranges to scan for all tables associated with this job. This will be added to any per-table ranges that have been set using
-   * 
-   * @param ranges
-   *          the ranges that will be mapped over
-   * @since 1.6.0
-   */
-  public BatchScanConfig setRanges(List<Range> ranges) {
-    this.ranges = ranges;
-    return this;
-  }
-
-  /**
-   * Returns the ranges to be queried in the configuration
-   */
-  public List<Range> getRanges() {
-    return ranges != null ? ranges : new ArrayList<Range>();
-  }
-
-  /**
-   * Restricts the columns that will be mapped over for this job for the default input table.
-   * 
-   * @param columns
-   *          a pair of {@link Text} objects corresponding to column family and column qualifier. If the column qualifier is null, the entire column family is
-   *          selected. An empty set is the default and is equivalent to scanning the all columns.
-   * @since 1.6.0
-   */
-  public BatchScanConfig fetchColumns(Collection<Pair<Text,Text>> columns) {
-    this.columns = columns;
-    return this;
-  }
-
-  /**
-   * Returns the columns to be fetched for this configuration
-   */
-  public Collection<Pair<Text,Text>> getFetchedColumns() {
-    return columns != null ? columns : new HashSet<Pair<Text,Text>>();
-  }
-
-  /**
-   * Set iterators on to be used in the query.
-   * 
-   * @param iterators
-   *          the configurations for the iterators
-   * @since 1.6.0
-   */
-  public BatchScanConfig setIterators(List<IteratorSetting> iterators) {
-    this.iterators = iterators;
-    return this;
-  }
-
-  /**
-   * Returns the iterators to be set on this configuration
-   */
-  public List<IteratorSetting> getIterators() {
-    return iterators != null ? iterators : new ArrayList<IteratorSetting>();
-  }
-
-  /**
-   * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries.
-   * Disabling this feature will cause exactly one Map task to be created for each specified range. The default setting is enabled. *
-   * 
-   * <p>
-   * By default, this feature is <b>enabled</b>.
-   * 
-   * @param autoAdjustRanges
-   *          the feature is enabled if true, disabled otherwise
-   * @see #setRanges(java.util.List)
-   * @since 1.6.0
-   */
-  public BatchScanConfig setAutoAdjustRanges(boolean autoAdjustRanges) {
-    this.autoAdjustRanges = autoAdjustRanges;
-    return this;
-  }
-
-  /**
-   * Determines whether a configuration has auto-adjust ranges enabled.
-   * 
-   * @return false if the feature is disabled, true otherwise
-   * @since 1.6.0
-   * @see #setAutoAdjustRanges(boolean)
-   */
-  public boolean shouldAutoAdjustRanges() {
-    return autoAdjustRanges;
-  }
-
-  /**
-   * Controls the use of the {@link org.apache.accumulo.core.client.ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack
-   * to be constructed within the Map task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be
-   * available on the classpath for the task.
-   * 
-   * <p>
-   * By default, this feature is <b>disabled</b>.
-   * 
-   * @param useLocalIterators
-   *          the feature is enabled if true, disabled otherwise
-   * @since 1.6.0
-   */
-  public BatchScanConfig setUseLocalIterators(boolean useLocalIterators) {
-    this.useLocalIterators = useLocalIterators;
-    return this;
-  }
-
-  /**
-   * Determines whether a configuration uses local iterators.
-   * 
-   * @return true if the feature is enabled, false otherwise
-   * @since 1.6.0
-   * @see #setUseLocalIterators(boolean)
-   */
-  public boolean shouldUseLocalIterators() {
-    return useLocalIterators;
-  }
-
-  /**
-   * <p>
-   * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the
-   * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will
-   * fail.
-   * 
-   * <p>
-   * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS.
-   * 
-   * <p>
-   * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be
-   * on the mapper's classpath. The accumulo-site.xml may need to be on the mapper's classpath if HDFS or the Accumulo directory in HDFS are non-standard.
-   * 
-   * <p>
-   * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map
-   * reduce over the data many times, it may be better to the compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The
-   * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file.
-   * 
-   * <p>
-   * There are two possible advantages to reading a tables file directly out of HDFS. First, you may see better read performance. Second, it will support
-   * speculative execution better. When reading an online table speculative execution can put more load on an already slow tablet server.
-   * 
-   * <p>
-   * By default, this feature is <b>disabled</b>.
-   * 
-   * @param offlineScan
-   *          the feature is enabled if true, disabled otherwise
-   * @since 1.6.0
-   */
-  public BatchScanConfig setOfflineScan(boolean offlineScan) {
-    this.offlineScan = offlineScan;
-    return this;
-  }
-
-  /**
-   * Determines whether a configuration has the offline table scan feature enabled.
-   * 
-   * @return true if the feature is enabled, false otherwise
-   * @since 1.6.0
-   * @see #setOfflineScan(boolean)
-   */
-  public boolean isOfflineScan() {
-    return offlineScan;
-  }
-
-  /**
-   * Controls the use of the {@link org.apache.accumulo.core.client.IsolatedScanner} in this job.
-   * 
-   * <p>
-   * By default, this feature is <b>disabled</b>.
-   * 
-   * @param useIsolatedScanners
-   *          the feature is enabled if true, disabled otherwise
-   * @since 1.6.0
-   */
-  public BatchScanConfig setUseIsolatedScanners(boolean useIsolatedScanners) {
-    this.useIsolatedScanners = useIsolatedScanners;
-    return this;
-  }
-
-  /**
-   * Determines whether a configuration has isolation enabled.
-   * 
-   * @return true if the feature is enabled, false otherwise
-   * @since 1.6.0
-   * @see #setUseIsolatedScanners(boolean)
-   */
-  public boolean shouldUseIsolatedScanners() {
-    return useIsolatedScanners;
-  }
-
-  /**
-   * Writes the state for the current object out to the specified {@see DataOutput}
-   * @param dataOutput
-   *          the output for which to write the object's state
-   * @throws IOException
-   */
-  @Override
-  public void write(DataOutput dataOutput) throws IOException {
-    if (iterators != null) {
-      dataOutput.writeInt(iterators.size());
-      for (IteratorSetting setting : iterators)
-        setting.write(dataOutput);
-    } else {
-      dataOutput.writeInt(0);
-    }
-    if (ranges != null) {
-      dataOutput.writeInt(ranges.size());
-      for (Range range : ranges)
-        range.write(dataOutput);
-    } else {
-      dataOutput.writeInt(0);
-    }
-    if (columns != null) {
-      dataOutput.writeInt(columns.size());
-      for (Pair<Text,Text> column : columns) {
-        if (column.getSecond() == null) {
-          dataOutput.writeInt(1);
-          column.getFirst().write(dataOutput);
-        } else {
-          dataOutput.writeInt(2);
-          column.getFirst().write(dataOutput);
-          column.getSecond().write(dataOutput);
-        }
-      }
-    } else {
-      dataOutput.writeInt(0);
-    }
-    dataOutput.writeBoolean(autoAdjustRanges);
-    dataOutput.writeBoolean(useLocalIterators);
-    dataOutput.writeBoolean(useIsolatedScanners);
-  }
-
-  /**
-   * Reads the fields in the {@see DataInput} into the current object
-   * @param dataInput
-   *          the input fields to read into the current object
-   * @throws IOException
-   */
-  @Override
-  public void readFields(DataInput dataInput) throws IOException {
-    // load iterators
-    long iterSize = dataInput.readInt();
-    if (iterSize > 0)
-      iterators = new ArrayList<IteratorSetting>();
-    for (int i = 0; i < iterSize; i++)
-      iterators.add(new IteratorSetting(dataInput));
-    // load ranges
-    long rangeSize = dataInput.readInt();
-    if (rangeSize > 0)
-      ranges = new ArrayList<Range>();
-    for (int i = 0; i < rangeSize; i++) {
-      Range range = new Range();
-      range.readFields(dataInput);
-      ranges.add(range);
-    }
-    // load columns
-    long columnSize = dataInput.readInt();
-    if (columnSize > 0)
-      columns = new HashSet<Pair<Text,Text>>();
-    for (int i = 0; i < columnSize; i++) {
-      long numPairs = dataInput.readInt();
-      Text colFam = new Text();
-      colFam.readFields(dataInput);
-      if (numPairs == 1) {
-        columns.add(new Pair<Text,Text>(colFam, null));
-      } else if (numPairs == 2) {
-        Text colQual = new Text();
-        colQual.readFields(dataInput);
-        columns.add(new Pair<Text,Text>(colFam, colQual));
-      }
-    }
-    autoAdjustRanges = dataInput.readBoolean();
-    useLocalIterators = dataInput.readBoolean();
-    useIsolatedScanners = dataInput.readBoolean();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o)
-      return true;
-    if (o == null || getClass() != o.getClass())
-      return false;
-
-    BatchScanConfig that = (BatchScanConfig) o;
-
-    if (autoAdjustRanges != that.autoAdjustRanges)
-      return false;
-    if (offlineScan != that.offlineScan)
-      return false;
-    if (useIsolatedScanners != that.useIsolatedScanners)
-      return false;
-    if (useLocalIterators != that.useLocalIterators)
-      return false;
-    if (columns != null ? !columns.equals(that.columns) : that.columns != null)
-      return false;
-    if (iterators != null ? !iterators.equals(that.iterators) : that.iterators != null)
-      return false;
-    if (ranges != null ? !ranges.equals(that.ranges) : that.ranges != null)
-      return false;
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = 31 * (iterators != null ? iterators.hashCode() : 0);
-    result = 31 * result + (ranges != null ? ranges.hashCode() : 0);
-    result = 31 * result + (columns != null ? columns.hashCode() : 0);
-    result = 31 * result + (autoAdjustRanges ? 1 : 0);
-    result = 31 * result + (useLocalIterators ? 1 : 0);
-    result = 31 * result + (useIsolatedScanners ? 1 : 0);
-    result = 31 * result + (offlineScan ? 1 : 0);
-    return result;
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java
new file mode 100644
index 0000000..808bd7c
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java
@@ -0,0 +1,370 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * This class holds the configuration for an input table. It contains all the properties needed to specify how rows should be returned from the table.
+ */
+public class InputTableConfig implements Writable {
+
+  private List<IteratorSetting> iterators;
+  private List<Range> ranges;
+  private Collection<Pair<Text,Text>> columns;
+
+  private boolean autoAdjustRanges = true;
+  private boolean useLocalIterators = false;
+  private boolean useIsolatedScanners = false;
+  private boolean offlineScan = false;
+
+  public InputTableConfig() {}
+
+  /**
+   * Creates an input table config object out of a previously serialized input table config object.
+   * 
+   * @param input
+   *          the data input of the serialized input table config
+   * @throws IOException
+   */
+  public InputTableConfig(DataInput input) throws IOException {
+    readFields(input);
+  }
+
+  /**
+   * Sets the input ranges to scan for the table associated with this configuration.
+   * 
+   * @param ranges
+   *          the ranges that will be mapped over
+   * @since 1.6.0
+   */
+  public InputTableConfig setRanges(List<Range> ranges) {
+    this.ranges = ranges;
+    return this;
+  }
+
+  /**
+   * Returns the ranges to be queried in the configuration
+   */
+  public List<Range> getRanges() {
+    return ranges != null ? ranges : new ArrayList<Range>();
+  }
+
+  /**
+   * Restricts the columns that will be mapped over for this job for the default input table.
+   * 
+   * @param columns
+   *          a collection of pairs of {@link Text} objects, each corresponding to a column family and column qualifier. If the column qualifier is null,
+   *          the entire column family is selected. An empty set is the default and is equivalent to scanning all columns.
+   * @since 1.6.0
+   */
+  public InputTableConfig fetchColumns(Collection<Pair<Text,Text>> columns) {
+    this.columns = columns;
+    return this;
+  }
+
+  /**
+   * Returns the columns to be fetched for this configuration
+   */
+  public Collection<Pair<Text,Text>> getFetchedColumns() {
+    return columns != null ? columns : new HashSet<Pair<Text,Text>>();
+  }
+
+  /**
+   * Sets the iterators to be used in the query.
+   * 
+   * @param iterators
+   *          the configurations for the iterators
+   * @since 1.6.0
+   */
+  public InputTableConfig setIterators(List<IteratorSetting> iterators) {
+    this.iterators = iterators;
+    return this;
+  }
+
+  /**
+   * Returns the iterators to be set on this configuration
+   */
+  public List<IteratorSetting> getIterators() {
+    return iterators != null ? iterators : new ArrayList<IteratorSetting>();
+  }
+
+  /**
+   * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries.
+   * Disabling this feature will cause exactly one Map task to be created for each specified range.
+   * 
+   * <p>
+   * By default, this feature is <b>enabled</b>.
+   * 
+   * @param autoAdjustRanges
+   *          the feature is enabled if true, disabled otherwise
+   * @see #setRanges(java.util.List)
+   * @since 1.6.0
+   */
+  public InputTableConfig setAutoAdjustRanges(boolean autoAdjustRanges) {
+    this.autoAdjustRanges = autoAdjustRanges;
+    return this;
+  }
+
+  /**
+   * Determines whether a configuration has auto-adjust ranges enabled.
+   * 
+   * @return false if the feature is disabled, true otherwise
+   * @since 1.6.0
+   * @see #setAutoAdjustRanges(boolean)
+   */
+  public boolean shouldAutoAdjustRanges() {
+    return autoAdjustRanges;
+  }
+
+  /**
+   * Controls the use of the {@link org.apache.accumulo.core.client.ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack
+   * to be constructed within the Map task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be
+   * available on the classpath for the task.
+   * 
+   * <p>
+   * By default, this feature is <b>disabled</b>.
+   * 
+   * @param useLocalIterators
+   *          the feature is enabled if true, disabled otherwise
+   * @since 1.6.0
+   */
+  public InputTableConfig setUseLocalIterators(boolean useLocalIterators) {
+    this.useLocalIterators = useLocalIterators;
+    return this;
+  }
+
+  /**
+   * Determines whether a configuration uses local iterators.
+   * 
+   * @return true if the feature is enabled, false otherwise
+   * @since 1.6.0
+   * @see #setUseLocalIterators(boolean)
+   */
+  public boolean shouldUseLocalIterators() {
+    return useLocalIterators;
+  }
+
+  /**
+   * <p>
+   * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the
+   * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will
+   * fail.
+   * 
+   * <p>
+   * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS.
+   * 
+   * <p>
+   * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be
+   * on the mapper's classpath. The accumulo-site.xml may need to be on the mapper's classpath if HDFS or the Accumulo directory in HDFS are non-standard.
+   * 
+   * <p>
+   * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map
+   * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The
+   * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file.
+   * 
+   * <p>
+   * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support
+   * speculative execution better. When reading an online table, speculative execution can put more load on an already slow tablet server.
+   * 
+   * <p>
+   * By default, this feature is <b>disabled</b>.
+   * 
+   * @param offlineScan
+   *          the feature is enabled if true, disabled otherwise
+   * @since 1.6.0
+   */
+  public InputTableConfig setOfflineScan(boolean offlineScan) {
+    this.offlineScan = offlineScan;
+    return this;
+  }
+
+  /**
+   * Determines whether a configuration has the offline table scan feature enabled.
+   * 
+   * @return true if the feature is enabled, false otherwise
+   * @since 1.6.0
+   * @see #setOfflineScan(boolean)
+   */
+  public boolean isOfflineScan() {
+    return offlineScan;
+  }
+
+  /**
+   * Controls the use of the {@link org.apache.accumulo.core.client.IsolatedScanner} in this job.
+   * 
+   * <p>
+   * By default, this feature is <b>disabled</b>.
+   * 
+   * @param useIsolatedScanners
+   *          the feature is enabled if true, disabled otherwise
+   * @since 1.6.0
+   */
+  public InputTableConfig setUseIsolatedScanners(boolean useIsolatedScanners) {
+    this.useIsolatedScanners = useIsolatedScanners;
+    return this;
+  }
+
+  /**
+   * Determines whether a configuration has isolation enabled.
+   * 
+   * @return true if the feature is enabled, false otherwise
+   * @since 1.6.0
+   * @see #setUseIsolatedScanners(boolean)
+   */
+  public boolean shouldUseIsolatedScanners() {
+    return useIsolatedScanners;
+  }
+
+  /**
+   * Writes the state for the current object out to the specified {@link DataOutput}
+   * 
+   * @param dataOutput
+   *          the output for which to write the object's state
+   * @throws IOException
+   */
+  @Override
+  public void write(DataOutput dataOutput) throws IOException {
+    if (iterators != null) {
+      dataOutput.writeInt(iterators.size());
+      for (IteratorSetting setting : iterators)
+        setting.write(dataOutput);
+    } else {
+      dataOutput.writeInt(0);
+    }
+    if (ranges != null) {
+      dataOutput.writeInt(ranges.size());
+      for (Range range : ranges)
+        range.write(dataOutput);
+    } else {
+      dataOutput.writeInt(0);
+    }
+    if (columns != null) {
+      dataOutput.writeInt(columns.size());
+      for (Pair<Text,Text> column : columns) {
+        if (column.getSecond() == null) {
+          dataOutput.writeInt(1);
+          column.getFirst().write(dataOutput);
+        } else {
+          dataOutput.writeInt(2);
+          column.getFirst().write(dataOutput);
+          column.getSecond().write(dataOutput);
+        }
+      }
+    } else {
+      dataOutput.writeInt(0);
+    }
+    dataOutput.writeBoolean(autoAdjustRanges);
+    dataOutput.writeBoolean(useLocalIterators);
+    dataOutput.writeBoolean(useIsolatedScanners);
+  }
+
+  /**
+   * Reads the fields in the {@link DataInput} into the current object
+   * 
+   * @param dataInput
+   *          the input fields to read into the current object
+   * @throws IOException
+   */
+  @Override
+  public void readFields(DataInput dataInput) throws IOException {
+    // load iterators
+    long iterSize = dataInput.readInt();
+    if (iterSize > 0)
+      iterators = new ArrayList<IteratorSetting>();
+    for (int i = 0; i < iterSize; i++)
+      iterators.add(new IteratorSetting(dataInput));
+    // load ranges
+    long rangeSize = dataInput.readInt();
+    if (rangeSize > 0)
+      ranges = new ArrayList<Range>();
+    for (int i = 0; i < rangeSize; i++) {
+      Range range = new Range();
+      range.readFields(dataInput);
+      ranges.add(range);
+    }
+    // load columns
+    long columnSize = dataInput.readInt();
+    if (columnSize > 0)
+      columns = new HashSet<Pair<Text,Text>>();
+    for (int i = 0; i < columnSize; i++) {
+      long numPairs = dataInput.readInt();
+      Text colFam = new Text();
+      colFam.readFields(dataInput);
+      if (numPairs == 1) {
+        columns.add(new Pair<Text,Text>(colFam, null));
+      } else if (numPairs == 2) {
+        Text colQual = new Text();
+        colQual.readFields(dataInput);
+        columns.add(new Pair<Text,Text>(colFam, colQual));
+      }
+    }
+    autoAdjustRanges = dataInput.readBoolean();
+    useLocalIterators = dataInput.readBoolean();
+    useIsolatedScanners = dataInput.readBoolean();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o)
+      return true;
+    if (o == null || getClass() != o.getClass())
+      return false;
+
+    InputTableConfig that = (InputTableConfig) o;
+
+    if (autoAdjustRanges != that.autoAdjustRanges)
+      return false;
+    if (offlineScan != that.offlineScan)
+      return false;
+    if (useIsolatedScanners != that.useIsolatedScanners)
+      return false;
+    if (useLocalIterators != that.useLocalIterators)
+      return false;
+    if (columns != null ? !columns.equals(that.columns) : that.columns != null)
+      return false;
+    if (iterators != null ? !iterators.equals(that.iterators) : that.iterators != null)
+      return false;
+    if (ranges != null ? !ranges.equals(that.ranges) : that.ranges != null)
+      return false;
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = 31 * (iterators != null ? iterators.hashCode() : 0);
+    result = 31 * result + (ranges != null ? ranges.hashCode() : 0);
+    result = 31 * result + (columns != null ? columns.hashCode() : 0);
+    result = 31 * result + (autoAdjustRanges ? 1 : 0);
+    result = 31 * result + (useLocalIterators ? 1 : 0);
+    result = 31 * result + (useIsolatedScanners ? 1 : 0);
+    result = 31 * result + (offlineScan ? 1 : 0);
+    return result;
+  }
+}
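
Because InputTableConfig implements Writable, the whole object round-trips through Hadoop's serialization, and the DataInput constructor added above makes deserialization a one-liner. A quick sketch of the round trip with an illustrative config:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.util.Collections;

    import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
    import org.apache.accumulo.core.data.Range;

    public class InputTableConfigRoundTrip {
      public static void main(String[] args) throws Exception {
        InputTableConfig original = new InputTableConfig()
            .setRanges(Collections.singletonList(new Range("a", "b")))
            .setAutoAdjustRanges(false);

        // write() serializes iterators, ranges, columns, and three boolean flags.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        original.write(new DataOutputStream(baos));

        InputTableConfig copy = new InputTableConfig(
            new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
        System.out.println(original.equals(copy)); // true
      }
    }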

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
index 4aeffca..11a1619 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
@@ -46,7 +46,7 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.TabletLocator;
-import org.apache.accumulo.core.client.mapreduce.BatchScanConfig;
+import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
 import org.apache.accumulo.core.client.mock.MockTabletLocator;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.data.Key;
@@ -495,12 +495,12 @@ public class InputConfigurator extends ConfiguratorBase {
    * @param conf
    *          the Hadoop configuration object to configure
    * @param configs
-   *          an array of {@link BatchScanConfig} objects to associate with the job
+   *          an array of {@link InputTableConfig} objects to associate with the job
    * @since 1.6.0
    */
-  public static void setBatchScanConfigs(Class<?> implementingClass, Configuration conf, Map<String,BatchScanConfig> configs) {
+  public static void setInputTableConfigs(Class<?> implementingClass, Configuration conf, Map<String,InputTableConfig> configs) {
     MapWritable mapWritable = new MapWritable();
-    for (Map.Entry<String,BatchScanConfig> tableConfig : configs.entrySet())
+    for (Map.Entry<String,InputTableConfig> tableConfig : configs.entrySet())
       mapWritable.put(new Text(tableConfig.getKey()), tableConfig.getValue());
 
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -515,7 +515,7 @@ public class InputConfigurator extends ConfiguratorBase {
   }
 
   /**
-   * Returns all {@link BatchScanConfig} objects associated with this job.
+   * Returns all {@link InputTableConfig} objects associated with this job.
    * 
    * @param implementingClass
    *          the class whose name will be used as a prefix for the property configuration key
@@ -524,10 +524,10 @@ public class InputConfigurator extends ConfiguratorBase {
    * @return all of the table query configs for the job
    * @since 1.6.0
    */
-  public static Map<String,BatchScanConfig> getBatchScanConfigs(Class<?> implementingClass, Configuration conf) {
-    Map<String,BatchScanConfig> configs = new HashMap<String,BatchScanConfig>();
-    Map.Entry<String, BatchScanConfig> defaultConfig = getDefaultBatchScanConfig(implementingClass, conf);
-    if(defaultConfig != null)
+  public static Map<String,InputTableConfig> getInputTableConfigs(Class<?> implementingClass, Configuration conf) {
+    Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
+    Map.Entry<String,InputTableConfig> defaultConfig = getDefaultInputTableConfig(implementingClass, conf);
+    if (defaultConfig != null)
       configs.put(defaultConfig.getKey(), defaultConfig.getValue());
     String configString = conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS));
     MapWritable mapWritable = new MapWritable();
@@ -542,13 +542,13 @@ public class InputConfigurator extends ConfiguratorBase {
       }
     }
     for (Map.Entry<Writable,Writable> entry : mapWritable.entrySet())
-      configs.put(((Text) entry.getKey()).toString(), (BatchScanConfig) entry.getValue());
+      configs.put(((Text) entry.getKey()).toString(), (InputTableConfig) entry.getValue());
 
     return configs;
   }
 
   /**
-   * Returns the {@link BatchScanConfig} for the given table
+   * Returns the {@link InputTableConfig} for the given table
    * 
    * @param implementingClass
    *          the class whose name will be used as a prefix for the property configuration key
@@ -559,8 +559,8 @@ public class InputConfigurator extends ConfiguratorBase {
    * @return the table query config for the given table name (if it exists) and null if it does not
    * @since 1.6.0
    */
-  public static BatchScanConfig getBatchScanConfig(Class<?> implementingClass, Configuration conf, String tableName) {
-    Map<String,BatchScanConfig> queryConfigs = getBatchScanConfigs(implementingClass, conf);
+  public static InputTableConfig getInputTableConfig(Class<?> implementingClass, Configuration conf, String tableName) {
+    Map<String,InputTableConfig> queryConfigs = getInputTableConfigs(implementingClass, conf);
     return queryConfigs.get(tableName);
   }
 
@@ -599,8 +599,8 @@ public class InputConfigurator extends ConfiguratorBase {
    * @since 1.5.0
    */
   public static void validateOptions(Class<?> implementingClass, Configuration conf) throws IOException {
-    
-    Map<String, BatchScanConfig> batchScanConfigs = getBatchScanConfigs(implementingClass, conf);
+
+    Map<String,InputTableConfig> inputTableConfigs = getInputTableConfigs(implementingClass, conf);
     if (!isConnectorInfoSet(implementingClass, conf))
       throw new IOException("Input info has not been set.");
     String instanceKey = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE));
@@ -613,16 +613,16 @@ public class InputConfigurator extends ConfiguratorBase {
       Connector c = getInstance(implementingClass, conf).getConnector(principal, token);
       if (!c.securityOperations().authenticateUser(principal, token))
         throw new IOException("Unable to authenticate user");
-      
-      if(getBatchScanConfigs(implementingClass, conf).size() == 0)
+
+      if (getInputTableConfigs(implementingClass, conf).size() == 0)
         throw new IOException("No table set.");
-      
-      for (Map.Entry<String, BatchScanConfig> tableConfig : batchScanConfigs.entrySet()) {
+
+      for (Map.Entry<String,InputTableConfig> tableConfig : inputTableConfigs.entrySet()) {
         if (!c.securityOperations().hasTablePermission(getPrincipal(implementingClass, conf), tableConfig.getKey(), TablePermission.READ))
           throw new IOException("Unable to access table");
       }
-      for (Map.Entry<String, BatchScanConfig> tableConfigEntry : batchScanConfigs.entrySet()) {
-        BatchScanConfig tableConfig = tableConfigEntry.getValue();
+      for (Map.Entry<String,InputTableConfig> tableConfigEntry : inputTableConfigs.entrySet()) {
+        InputTableConfig tableConfig = tableConfigEntry.getValue();
         if (!tableConfig.shouldUseLocalIterators()) {
           if (tableConfig.getIterators() != null) {
             for (IteratorSetting iter : tableConfig.getIterators()) {
@@ -642,7 +642,7 @@ public class InputConfigurator extends ConfiguratorBase {
   }
 
   /**
-   * Returns the {@link org.apache.accumulo.core.client.mapreduce.BatchScanConfig} for the configuration based on the properties set using the single-table
+   * Returns the {@link org.apache.accumulo.core.client.mapreduce.InputTableConfig} for the configuration based on the properties set using the single-table
    * input methods.
    * 
    * @param implementingClass
@@ -650,13 +650,12 @@ public class InputConfigurator extends ConfiguratorBase {
    * @param conf
    *          the Hadoop instance for which to retrieve the configuration
    * @return the config object built from the single input table properties set on the job
-   * @throws IOException
    * @since 1.6.0
    */
-  protected static Map.Entry<String, BatchScanConfig> getDefaultBatchScanConfig(Class<?> implementingClass, Configuration conf) {
+  protected static Map.Entry<String,InputTableConfig> getDefaultInputTableConfig(Class<?> implementingClass, Configuration conf) {
     String tableName = getInputTableName(implementingClass, conf);
     if (tableName != null) {
-      BatchScanConfig queryConfig = new BatchScanConfig();
+      InputTableConfig queryConfig = new InputTableConfig();
       List<IteratorSetting> itrs = getIterators(implementingClass, conf);
       if (itrs != null)
         queryConfig.setIterators(itrs);
@@ -682,22 +681,22 @@ public class InputConfigurator extends ConfiguratorBase {
   public static Map<String,Map<KeyExtent,List<Range>>> binOffline(String tableId, List<Range> ranges, Instance instance, Connector conn)
       throws AccumuloException, TableNotFoundException {
     Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<String,Map<KeyExtent,List<Range>>>();
-  
+
     if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
       Tables.clearCache(instance);
       if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
         throw new AccumuloException("Table is online tableId:" + tableId + " cannot scan table in offline mode ");
       }
     }
-  
+
     for (Range range : ranges) {
       Text startRow;
-  
+
       if (range.getStartKey() != null)
         startRow = range.getStartKey().getRow();
       else
         startRow = new Text();
-  
+
       Range metadataRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
       Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
       MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
@@ -705,7 +704,7 @@ public class InputConfigurator extends ConfiguratorBase {
       scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
       scanner.fetchColumnFamily(MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME);
       scanner.setRange(metadataRange);
-  
+
       RowIterator rowIter = new RowIterator(scanner);
       KeyExtent lastExtent = null;
       while (rowIter.hasNext()) {
@@ -713,58 +712,58 @@ public class InputConfigurator extends ConfiguratorBase {
         String last = "";
         KeyExtent extent = null;
         String location = null;
-  
+
         while (row.hasNext()) {
           Map.Entry<Key,Value> entry = row.next();
           Key key = entry.getKey();
-  
+
           if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.LastLocationColumnFamily.NAME)) {
             last = entry.getValue().toString();
           }
-  
+
           if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME)
               || key.getColumnFamily().equals(MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME)) {
             location = entry.getValue().toString();
           }
-  
+
           if (MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
             extent = new KeyExtent(key.getRow(), entry.getValue());
           }
-  
+
         }
-  
+
         if (location != null)
           return null;
-  
+
         if (!extent.getTableId().toString().equals(tableId)) {
           throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
         }
-  
+
         if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
           throw new AccumuloException(" " + lastExtent + " is not previous extent " + extent);
         }
-  
+
         Map<KeyExtent,List<Range>> tabletRanges = binnedRanges.get(last);
         if (tabletRanges == null) {
           tabletRanges = new HashMap<KeyExtent,List<Range>>();
           binnedRanges.put(last, tabletRanges);
         }
-  
+
         List<Range> rangeList = tabletRanges.get(extent);
         if (rangeList == null) {
           rangeList = new ArrayList<Range>();
           tabletRanges.put(extent, rangeList);
         }
-  
+
         rangeList.add(range);
-  
+
         if (extent.getEndRow() == null || range.afterEndKey(new Key(extent.getEndRow()).followingKey(PartialKey.ROW))) {
           break;
         }
-  
+
         lastExtent = extent;
       }
-  
+
     }
     return binnedRanges;
   }
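
InputConfigurator is the plumbing behind the public setters above: the config map is packed into a Hadoop MapWritable, serialized, and stored under a single configuration key, and the single-table properties are folded back in as a default entry on read. A sketch of the renamed static API, called directly here for illustration only (jobs normally go through the input format's methods):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.accumulo.core.client.mapreduce.AccumuloMultiTableInputFormat;
    import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
    import org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator;
    import org.apache.hadoop.conf.Configuration;

    public class InputConfiguratorSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
        configs.put("table1", new InputTableConfig());

        // The implementing class name prefixes the configuration key.
        InputConfigurator.setInputTableConfigs(AccumuloMultiTableInputFormat.class, conf, configs);

        // Returns null if no config exists for the given table name.
        InputTableConfig cfg = InputConfigurator.getInputTableConfig(
            AccumuloMultiTableInputFormat.class, conf, "table1");
        System.out.println(cfg != null); // true
      }
    }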

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormatTest.java
index 97f8d72..68f88cb 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormatTest.java
@@ -28,7 +28,7 @@ import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.mapreduce.BatchScanConfig;
+import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
 import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Key;
@@ -113,14 +113,14 @@ public class AccumuloMultiTableInputFormatTest {
       AccumuloMultiTableInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
       AccumuloMultiTableInputFormat.setMockInstance(job, INSTANCE_NAME);
 
-      BatchScanConfig tableConfig1 = new BatchScanConfig();
-      BatchScanConfig tableConfig2 = new BatchScanConfig();
+      InputTableConfig tableConfig1 = new InputTableConfig();
+      InputTableConfig tableConfig2 = new InputTableConfig();
 
-      Map<String,BatchScanConfig> configMap = new HashMap<String,BatchScanConfig>();
+      Map<String,InputTableConfig> configMap = new HashMap<String,InputTableConfig>();
       configMap.put(table1, tableConfig1);
       configMap.put(table2, tableConfig2);
 
-      AccumuloMultiTableInputFormat.setBatchScanConfigs(job, configMap);
+      AccumuloMultiTableInputFormat.setInputTableConfigs(job, configMap);
 
       job.setMapperClass(TestMapper.class);
       job.setMapOutputKeyClass(Key.class);
@@ -162,27 +162,27 @@ public class AccumuloMultiTableInputFormatTest {
   }
 
   /**
-   * Verify {@link org.apache.accumulo.core.client.mapreduce.BatchScanConfig} objects get correctly serialized in the JobContext.
+   * Verify {@link org.apache.accumulo.core.client.mapreduce.InputTableConfig} objects get correctly serialized in the JobContext.
    */
   @Test
   public void testTableQueryConfigSerialization() throws IOException {
 
     JobConf job = new JobConf();
 
-    BatchScanConfig table1 = new BatchScanConfig().setRanges(Collections.singletonList(new Range("a", "b")))
+    InputTableConfig table1 = new InputTableConfig().setRanges(Collections.singletonList(new Range("a", "b")))
         .fetchColumns(Collections.singleton(new Pair<Text,Text>(new Text("CF1"), new Text("CQ1"))))
         .setIterators(Collections.singletonList(new IteratorSetting(50, "iter1", "iterclass1")));
 
-    BatchScanConfig table2 = new BatchScanConfig().setRanges(Collections.singletonList(new Range("a", "b")))
+    InputTableConfig table2 = new InputTableConfig().setRanges(Collections.singletonList(new Range("a", "b")))
         .fetchColumns(Collections.singleton(new Pair<Text,Text>(new Text("CF1"), new Text("CQ1"))))
         .setIterators(Collections.singletonList(new IteratorSetting(50, "iter1", "iterclass1")));
 
-    Map<String,BatchScanConfig> configMap = new HashMap<String,BatchScanConfig>();
+    Map<String,InputTableConfig> configMap = new HashMap<String,InputTableConfig>();
     configMap.put(TEST_TABLE_1, table1);
     configMap.put(TEST_TABLE_2, table2);
-    AccumuloMultiTableInputFormat.setBatchScanConfigs(job, configMap);
+    AccumuloMultiTableInputFormat.setInputTableConfigs(job, configMap);
 
-    assertEquals(table1, AccumuloMultiTableInputFormat.getBatchScanConfig(job, TEST_TABLE_1));
-    assertEquals(table2, AccumuloMultiTableInputFormat.getBatchScanConfig(job, TEST_TABLE_2));
+    assertEquals(table1, AccumuloMultiTableInputFormat.getInputTableConfig(job, TEST_TABLE_1));
+    assertEquals(table2, AccumuloMultiTableInputFormat.getInputTableConfig(job, TEST_TABLE_2));
   }
 }
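
The renamed setter/getter pair above works with InputTableConfig's chainable mutators, which the serialization test exercises inline. As a standalone sketch using only the setters shown in the test, a per-table scan setup can be built in one expression; the table name and iterator class below are placeholders:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.util.Pair;
    import org.apache.hadoop.io.Text;

    // Sketch: ranges, fetched columns, and iterators scoped to a single table.
    static Map<String,InputTableConfig> exampleConfigs() {
      InputTableConfig config = new InputTableConfig()
          .setRanges(Collections.singletonList(new Range("a", "z")))
          .fetchColumns(Collections.singleton(new Pair<Text,Text>(new Text("cf"), new Text("cq"))))
          .setIterators(Collections.singletonList(new IteratorSetting(30, "example", "com.example.ExampleIterator")));
      Map<String,InputTableConfig> configs = new HashMap<String,InputTableConfig>();
      configs.put("exampleTable", config);
      return configs;
    }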

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormatTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormatTest.java b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormatTest.java
index 27149eb..9951367 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloMultiTableInputFormatTest.java
@@ -105,14 +105,14 @@ public class AccumuloMultiTableInputFormatTest {
 
       AccumuloMultiTableInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
 
-      BatchScanConfig tableConfig1 = new BatchScanConfig();
-      BatchScanConfig tableConfig2 = new BatchScanConfig();
+      InputTableConfig tableConfig1 = new InputTableConfig();
+      InputTableConfig tableConfig2 = new InputTableConfig();
 
-      Map<String,BatchScanConfig> configMap = new HashMap<String,BatchScanConfig>();
+      Map<String,InputTableConfig> configMap = new HashMap<String,InputTableConfig>();
       configMap.put(table1, tableConfig1);
       configMap.put(table2, tableConfig2);
 
-      AccumuloMultiTableInputFormat.setBatchScanConfigs(job, configMap);
+      AccumuloMultiTableInputFormat.setInputTableConfigs(job, configMap);
       AccumuloMultiTableInputFormat.setMockInstance(job, INSTANCE_NAME);
 
       job.setMapperClass(TestMapper.class);
@@ -160,25 +160,25 @@ public class AccumuloMultiTableInputFormatTest {
   }
 
   /**
-   * Verify {@link BatchScanConfig} objects get correctly serialized in the JobContext.
+   * Verify {@link InputTableConfig} objects get correctly serialized in the JobContext.
    */
   @Test
-  public void testBatchScanConfigSerialization() throws IOException {
+  public void testInputTableConfigSerialization() throws IOException {
 
     Job job = new Job();
 
-    BatchScanConfig tableConfig = new BatchScanConfig().setRanges(Collections.singletonList(new Range("a", "b")))
+    InputTableConfig tableConfig = new InputTableConfig().setRanges(Collections.singletonList(new Range("a", "b")))
         .fetchColumns(Collections.singleton(new Pair<Text,Text>(new Text("CF1"), new Text("CQ1"))))
         .setIterators(Collections.singletonList(new IteratorSetting(50, "iter1", "iterclass1")));
 
-    Map<String,BatchScanConfig> configMap = new HashMap<String,BatchScanConfig>();
+    Map<String,InputTableConfig> configMap = new HashMap<String,InputTableConfig>();
     configMap.put(TEST_TABLE_1, tableConfig);
     configMap.put(TEST_TABLE_2, tableConfig);
 
-    AccumuloMultiTableInputFormat.setBatchScanConfigs(job, configMap);
+    AccumuloMultiTableInputFormat.setInputTableConfigs(job, configMap);
 
-    assertEquals(tableConfig, AccumuloMultiTableInputFormat.getBatchScanConfig(job, TEST_TABLE_1));
-    assertEquals(tableConfig, AccumuloMultiTableInputFormat.getBatchScanConfig(job, TEST_TABLE_2));
+    assertEquals(tableConfig, AccumuloMultiTableInputFormat.getInputTableConfig(job, TEST_TABLE_1));
+    assertEquals(tableConfig, AccumuloMultiTableInputFormat.getInputTableConfig(job, TEST_TABLE_2));
   }
 
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/core/src/test/java/org/apache/accumulo/core/conf/TableQueryConfigTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/conf/TableQueryConfigTest.java b/core/src/test/java/org/apache/accumulo/core/conf/TableQueryConfigTest.java
index a2b0db0..65845f3 100644
--- a/core/src/test/java/org/apache/accumulo/core/conf/TableQueryConfigTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/conf/TableQueryConfigTest.java
@@ -29,7 +29,7 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.mapreduce.BatchScanConfig;
+import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.hadoop.io.Text;
@@ -38,17 +38,17 @@ import org.junit.Test;
 
 public class TableQueryConfigTest {
   
-  private BatchScanConfig tableQueryConfig;
+  private InputTableConfig tableQueryConfig;
   
   @Before
   public void setUp() {
-    tableQueryConfig = new BatchScanConfig();
+    tableQueryConfig = new InputTableConfig();
   }
   
   @Test
   public void testSerialization_OnlyTable() throws IOException {
     byte[] serialized = serialize(tableQueryConfig);
-    BatchScanConfig actualConfig = deserialize(serialized);
+    InputTableConfig actualConfig = deserialize(serialized);
     
     assertEquals(tableQueryConfig, actualConfig);
   }
@@ -61,7 +61,7 @@ public class TableQueryConfigTest {
     tableQueryConfig.setRanges(ranges);
     
     byte[] serialized = serialize(tableQueryConfig);
-    BatchScanConfig actualConfig = deserialize(serialized);
+    InputTableConfig actualConfig = deserialize(serialized);
     
     assertEquals(ranges, actualConfig.getRanges());
   }
@@ -74,7 +74,7 @@ public class TableQueryConfigTest {
     tableQueryConfig.fetchColumns(columns);
     
     byte[] serialized = serialize(tableQueryConfig);
-    BatchScanConfig actualConfig = deserialize(serialized);
+    InputTableConfig actualConfig = deserialize(serialized);
     
     assertEquals(actualConfig.getFetchedColumns(), columns);
   }
@@ -86,21 +86,21 @@ public class TableQueryConfigTest {
     settings.add(new IteratorSetting(55, "iter2", "iterclass2"));
     tableQueryConfig.setIterators(settings);
     byte[] serialized = serialize(tableQueryConfig);
-    BatchScanConfig actualConfig = deserialize(serialized);
+    InputTableConfig actualConfig = deserialize(serialized);
     assertEquals(actualConfig.getIterators(), settings);
     
   }
   
-  private byte[] serialize(BatchScanConfig tableQueryConfig) throws IOException {
+  private byte[] serialize(InputTableConfig tableQueryConfig) throws IOException {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     tableQueryConfig.write(new DataOutputStream(baos));
     baos.close();
     return baos.toByteArray();
   }
   
-  private BatchScanConfig deserialize(byte[] bytes) throws IOException {
+  private InputTableConfig deserialize(byte[] bytes) throws IOException {
     ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
-    BatchScanConfig actualConfig = new BatchScanConfig(new DataInputStream(bais));
+    InputTableConfig actualConfig = new InputTableConfig(new DataInputStream(bais));
     bais.close();
     return actualConfig;
   }
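
The serialize/deserialize helpers above rely on InputTableConfig writing itself to a DataOutput and rehydrating from a DataInput, which is what lets per-table configs be stored as strings on a Hadoop job. One plausible way such a Writable can be encoded into a Configuration (the property key below is illustrative; the actual key layout lives in InputConfigurator):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
    import org.apache.commons.codec.binary.Base64;
    import org.apache.hadoop.conf.Configuration;

    // Sketch: Base64-encode a serialized InputTableConfig into a Configuration string.
    static void storeConfig(Configuration conf, String table, InputTableConfig config) throws IOException {
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      config.write(new DataOutputStream(baos));
      conf.set("example.inputTableConfig." + table, new String(Base64.encodeBase64(baos.toByteArray())));
    }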

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/docs/src/main/latex/accumulo_user_manual/chapters/analytics.tex
----------------------------------------------------------------------
diff --git a/docs/src/main/latex/accumulo_user_manual/chapters/analytics.tex b/docs/src/main/latex/accumulo_user_manual/chapters/analytics.tex
index 7bbd177..fc50d4a 100644
--- a/docs/src/main/latex/accumulo_user_manual/chapters/analytics.tex
+++ b/docs/src/main/latex/accumulo_user_manual/chapters/analytics.tex
@@ -133,8 +133,8 @@ used for each table.
 
 \small
 \begin{verbatim}
-BatchScanConfig tableOneConfig = new BatchScanConfig();
-BatchScanConfig tableTwoConfig = new BatchScanConfig();
+InputTableConfig tableOneConfig = new InputTableConfig();
+InputTableConfig tableTwoConfig = new InputTableConfig();
 \end{verbatim}
 \normalsize
 
@@ -142,10 +142,10 @@ To set the configuration objects on the job:
 
 \small
 \begin{verbatim}
-Map<String, BatchScanConfig> configs = new HashMap<String,BatchScanConfig>();
+Map<String, InputTableConfig> configs = new HashMap<String,InputTableConfig>();
 configs.put("table1", tableOneConfig);
 configs.put("table2", tableTwoConfig);
-AccumuloMultiTableInputFormat.setBatchScanConfigs(job, configs);
+AccumuloMultiTableInputFormat.setInputTableConfigs(job, configs);
 \end{verbatim}
 \normalsize
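
The manual snippets above cover creating the per-table configs and registering them on the job; for completeness, a sketch of the surrounding mapreduce job wiring, mirroring the setters used by the tests changed in this commit. The credentials and instance/ZooKeeper values are placeholders, and setZooKeeperInstance is assumed for a non-mock setup:

    // Sketch of the surrounding job setup; all credential and instance values are placeholders.
    Job job = new Job();
    job.setInputFormatClass(AccumuloMultiTableInputFormat.class);
    AccumuloMultiTableInputFormat.setConnectorInfo(job, "user", new PasswordToken("pass"));
    AccumuloMultiTableInputFormat.setZooKeeperInstance(job, "instance", "zk1:2181,zk2:2181");
    AccumuloMultiTableInputFormat.setInputTableConfigs(job, configs);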
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/61353d1e/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java b/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java
index 2faa9b1..69f2ace 100644
--- a/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java
+++ b/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java
@@ -36,12 +36,11 @@ import org.apache.accumulo.core.trace.TraceFormatter;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.monitor.Monitor;
 import org.apache.accumulo.server.monitor.servlets.BasicServlet;
-import org.apache.accumulo.start.classloader.AccumuloClassLoader;
 
 abstract class Basic extends BasicServlet {
-  
+
   private static final long serialVersionUID = 1L;
-  
+
   public static String getStringParameter(HttpServletRequest req, String name, String defaultValue) {
     String result = req.getParameter(name);
     if (result == null) {
@@ -49,7 +48,7 @@ abstract class Basic extends BasicServlet {
     }
     return result;
   }
-  
+
   public static int getIntParameter(HttpServletRequest req, String name, int defaultMinutes) {
     String valueString = req.getParameter(name);
     if (valueString == null)
@@ -62,11 +61,11 @@ abstract class Basic extends BasicServlet {
     }
     return result;
   }
-  
+
   public static String dateString(long millis) {
     return TraceFormatter.formatDate(new Date(millis));
   }
-  
+
   protected Scanner getScanner(StringBuilder sb) throws AccumuloException, AccumuloSecurityException {
     AccumuloConfiguration conf = Monitor.getSystemConfiguration();
     String principal = conf.get(Property.TRACE_USER);
@@ -81,12 +80,12 @@ abstract class Basic extends BasicServlet {
       for (Entry<String,String> entry : loginMap.entrySet()) {
         props.put(entry.getKey().substring(prefixLength), entry.getValue());
       }
-      
+
       AuthenticationToken token = Property.createInstanceFromPropertyName(conf, Property.TRACE_TOKEN_TYPE, AuthenticationToken.class, new PasswordToken());
       token.init(props);
       at = token;
     }
-    
+
     String table = conf.get(Property.TRACE_TABLE);
     try {
       Connector conn = HdfsZooInstance.getInstance().getConnector(principal, at);