Posted to commits@accumulo.apache.org by ct...@apache.org on 2019/04/20 05:29:27 UTC

[accumulo] branch master updated (89d36cf -> bffc9a3)

This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git.


    from 89d36cf  Avoid lock contention in table configuration (#1102)
     new 8dcee50  Fix #496 Update formatter
     new 0a9837f  Apply formatter changes for #496
     new bffc9a3  Merge branch '1.9'

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
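
For context on the formatter commits above: a change like "Fix #496 Update formatter"
pairs the Eclipse codestyle file (contrib/Eclipse-Accumulo-Codestyle.xml) with the Maven
build that applies it, which matches the pom.xml change in the summary below. The snippet
below is only an illustrative sketch of how such a codestyle file is commonly wired into a
Maven build with the formatter-maven-plugin; the plugin version, path handling, and goal
binding shown here are assumptions for illustration, not taken from this commit.

  <!-- Illustrative sketch only: assumes net.revelc.code.formatter:formatter-maven-plugin;
       the version, path resolution, and execution binding are placeholders. -->
  <plugin>
    <groupId>net.revelc.code.formatter</groupId>
    <artifactId>formatter-maven-plugin</artifactId>
    <version>2.8.1</version>
    <configuration>
      <!-- Point the plugin at the Eclipse codestyle file updated in these commits
           (path resolution simplified for a single-module layout) -->
      <configFile>${project.basedir}/contrib/Eclipse-Accumulo-Codestyle.xml</configFile>
      <lineEnding>LF</lineEnding>
    </configuration>
    <executions>
      <execution>
        <goals>
          <!-- 'format' rewrites sources in place; 'validate' would instead fail the
               build when files are not formatted -->
          <goal>format</goal>
        </goals>
      </execution>
    </executions>
  </plugin>

Running "mvn formatter:format" with a configuration along these lines is what would produce
the kind of large, mechanical whitespace-only diff summarized below.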


Summary of changes:
 contrib/Eclipse-Accumulo-Codestyle.xml             | 577 +++++++++++----------
 .../java/org/apache/accumulo/core/Constants.java   |   4 +-
 .../org/apache/accumulo/core/cli/ClientOpts.java   |   4 +-
 .../accumulo/core/client/BatchWriterConfig.java    |  16 +-
 .../core/client/MutationsRejectedException.java    |   4 +-
 .../accumulo/core/client/ZooKeeperInstance.java    |   4 +-
 .../core/client/admin/NewTableConfiguration.java   |   8 +-
 .../core/client/mapred/AbstractInputFormat.java    |  49 +-
 .../client/mapred/AccumuloFileOutputFormat.java    |   8 +-
 .../core/client/mapred/AccumuloInputFormat.java    |   4 +-
 .../mapred/AccumuloMultiTableInputFormat.java      |  48 +-
 .../core/client/mapred/AccumuloRowInputFormat.java |   4 +-
 .../core/client/mapreduce/AbstractInputFormat.java |  20 +-
 .../client/mapreduce/AccumuloFileOutputFormat.java |   4 +-
 .../core/client/mapreduce/AccumuloInputFormat.java |   4 +-
 .../client/mapreduce/AccumuloOutputFormat.java     |   4 +-
 .../core/client/mapreduce/RangeInputSplit.java     |  19 +-
 .../core/client/sample/RowColumnSampler.java       |   4 +-
 .../security/tokens/AuthenticationToken.java       |   4 +-
 .../core/client/summary/CountingSummarizer.java    |  30 +-
 .../client/summary/SummarizerConfiguration.java    |   8 +-
 .../summarizers/AuthorizationSummarizer.java       |  18 +-
 .../core/clientImpl/ActiveCompactionImpl.java      |   4 +-
 .../core/clientImpl/ClientConfConverter.java       |  36 +-
 .../accumulo/core/clientImpl/ClientContext.java    |   8 +-
 .../core/clientImpl/ConditionalWriterImpl.java     |  22 +-
 .../accumulo/core/clientImpl/ConnectorImpl.java    |   4 +-
 .../accumulo/core/clientImpl/Credentials.java      |  14 +-
 .../core/clientImpl/DelegationTokenImpl.java       |   4 +-
 .../core/clientImpl/InstanceOperationsImpl.java    |   4 +-
 .../core/clientImpl/NamespaceOperationsHelper.java |   8 +-
 .../accumulo/core/clientImpl/OfflineIterator.java  |  33 +-
 .../core/clientImpl/ReplicationClient.java         |   8 +-
 .../accumulo/core/clientImpl/ScannerImpl.java      |  20 +-
 .../accumulo/core/clientImpl/ScannerIterator.java  |  16 +-
 .../core/clientImpl/SecurityOperationsImpl.java    |   4 +-
 .../accumulo/core/clientImpl/ServerClient.java     |   4 +-
 .../core/clientImpl/TableOperationsHelper.java     |   8 +-
 .../core/clientImpl/TableOperationsImpl.java       |  56 +-
 .../apache/accumulo/core/clientImpl/Tables.java    |   8 +-
 .../core/clientImpl/TabletLocatorImpl.java         |  18 +-
 .../core/clientImpl/TabletServerBatchReader.java   |   4 +-
 .../TabletServerBatchReaderIterator.java           |  24 +-
 .../core/clientImpl/TabletServerBatchWriter.java   |  23 +-
 .../accumulo/core/clientImpl/ThriftScanner.java    |  28 +-
 .../accumulo/core/clientImpl/bulk/BulkImport.java  |  16 +-
 .../core/clientImpl/bulk/BulkSerialize.java        |   5 +-
 .../core/clientImpl/mapreduce/BatchInputSplit.java |   4 +-
 .../clientImpl/mapreduce/lib/ConfiguratorBase.java |  30 +-
 .../mapreduce/lib/InputConfigurator.java           |  61 ++-
 .../accumulo/core/conf/AccumuloConfiguration.java  |   4 +-
 .../core/conf/CredentialProviderFactoryShim.java   |  25 +-
 .../accumulo/core/conf/DefaultConfiguration.java   |   6 +-
 .../apache/accumulo/core/conf/IterConfigUtil.java  |  10 +-
 .../core/conf/ObservableConfiguration.java         |   4 +-
 .../org/apache/accumulo/core/conf/Property.java    |  36 +-
 .../apache/accumulo/core/conf/PropertyType.java    |   4 +-
 .../accumulo/core/cryptoImpl/AESCryptoService.java |   4 +-
 .../java/org/apache/accumulo/core/data/Column.java |   8 +-
 .../java/org/apache/accumulo/core/data/Key.java    |   4 +-
 .../org/apache/accumulo/core/data/KeyBuilder.java  |   6 +-
 .../org/apache/accumulo/core/data/Mutation.java    |   6 +-
 .../apache/accumulo/core/dataImpl/KeyExtent.java   |  20 +-
 .../accumulo/core/file/BloomFilterLayer.java       |  12 +-
 .../apache/accumulo/core/file/FileOperations.java  |   4 +-
 .../cache/impl/BlockCacheManagerFactory.java       |   8 +-
 .../core/file/blockfile/cache/impl/ClassSize.java  |   4 +-
 .../core/file/blockfile/cache/lru/CachedBlock.java |   4 +-
 .../file/blockfile/cache/lru/LruBlockCache.java    |  12 +-
 .../cache/lru/LruBlockCacheConfiguration.java      |   8 +-
 .../file/blockfile/impl/CachableBlockFile.java     |  12 +-
 .../accumulo/core/file/rfile/BlockIndex.java       |   4 +-
 .../accumulo/core/file/rfile/KeyShortener.java     |   8 +-
 .../accumulo/core/file/rfile/MultiLevelIndex.java  |   8 +-
 .../org/apache/accumulo/core/file/rfile/RFile.java |  40 +-
 .../accumulo/core/file/rfile/RFileOperations.java  |  13 +-
 .../accumulo/core/file/rfile/SplitLarge.java       |   4 +-
 .../accumulo/core/file/rfile/bcfile/BCFile.java    |  20 +-
 .../core/file/rfile/bcfile/Compression.java        |  35 +-
 .../apache/accumulo/core/iterators/Combiner.java   |  12 +-
 .../accumulo/core/iterators/IteratorAdapter.java   |   4 +-
 .../apache/accumulo/core/iterators/OrIterator.java |   4 +-
 .../core/iterators/conf/ColumnToClassMapping.java  |   4 +-
 .../iterators/system/LocalityGroupIterator.java    |   4 +-
 .../core/iterators/user/CfCqSliceOpts.java         |   8 +-
 .../iterators/user/CfCqSliceSeekingFilter.java     |   8 +-
 .../core/iterators/user/IntersectingIterator.java  |  20 +-
 .../core/iterators/user/LargeRowFilter.java        |  12 +-
 .../core/iterators/user/RowDeletingIterator.java   |   5 +-
 .../core/iterators/user/RowEncodingIterator.java   |   4 +-
 .../core/iterators/user/SeekingFilter.java         |   8 +-
 .../core/iterators/user/TransformingIterator.java  |  12 +-
 .../apache/accumulo/core/metadata/RootTable.java   |   4 +-
 .../core/metadata/schema/MetadataSchema.java       |  16 +-
 .../core/metadata/schema/TabletsMetadata.java      |   4 +-
 .../core/replication/ReplicationTable.java         |   4 +-
 .../accumulo/core/rpc/TTimeoutTransport.java       |   4 +-
 .../org/apache/accumulo/core/rpc/ThriftUtil.java   |  22 +-
 .../core/spi/scan/HintScanPrioritizer.java         |   4 +-
 .../core/spi/scan/SimpleScanDispatcher.java        |   4 +-
 .../org/apache/accumulo/core/summary/Gatherer.java |  35 +-
 .../core/summary/SummarizerConfigurationUtil.java  |   8 +-
 .../accumulo/core/summary/SummaryReader.java       |   8 +-
 .../accumulo/core/summary/SummaryWriter.java       |   4 +-
 .../org/apache/accumulo/core/trace/TraceUtil.java  |   4 +-
 .../org/apache/accumulo/core/util/CreateToken.java |   4 +-
 .../org/apache/accumulo/core/util/HostAndPort.java |   4 +-
 .../accumulo/core/util/LocalityGroupUtil.java      |   4 +-
 .../java/org/apache/accumulo/core/util/Merge.java  |   4 +-
 .../accumulo/core/util/NamingThreadFactory.java    |   4 +-
 .../accumulo/core/util/SystemIteratorUtil.java     |   4 +-
 .../accumulo/core/util/ThriftMessageUtil.java      |   4 +-
 .../core/util/format/DateStringFormatter.java      |   4 +-
 .../core/util/format/DefaultFormatter.java         |   4 +-
 .../accumulo/core/util/format/HexFormatter.java    |   4 +-
 .../java/org/apache/accumulo/fate/AdminUtil.java   |   2 +-
 .../main/java/org/apache/accumulo/fate/Fate.java   |   8 +-
 .../java/org/apache/accumulo/fate/ZooStore.java    |   4 +-
 .../apache/accumulo/fate/zookeeper/ZooUtil.java    |   6 +-
 .../core/client/ClientConfigurationTest.java       |  12 +-
 .../accumulo/core/client/ClientPropertiesTest.java |   8 +-
 .../accumulo/core/client/IteratorSettingTest.java  |   8 +-
 .../core/client/ZooKeeperInstanceTest.java         |  16 +-
 .../core/client/lexicoder/PairLexicoderTest.java   |  12 +-
 .../mapred/AccumuloFileOutputFormatTest.java       |   5 +-
 .../client/mapred/AccumuloInputFormatTest.java     |   8 +-
 .../mapred/AccumuloMultiTableInputFormatTest.java  |  23 +-
 .../mapreduce/AccumuloFileOutputFormatTest.java    |   5 +-
 .../client/mapreduce/AccumuloInputFormatTest.java  |   8 +-
 .../lib/partition/RangePartitionerTest.java        |   8 +-
 .../core/client/rfile/RFileClientTest.java         |  48 +-
 .../client/security/SecurityErrorCodeTest.java     |   5 +-
 .../security/tokens/DelegationTokenImplTest.java   |  28 +-
 .../client/security/tokens/KerberosTokenTest.java  |   4 +-
 .../summarizers/AuthorizationSummarizerTest.java   |   4 +-
 .../summarizers/EntryLengthSummarizersTest.java    |  64 +--
 .../core/clientImpl/ScannerOptionsTest.java        |   4 +-
 .../core/clientImpl/TabletLocatorImplTest.java     | 225 ++++----
 .../core/clientImpl/ThriftTransportKeyTest.java    |  36 +-
 .../core/clientImpl/bulk/BulkSerializeTest.java    |   4 +-
 .../clientImpl/mapreduce/BatchInputSplitTest.java  |   8 +-
 .../mapreduce/lib/ConfiguratorBaseTest.java        |   6 +-
 .../core/conf/AccumuloConfigurationTest.java       |  32 +-
 .../core/conf/ConfigurationTypeHelperTest.java     |   2 +-
 .../conf/CredentialProviderFactoryShimTest.java    |  20 +-
 .../accumulo/core/conf/IterConfigUtilTest.java     |   4 +-
 .../apache/accumulo/core/conf/PropertyTest.java    |  15 +-
 .../apache/accumulo/core/crypto/CryptoTest.java    |  44 +-
 .../core/data/ConstraintViolationSummaryTest.java  |   4 +-
 .../apache/accumulo/core/data/KeyBuilderTest.java  |  26 +-
 .../org/apache/accumulo/core/data/KeyTest.java     |   4 +-
 .../apache/accumulo/core/data/LoadPlanTest.java    |   4 +-
 .../org/apache/accumulo/core/data/RangeTest.java   |  56 +-
 .../core/file/BloomFilterLayerLookupTest.java      |   4 +-
 .../file/blockfile/cache/TestCachedBlockQueue.java |   8 +-
 .../accumulo/core/file/rfile/BlockIndexTest.java   |   4 +-
 .../core/file/rfile/MultiThreadedRFileTest.java    |  12 +-
 .../accumulo/core/file/rfile/RFileMetricsTest.java |   4 +-
 .../apache/accumulo/core/file/rfile/RFileTest.java |  64 +--
 .../accumulo/core/file/rfile/RelativeKeyTest.java  |  16 +-
 .../core/file/rfile/bcfile/CompressionTest.java    |   8 +-
 .../file/streams/RateLimitedOutputStreamTest.java  |   4 +-
 .../iterators/FirstEntryInRowIteratorTest.java     |   4 +-
 .../core/iterators/system/ColumnFilterTest.java    |   8 +-
 .../iterators/system/DeletingIteratorTest.java     |  24 +-
 .../iterators/system/VisibilityFilterTest.java     |   8 +-
 .../accumulo/core/iterators/user/CombinerTest.java |   4 +-
 .../accumulo/core/iterators/user/FilterTest.java   |  12 +-
 .../core/iterators/user/RowFilterTest.java         |   4 +-
 .../iterators/user/TransformingIteratorTest.java   |  20 +-
 .../core/iterators/user/VisibilityFilterTest.java  |   4 +-
 .../core/metadata/schema/LinkingIteratorTest.java  |   8 +-
 .../rpc/SaslClientDigestCallbackHandlerTest.java   |   4 +-
 .../core/rpc/SaslConnectionParamsTest.java         |  12 +-
 .../accumulo/core/rpc/TTimeoutTransportTest.java   |   6 +-
 .../AuthenticationTokenIdentifierTest.java         |   4 +-
 .../core/security/AuthenticationTokenTest.java     |   8 +-
 .../core/spi/scan/HintScanPrioritizerTest.java     |   4 +-
 .../spi/scan/IdleRatioScanPrioritizerTest.java     |   4 +-
 .../core/util/CompletableFutureUtilTest.java       |   8 +-
 .../accumulo/core/util/LocalityGroupUtilTest.java  |   4 +-
 .../core/util/format/DefaultFormatterTest.java     |   2 +-
 .../core/util/format/FormatterFactoryTest.java     |   6 +-
 .../apache/accumulo/fate/ReadOnlyStoreTest.java    |   2 +-
 .../org/apache/accumulo/fate/util/RetryTest.java   |  16 +-
 .../accumulo/fate/zookeeper/ZooCacheTest.java      |   8 +-
 .../fate/zookeeper/ZooReaderWriterTest.java        |   8 +-
 .../hadoop/mapred/AccumuloFileOutputFormat.java    |   8 +-
 .../hadoop/mapred/AccumuloRowInputFormat.java      |   4 +-
 .../hadoopImpl/mapred/AccumuloRecordReader.java    |  16 +-
 .../hadoopImpl/mapreduce/AccumuloRecordReader.java |  20 +-
 .../hadoopImpl/mapreduce/BatchInputSplit.java      |   4 +-
 .../mapreduce/InputFormatBuilderImpl.java          |  20 +-
 .../mapreduce/OutputFormatBuilderImpl.java         |   8 +-
 .../hadoopImpl/mapreduce/RangeInputSplit.java      |   4 +-
 .../hadoopImpl/mapreduce/lib/ConfiguratorBase.java |   4 +-
 .../mapreduce/lib/FileOutputConfigurator.java      |   4 +-
 .../mapreduce/lib/InputConfigurator.java           |  28 +-
 .../mapreduce/lib/MapReduceClientOpts.java         |   8 +-
 .../its/mapred/AccumuloFileOutputFormatIT.java     |   9 +-
 .../hadoop/its/mapred/AccumuloInputFormatIT.java   |  15 +-
 .../hadoop/its/mapred/AccumuloOutputFormatIT.java  |   4 +-
 .../accumulo/hadoop/its/mapred/TokenFileIT.java    |   4 +-
 .../its/mapreduce/AccumuloFileOutputFormatIT.java  |   9 +-
 .../its/mapreduce/AccumuloInputFormatIT.java       |  29 +-
 .../accumulo/hadoop/its/mapreduce/RowHashIT.java   |   4 +-
 .../accumulo/hadoop/its/mapreduce/TokenFileIT.java |   4 +-
 .../mapred/AccumuloFileOutputFormatTest.java       |   8 +-
 .../hadoop/mapred/AccumuloInputFormatTest.java     |   8 +-
 .../hadoop/mapred/AccumuloOutputFormatTest.java    |   4 +-
 .../hadoop/mapred/MultiTableInputFormatTest.java   |  16 +-
 .../mapreduce/AccumuloFileOutputFormatTest.java    |   4 +-
 .../hadoop/mapreduce/AccumuloInputFormatTest.java  |  16 +-
 .../mapreduce/MultiTableInputFormatTest.java       |  20 +-
 .../mapreduce/partition/RangePartitionerTest.java  |   8 +-
 .../hadoopImpl/mapreduce/BatchInputSplitTest.java  |   8 +-
 .../mapreduce/lib/ConfiguratorBaseTest.java        |   4 +-
 .../iteratortest/IteratorTestCaseFinder.java       |   4 +-
 .../testcases/IsolatedDeepCopiesTestCase.java      |   4 +-
 .../iteratortest/testcases/ReSeekTestCase.java     |   8 +-
 .../standalone/StandaloneAccumuloCluster.java      |   4 +-
 .../accumulo/minicluster/MiniAccumuloRunner.java   |   3 +-
 .../MiniAccumuloClusterControl.java                |   8 +-
 .../miniclusterImpl/MiniAccumuloClusterImpl.java   |  16 +-
 .../miniclusterImpl/MiniAccumuloConfigImpl.java    |   4 +-
 .../standalone/StandaloneClusterControlTest.java   |   4 +-
 .../MiniAccumuloClusterExistingZooKeepersTest.java |   8 +-
 .../MiniAccumuloClusterStartStopTest.java          |   4 +-
 .../minicluster/MiniAccumuloClusterTest.java       |   4 +-
 .../miniclusterImpl/CleanShutdownMacTest.java      |   6 +-
 .../MiniAccumuloClusterImplTest.java               |   4 +-
 .../MiniAccumuloConfigImplTest.java                |   8 +-
 pom.xml                                            |  28 +-
 .../main/java/org/apache/accumulo/proxy/Proxy.java |  12 +-
 .../org/apache/accumulo/proxy/ProxyServer.java     | 117 ++---
 .../main/java/org/apache/accumulo/proxy/Util.java  |   4 +-
 .../apache/accumulo/server/ServerConstants.java    |   4 +-
 .../org/apache/accumulo/server/ServerContext.java  |   4 +-
 .../org/apache/accumulo/server/ServerUtil.java     |   5 +-
 .../accumulo/server/ServiceEnvironmentImpl.java    |   4 +-
 .../accumulo/server/client/BulkImporter.java       |  54 +-
 .../server/client/ClientServiceHandler.java        |   8 +-
 .../server/conf/NamespaceConfiguration.java        |   3 +-
 .../server/conf/ServerConfigurationFactory.java    |  12 +-
 .../accumulo/server/conf/TableConfiguration.java   |  11 +-
 .../server/constraints/MetadataConstraints.java    |  27 +-
 .../accumulo/server/fs/PerTableVolumeChooser.java  |  20 +-
 .../accumulo/server/fs/PreferredVolumeChooser.java |  16 +-
 .../server/fs/SpaceAwareVolumeChooser.java         |   7 +-
 .../accumulo/server/fs/VolumeManagerImpl.java      |   4 +-
 .../org/apache/accumulo/server/fs/VolumeUtil.java  |   8 +-
 .../apache/accumulo/server/init/Initialize.java    |  41 +-
 .../accumulo/server/master/LiveTServerSet.java     |  48 +-
 .../balancer/HostRegexTableLoadBalancer.java       |  48 +-
 .../server/master/balancer/RegexGroupBalancer.java |  12 +-
 .../server/master/balancer/TableLoadBalancer.java  |   8 +-
 .../server/master/state/MetaDataStateStore.java    |   8 +-
 .../server/master/state/MetaDataTableScanner.java  |   4 +-
 .../master/state/TabletStateChangeIterator.java    |   8 +-
 .../server/metrics/MetricsConfiguration.java       |   4 +-
 .../server/metrics/MetricsSystemHelper.java        |   4 +-
 .../accumulo/server/monitor/DedupedLogEvent.java   |   4 +-
 .../apache/accumulo/server/monitor/LogService.java |  18 +-
 .../server/replication/ReplicaSystemHelper.java    |   4 +-
 .../server/replication/ReplicationUtil.java        |   8 +-
 .../server/replication/StatusFormatter.java        |   9 +-
 .../server/rpc/CustomNonBlockingServer.java        |   4 +-
 .../HighlyAvailableServiceInvocationHandler.java   |   4 +-
 .../rpc/SaslServerDigestCallbackHandler.java       |   4 +-
 .../rpc/TCredentialsUpdatingInvocationHandler.java |  11 +-
 .../apache/accumulo/server/rpc/TServerUtils.java   |  64 +--
 .../server/security/AuditedSecurityOperation.java  | 146 +++---
 .../server/security/SystemCredentials.java         |   4 +-
 .../AuthenticationTokenSecretManager.java          |   8 +-
 .../security/handler/KerberosAuthenticator.java    |   8 +-
 .../server/security/handler/ZKPermHandler.java     |  16 +-
 .../accumulo/server/tables/TableManager.java       |  12 +-
 .../tabletserver/LargestFirstMemoryManager.java    |   8 +-
 .../org/apache/accumulo/server/util/Admin.java     |  24 +-
 .../apache/accumulo/server/util/ChangeSecret.java  |   4 +-
 .../server/util/CheckForMetadataProblems.java      |   8 +-
 .../org/apache/accumulo/server/util/FileUtil.java  |  16 +-
 .../accumulo/server/util/FindOfflineTablets.java   |  13 +-
 .../apache/accumulo/server/util/LocalityCheck.java |   8 +-
 .../accumulo/server/util/MetadataTableUtil.java    |  34 +-
 .../apache/accumulo/server/util/RandomWriter.java  |   4 +-
 .../accumulo/server/util/RandomizeVolumes.java     |   6 +-
 .../server/util/RemoveEntriesForMissingFiles.java  |   8 +-
 .../accumulo/server/util/SystemPropUtil.java       |   4 +-
 .../server/util/VerifyTabletAssignments.java       |   4 +-
 .../accumulo/server/ServerConstantsTest.java       |   4 +-
 .../accumulo/server/client/BulkImporterTest.java   |   4 +-
 .../server/fs/PerTableVolumeChooserTest.java       |  14 +-
 .../server/fs/PreferredVolumeChooserTest.java      |  14 +-
 .../apache/accumulo/server/fs/ViewFSUtilsTest.java |  14 +-
 .../accumulo/server/fs/VolumeManagerImplTest.java  |  12 +-
 .../apache/accumulo/server/fs/VolumeUtilTest.java  |   4 +-
 .../master/balancer/ChaoticLoadBalancerTest.java   |   4 +-
 .../master/balancer/DefaultLoadBalancerTest.java   |   4 +-
 .../balancer/HostRegexTableLoadBalancerTest.java   |  12 +-
 .../master/balancer/TableLoadBalancerTest.java     |  36 +-
 .../server/problems/ProblemReportTest.java         |  16 +-
 .../server/replication/StatusCombinerTest.java     |  44 +-
 .../server/rpc/SaslDigestCallbackHandlerTest.java  |  20 +-
 .../server/rpc/SaslServerConnectionParamsTest.java |   4 +-
 .../server/security/SystemCredentialsTest.java     |   4 +-
 .../security/delegation/AuthenticationKeyTest.java |  12 +-
 .../AuthenticationTokenKeyManagerTest.java         |   8 +-
 .../AuthenticationTokenSecretManagerTest.java      |  84 +--
 .../ZooAuthenticationKeyDistributorTest.java       |  48 +-
 .../security/handler/ZKAuthenticatorTest.java      |   8 +-
 .../IllegalTableTransitionExceptionTest.java       |   8 +-
 .../accumulo/server/util/DefaultMapTest.java       |   4 +-
 .../apache/accumulo/server/util/FileUtilTest.java  |   4 +-
 .../server/util/ReplicationTableUtilTest.java      |   4 +-
 .../accumulo/server/util/TServerUtilsTest.java     |   8 +-
 .../accumulo/gc/GarbageCollectionAlgorithm.java    |  12 +-
 .../apache/accumulo/gc/SimpleGarbageCollector.java |  38 +-
 .../gc/GarbageCollectWriteAheadLogsTest.java       |  28 +-
 .../apache/accumulo/master/FateServiceHandler.java |  92 ++--
 .../java/org/apache/accumulo/master/Master.java    |  58 +--
 .../master/MasterClientServiceHandler.java         |  14 +-
 .../apache/accumulo/master/TabletGroupWatcher.java |  28 +-
 .../master/metrics/fate/Metrics2FateMetrics.java   |   4 +-
 .../accumulo/master/recovery/RecoveryManager.java  |  11 +-
 .../DistributedWorkQueueWorkAssigner.java          |   7 +-
 .../master/replication/ReplicationDriver.java      |   4 +-
 .../master/replication/SequentialWorkAssigner.java |   8 +-
 .../master/replication/UnorderedWorkAssigner.java  |  13 +-
 .../accumulo/master/replication/WorkMaker.java     |   4 +-
 .../master/tableOps/bulkVer1/BulkImport.java       |   8 +-
 .../master/tableOps/bulkVer1/CopyFailed.java       |   4 +-
 .../master/tableOps/bulkVer1/LoadFiles.java        |  10 +-
 .../master/tableOps/bulkVer2/BulkImportMove.java   |   4 +-
 .../master/tableOps/bulkVer2/LoadFiles.java        |  10 +-
 .../master/tableOps/bulkVer2/PrepBulkImport.java   |   8 +-
 .../accumulo/master/tableOps/clone/CloneTable.java |   4 +-
 .../master/tableOps/clone/CloneZookeeper.java      |   8 +-
 .../master/tableOps/compact/CompactionDriver.java  |   6 +-
 .../accumulo/master/tableOps/create/ChooseDir.java |  18 +-
 .../accumulo/master/tableOps/create/CreateDir.java |   8 +-
 .../master/tableOps/create/PopulateMetadata.java   |   8 +-
 .../accumulo/master/tableOps/delete/CleanUp.java   |  12 +-
 .../tableOps/namespace/create/CreateNamespace.java |   4 +-
 .../master/tableOps/rename/RenameTable.java        |   4 +-
 .../tableImport/PopulateMetadataTable.java         |   4 +-
 .../master/metrics/fate/FateMetricValuesTest.java  |   4 +-
 ...DistributedWorkQueueWorkAssignerHelperTest.java |   4 +-
 .../replication/UnorderedWorkAssignerTest.java     |  12 +-
 .../master/state/RootTabletStateStoreTest.java     |   4 +-
 .../apache/accumulo/monitor/EmbeddedWebServer.java |  14 +-
 .../java/org/apache/accumulo/monitor/Monitor.java  |   4 +-
 .../monitor/rest/master/MasterResource.java        |   4 +-
 .../monitor/rest/problems/ProblemsResource.java    |  10 +-
 .../rest/statistics/StatisticsResource.java        |   4 +-
 .../monitor/rest/tables/TablesResource.java        |   4 +-
 .../monitor/rest/trace/TracesResource.java         |   8 +-
 .../rest/tservers/TabletServerResource.java        |  20 +-
 .../monitor/util/AccumuloMonitorAppender.java      |   8 +-
 .../org/apache/accumulo/monitor/view/WebViews.java |  12 +-
 .../accumulo/monitor/ZooKeeperStatusTest.java      |   4 +-
 .../monitor/util/AccumuloMonitorAppenderTest.java  |   8 +-
 .../org/apache/accumulo/tracer/TraceFormatter.java |   9 +-
 .../org/apache/accumulo/tracer/TraceServer.java    |  33 +-
 .../org/apache/accumulo/tracer/ZooTraceClient.java |   4 +-
 .../accumulo/tserver/ActiveAssignmentRunnable.java |   4 +-
 .../apache/accumulo/tserver/CompactionQueue.java   |   4 +-
 .../accumulo/tserver/ConditionCheckerContext.java  |   4 +-
 .../org/apache/accumulo/tserver/FileManager.java   |  12 +-
 .../org/apache/accumulo/tserver/InMemoryMap.java   |   8 +-
 .../org/apache/accumulo/tserver/NativeMap.java     |   4 +-
 .../tserver/TabletIteratorEnvironment.java         |   4 +-
 .../org/apache/accumulo/tserver/TabletServer.java  | 179 ++++---
 .../tserver/TabletServerResourceManager.java       |  32 +-
 .../compaction/DefaultCompactionStrategy.java      |   4 +-
 .../strategies/ConfigurableCompactionStrategy.java |  11 +-
 .../TooManyDeletesCompactionStrategy.java          |  13 +-
 .../tserver/constraints/ConstraintChecker.java     |   9 +-
 .../org/apache/accumulo/tserver/log/DfsLogger.java |  20 +-
 .../accumulo/tserver/log/SortedLogRecovery.java    |   6 +-
 .../metrics/Metrics2TabletServerMetrics.java       |  28 +-
 .../metrics/Metrics2TabletServerUpdateMetrics.java |  24 +-
 .../tserver/metrics/TabletServerMinCMetrics.java   |   4 +-
 .../tserver/metrics/TabletServerScanMetrics.java   |   4 +-
 .../tserver/metrics/TabletServerUpdateMetrics.java |   4 +-
 .../tserver/replication/AccumuloReplicaSystem.java |  20 +-
 .../BatchWriterReplicationReplayer.java            |   4 +-
 .../tserver/replication/ReplicationProcessor.java  |   8 +-
 .../replication/ReplicationServicerHandler.java    |   4 +-
 .../accumulo/tserver/session/SessionManager.java   |  10 +-
 .../tserver/tablet/BulkImportCacheCleaner.java     |   4 +-
 .../accumulo/tserver/tablet/CompactionWatcher.java |   4 +-
 .../apache/accumulo/tserver/tablet/Compactor.java  |  16 +-
 .../accumulo/tserver/tablet/DatafileManager.java   |   8 +-
 .../accumulo/tserver/tablet/MinorCompactor.java    |   4 +-
 .../apache/accumulo/tserver/tablet/RootFiles.java  |   4 +-
 .../accumulo/tserver/tablet/ScanDataSource.java    |  35 +-
 .../org/apache/accumulo/tserver/tablet/Tablet.java |  78 +--
 .../apache/accumulo/tserver/tablet/TabletData.java |   4 +-
 .../accumulo/tserver/tablet/TabletMemory.java      |   8 +-
 .../apache/accumulo/tserver/InMemoryMapTest.java   |   4 +-
 .../tserver/LargestFirstMemoryManagerTest.java     |  12 +-
 .../tserver/TabletServerSyncCheckTest.java         |   4 +-
 .../accumulo/tserver/TservConstraintEnvTest.java   |   4 +-
 .../accumulo/tserver/WalRemovalOrderTest.java      |   4 +-
 .../compaction/DefaultCompactionStrategyTest.java  |  12 +-
 .../strategies/BasicCompactionStrategyTest.java    |   8 +-
 .../ConfigurableCompactionStrategyTest.java        |   6 +-
 .../apache/accumulo/tserver/log/DfsLoggerTest.java |  28 +-
 .../tserver/log/SortedLogRecoveryTest.java         | 236 ++++-----
 .../tserver/log/TestUpgradePathForWALogs.java      |   8 +-
 .../replication/AccumuloReplicaSystemTest.java     |  20 +-
 .../replication/ReplicationProcessorTest.java      |   4 +-
 .../tserver/tablet/DatafileManagerTest.java        |   4 +-
 .../accumulo/tserver/tablet/RootFilesTest.java     |   8 +-
 .../main/java/org/apache/accumulo/shell/Shell.java |  20 +-
 .../accumulo/shell/commands/AddAuthsCommand.java   |   8 +-
 .../accumulo/shell/commands/AddSplitsCommand.java  |   4 +-
 .../accumulo/shell/commands/CloneTableCommand.java |   4 +-
 .../accumulo/shell/commands/CompactCommand.java    |  36 +-
 .../accumulo/shell/commands/ConfigCommand.java     |  12 +-
 .../shell/commands/CreateNamespaceCommand.java     |   4 +-
 .../shell/commands/CreateTableCommand.java         |  16 +-
 .../accumulo/shell/commands/CreateUserCommand.java |   8 +-
 .../apache/accumulo/shell/commands/DUCommand.java  |   8 +-
 .../accumulo/shell/commands/DeleteCommand.java     |   8 +-
 .../accumulo/shell/commands/DeleteIterCommand.java |   8 +-
 .../shell/commands/DeleteNamespaceCommand.java     |   4 +-
 .../accumulo/shell/commands/DeleteRowsCommand.java |   4 +-
 .../shell/commands/DeleteScanIterCommand.java      |   8 +-
 .../accumulo/shell/commands/ExecfileCommand.java   |   4 +-
 .../accumulo/shell/commands/FateCommand.java       |  18 +-
 .../accumulo/shell/commands/GetAuthsCommand.java   |   8 +-
 .../accumulo/shell/commands/GetGroupsCommand.java  |   4 +-
 .../accumulo/shell/commands/GetSplitsCommand.java  |  24 +-
 .../accumulo/shell/commands/GrepCommand.java       |   4 +-
 .../accumulo/shell/commands/InsertCommand.java     |  10 +-
 .../shell/commands/ListCompactionsCommand.java     |   4 +-
 .../accumulo/shell/commands/ListIterCommand.java   |   4 +-
 .../accumulo/shell/commands/MergeCommand.java      |   4 +-
 .../accumulo/shell/commands/NamespacesCommand.java |   8 +-
 .../apache/accumulo/shell/commands/OptUtil.java    |   4 +-
 .../accumulo/shell/commands/PasswdCommand.java     |   8 +-
 .../accumulo/shell/commands/ScanCommand.java       |  16 +-
 .../accumulo/shell/commands/SetAuthsCommand.java   |   8 +-
 .../accumulo/shell/commands/SetIterCommand.java    |  12 +-
 .../shell/commands/SetScanIterCommand.java         |   4 +-
 .../commands/ShellPluginConfigurationCommand.java  |   8 +-
 .../accumulo/shell/commands/SummariesCommand.java  |   4 +-
 .../accumulo/shell/commands/TableOperation.java    |  12 +-
 .../accumulo/shell/commands/TablesCommand.java     |   7 +-
 .../accumulo/shell/commands/TraceCommand.java      |  12 +-
 .../shell/commands/UserPermissionsCommand.java     |   4 +-
 .../org/apache/accumulo/shell/ShellUtilTest.java   |  10 +-
 .../shell/commands/DeleteTableCommandTest.java     |   4 +-
 .../shell/format/DeleterFormatterTest.java         |   2 +-
 .../main/java/org/apache/accumulo/start/Main.java  |  16 +-
 .../start/classloader/AccumuloClassLoader.java     |   4 +-
 .../classloader/vfs/AccumuloVFSClassLoader.java    |  16 +-
 .../start/classloader/vfs/ContextManager.java      |   4 +-
 .../classloader/vfs/UniqueFileReplicator.java      |   4 +-
 .../vfs/AccumuloReloadingVFSClassLoaderTest.java   |   4 +-
 .../vfs/AccumuloVFSClassLoaderTest.java            |   4 +-
 .../start/classloader/vfs/ContextManagerTest.java  |   8 +-
 .../accumulo/harness/AccumuloClusterHarness.java   |  18 +-
 .../accumulo/harness/SharedMiniClusterBase.java    |   4 +-
 .../conf/AccumuloClusterPropertyConfiguration.java |   4 +-
 .../conf/AccumuloMiniClusterConfiguration.java     |   4 +-
 .../StandaloneAccumuloClusterConfiguration.java    |  60 +--
 .../org/apache/accumulo/test/AuditMessageIT.java   |  20 +-
 .../accumulo/test/BadDeleteMarkersCreatedIT.java   |   4 +-
 .../java/org/apache/accumulo/test/CleanWalIT.java  |   8 +-
 .../apache/accumulo/test/ClientSideIteratorIT.java |   4 +-
 .../java/org/apache/accumulo/test/CloneIT.java     |  24 +-
 .../accumulo/test/CompactionRateLimitingIT.java    |   4 +-
 .../apache/accumulo/test/ConditionalWriterIT.java  | 195 ++++---
 .../org/apache/accumulo/test/ExistingMacIT.java    |  12 +-
 .../apache/accumulo/test/FairVolumeChooser.java    |   3 +-
 .../apache/accumulo/test/GarbageCollectWALIT.java  |   4 +-
 .../org/apache/accumulo/test/HardListIterator.java |   6 +-
 .../org/apache/accumulo/test/ImportExportIT.java   |   5 +-
 .../org/apache/accumulo/test/InMemoryMapIT.java    |  12 +-
 .../accumulo/test/InterruptibleScannersIT.java     |   4 +-
 .../accumulo/test/IsolationAndDeepCopyIT.java      |   4 +-
 .../test/MissingWalHeaderCompletesRecoveryIT.java  |   8 +-
 .../org/apache/accumulo/test/NamespacesIT.java     |  11 +-
 .../accumulo/test/NewTableConfigurationIT.java     |  64 +--
 .../java/org/apache/accumulo/test/SampleIT.java    |  36 +-
 .../org/apache/accumulo/test/ShellConfigIT.java    |   8 +-
 .../java/org/apache/accumulo/test/ShellIT.java     |   4 +-
 .../org/apache/accumulo/test/ShellServerIT.java    |  20 +-
 .../accumulo/test/TableConfigurationUpdateIT.java  |   4 +-
 .../apache/accumulo/test/TableOperationsIT.java    |  16 +-
 .../org/apache/accumulo/test/TestBinaryRows.java   |  16 +-
 .../java/org/apache/accumulo/test/TestIngest.java  |  12 +-
 .../ThriftServerBindsBeforeZooKeeperLockIT.java    |  12 +-
 .../org/apache/accumulo/test/TotalQueuedIT.java    |   4 +-
 .../apache/accumulo/test/TransportCachingIT.java   |   4 +-
 .../accumulo/test/UserCompactionStrategyIT.java    |  20 +-
 .../org/apache/accumulo/test/VerifyIngest.java     |   4 +-
 .../org/apache/accumulo/test/VolumeChooserIT.java  |   4 +-
 .../java/org/apache/accumulo/test/VolumeIT.java    |   8 +-
 .../test/constraints/NumericValueConstraint.java   |   4 +-
 .../accumulo/test/functional/AccumuloClientIT.java |   4 +-
 .../functional/BalanceAfterCommsFailureIT.java     |   4 +-
 .../accumulo/test/functional/BloomFilterIT.java    |   4 +-
 .../accumulo/test/functional/BulkFailureIT.java    |   4 +-
 .../accumulo/test/functional/BulkFileIT.java       |   9 +-
 .../accumulo/test/functional/BulkLoadIT.java       |   4 +-
 .../apache/accumulo/test/functional/CleanUpIT.java |   6 +-
 .../test/functional/ConfigurableCompactionIT.java  |   4 +-
 .../accumulo/test/functional/CredentialsIT.java    |   8 +-
 .../test/functional/DeleteEverythingIT.java        |   4 +-
 .../test/functional/DynamicThreadPoolsIT.java      |   4 +-
 .../test/functional/FateConcurrencyIT.java         |   9 +-
 .../test/functional/GarbageCollectorIT.java        |   8 +-
 .../test/functional/HalfDeadTServerIT.java         |   6 +-
 .../accumulo/test/functional/KerberosIT.java       |  84 +--
 .../accumulo/test/functional/KerberosProxyIT.java  |  47 +-
 .../test/functional/KerberosRenewalIT.java         |   4 +-
 .../accumulo/test/functional/LargeRowIT.java       |   4 +-
 .../accumulo/test/functional/NativeMapIT.java      |  24 +-
 .../accumulo/test/functional/PermissionsIT.java    |  16 +-
 .../accumulo/test/functional/ReadWriteIT.java      |  14 +-
 .../test/functional/RegexGroupBalanceIT.java       |   4 +-
 .../apache/accumulo/test/functional/ScanIdIT.java  |   4 +-
 .../accumulo/test/functional/ScanIteratorIT.java   |   8 +-
 .../accumulo/test/functional/ShutdownIT.java       |   8 +-
 .../accumulo/test/functional/SplitRecoveryIT.java  |  20 +-
 .../apache/accumulo/test/functional/SummaryIT.java |  39 +-
 .../functional/TabletStateChangeIteratorIT.java    |  16 +-
 .../accumulo/test/functional/TooManyDeletesIT.java |   8 +-
 .../test/functional/WriteAheadLogEncryptedIT.java  |   4 +-
 .../accumulo/test/functional/WriteLotsIT.java      |   4 +-
 .../accumulo/test/functional/ZombieTServer.java    |   4 +-
 .../CloseWriteAheadLogReferencesIT.java            |   4 +-
 .../test/mapred/AccumuloFileOutputFormatIT.java    |   9 +-
 .../test/mapred/AccumuloInputFormatIT.java         |  15 +-
 .../mapred/AccumuloMultiTableInputFormatIT.java    |  14 +-
 .../test/mapred/AccumuloOutputFormatIT.java        |   3 +-
 .../apache/accumulo/test/mapred/TokenFileIT.java   |   4 +-
 .../test/mapreduce/AccumuloFileOutputFormatIT.java |   9 +-
 .../test/mapreduce/AccumuloInputFormatIT.java      |  25 +-
 .../mapreduce/AccumuloMultiTableInputFormatIT.java |  14 +-
 .../accumulo/test/mapreduce/MapReduceIT.java       |   4 +-
 .../apache/accumulo/test/mapreduce/RowHash.java    |   4 +-
 .../accumulo/test/mapreduce/TokenFileIT.java       |   4 +-
 .../apache/accumulo/test/master/MergeStateIT.java  |  13 +-
 .../accumulo/test/master/SuspendedTabletsIT.java   |  17 +-
 .../test/performance/ContinuousIngest.java         |  12 +-
 .../accumulo/test/performance/NullTserver.java     |   4 +-
 .../test/performance/scan/CollectTabletStats.java  |  16 +-
 .../accumulo/test/proxy/ProxyDurabilityIT.java     |   9 +-
 .../accumulo/test/proxy/SimpleProxyBase.java       |  38 +-
 .../test/replication/CyclicReplicationIT.java      |  16 +-
 .../test/replication/FinishedWorkUpdaterIT.java    |  20 +-
 ...GarbageCollectorCommunicatesWithTServersIT.java |   8 +-
 .../test/replication/KerberosReplicationIT.java    |   8 +-
 .../replication/MultiInstanceReplicationIT.java    |  28 +-
 .../replication/MultiTserverReplicationIT.java     |  19 +-
 .../RemoveCompleteReplicationRecordsIT.java        |   4 +-
 .../accumulo/test/replication/ReplicationIT.java   |  12 +-
 .../accumulo/test/replication/StatusMakerIT.java   |  26 +-
 .../test/replication/UnorderedWorkAssignerIT.java  |   7 +-
 .../UnorderedWorkAssignerReplicationIT.java        |  32 +-
 .../UnusedWalDoesntCloseReplicationStatusIT.java   |   8 +-
 .../apache/accumulo/test/start/KeywordStartIT.java |   8 +-
 .../org/apache/accumulo/test/util/CertUtils.java   |   8 +-
 .../accumulo/test/fate/zookeeper/ZooLockTest.java  |  44 +-
 .../accumulo/test/iterator/AgeOffFilterTest.java   |   4 +-
 .../apache/accumulo/test/util/CertUtilsTest.java   |   4 +-
 571 files changed, 4180 insertions(+), 4124 deletions(-)


[accumulo] 01/01: Merge branch '1.9'

Posted by ct...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit bffc9a3ed05f0314bc3d148bf32d85238db28db0
Merge: 89d36cf 0a9837f
Author: Christopher Tubbs <ct...@apache.org>
AuthorDate: Sat Apr 20 01:03:29 2019 -0400

    Merge branch '1.9'

 .../org/apache/accumulo/tserver/tablet/Tablet.java |  78 +--
 .../apache/accumulo/tserver/tablet/TabletData.java |   4 +-
 .../accumulo/tserver/tablet/TabletMemory.java      |   8 +-
 .../apache/accumulo/tserver/InMemoryMapTest.java   |   4 +-
 .../tserver/LargestFirstMemoryManagerTest.java     |  12 +-
 .../tserver/TabletServerSyncCheckTest.java         |   4 +-
 .../accumulo/tserver/TservConstraintEnvTest.java   |   4 +-
 .../accumulo/tserver/WalRemovalOrderTest.java      |   4 +-
 .../compaction/DefaultCompactionStrategyTest.java  |  12 +-
 .../strategies/BasicCompactionStrategyTest.java    |   8 +-
 .../ConfigurableCompactionStrategyTest.java        |   6 +-
 .../apache/accumulo/tserver/log/DfsLoggerTest.java |  28 +-
 .../tserver/log/SortedLogRecoveryTest.java         | 236 ++++-----
 .../tserver/log/TestUpgradePathForWALogs.java      |   8 +-
 .../replication/AccumuloReplicaSystemTest.java     |  20 +-
 .../replication/ReplicationProcessorTest.java      |   4 +-
 .../tserver/tablet/DatafileManagerTest.java        |   4 +-
 .../accumulo/tserver/tablet/RootFilesTest.java     |   8 +-
 .../main/java/org/apache/accumulo/shell/Shell.java |  20 +-
 .../accumulo/shell/commands/AddAuthsCommand.java   |   8 +-
 .../accumulo/shell/commands/AddSplitsCommand.java  |   4 +-
 .../accumulo/shell/commands/CloneTableCommand.java |   4 +-
 .../accumulo/shell/commands/CompactCommand.java    |  36 +-
 .../accumulo/shell/commands/ConfigCommand.java     |  12 +-
 .../shell/commands/CreateNamespaceCommand.java     |   4 +-
 .../shell/commands/CreateTableCommand.java         |  16 +-
 .../accumulo/shell/commands/CreateUserCommand.java |   8 +-
 .../apache/accumulo/shell/commands/DUCommand.java  |   8 +-
 .../accumulo/shell/commands/DeleteCommand.java     |   8 +-
 .../accumulo/shell/commands/DeleteIterCommand.java |   8 +-
 .../shell/commands/DeleteNamespaceCommand.java     |   4 +-
 .../accumulo/shell/commands/DeleteRowsCommand.java |   4 +-
 .../shell/commands/DeleteScanIterCommand.java      |   8 +-
 .../accumulo/shell/commands/ExecfileCommand.java   |   4 +-
 .../accumulo/shell/commands/FateCommand.java       |  18 +-
 .../accumulo/shell/commands/GetAuthsCommand.java   |   8 +-
 .../accumulo/shell/commands/GetGroupsCommand.java  |   4 +-
 .../accumulo/shell/commands/GetSplitsCommand.java  |  24 +-
 .../accumulo/shell/commands/GrepCommand.java       |   4 +-
 .../accumulo/shell/commands/InsertCommand.java     |  10 +-
 .../shell/commands/ListCompactionsCommand.java     |   4 +-
 .../accumulo/shell/commands/ListIterCommand.java   |   4 +-
 .../accumulo/shell/commands/MergeCommand.java      |   4 +-
 .../accumulo/shell/commands/NamespacesCommand.java |   8 +-
 .../apache/accumulo/shell/commands/OptUtil.java    |   4 +-
 .../accumulo/shell/commands/PasswdCommand.java     |   8 +-
 .../accumulo/shell/commands/ScanCommand.java       |  16 +-
 .../accumulo/shell/commands/SetAuthsCommand.java   |   8 +-
 .../accumulo/shell/commands/SetIterCommand.java    |  12 +-
 .../shell/commands/SetScanIterCommand.java         |   4 +-
 .../commands/ShellPluginConfigurationCommand.java  |   8 +-
 .../accumulo/shell/commands/SummariesCommand.java  |   4 +-
 .../accumulo/shell/commands/TableOperation.java    |  12 +-
 .../accumulo/shell/commands/TablesCommand.java     |   7 +-
 .../accumulo/shell/commands/TraceCommand.java      |  12 +-
 .../shell/commands/UserPermissionsCommand.java     |   4 +-
 .../org/apache/accumulo/shell/ShellUtilTest.java   |  10 +-
 .../shell/commands/DeleteTableCommandTest.java     |   4 +-
 .../shell/format/DeleterFormatterTest.java         |   2 +-
 .../main/java/org/apache/accumulo/start/Main.java  |  16 +-
 .../start/classloader/AccumuloClassLoader.java     |   4 +-
 .../classloader/vfs/AccumuloVFSClassLoader.java    |  16 +-
 .../start/classloader/vfs/ContextManager.java      |   4 +-
 .../classloader/vfs/UniqueFileReplicator.java      |   4 +-
 .../vfs/AccumuloReloadingVFSClassLoaderTest.java   |   4 +-
 .../vfs/AccumuloVFSClassLoaderTest.java            |   4 +-
 .../start/classloader/vfs/ContextManagerTest.java  |   8 +-
 .../accumulo/harness/AccumuloClusterHarness.java   |  18 +-
 .../accumulo/harness/SharedMiniClusterBase.java    |   4 +-
 .../conf/AccumuloClusterPropertyConfiguration.java |   4 +-
 .../conf/AccumuloMiniClusterConfiguration.java     |   4 +-
 .../StandaloneAccumuloClusterConfiguration.java    |  60 +--
 .../org/apache/accumulo/test/AuditMessageIT.java   |  20 +-
 .../accumulo/test/BadDeleteMarkersCreatedIT.java   |   4 +-
 .../java/org/apache/accumulo/test/CleanWalIT.java  |   8 +-
 .../apache/accumulo/test/ClientSideIteratorIT.java |   4 +-
 .../java/org/apache/accumulo/test/CloneIT.java     |  24 +-
 .../accumulo/test/CompactionRateLimitingIT.java    |   4 +-
 .../apache/accumulo/test/ConditionalWriterIT.java  | 195 ++++---
 .../org/apache/accumulo/test/ExistingMacIT.java    |  12 +-
 .../apache/accumulo/test/FairVolumeChooser.java    |   3 +-
 .../apache/accumulo/test/GarbageCollectWALIT.java  |   4 +-
 .../org/apache/accumulo/test/HardListIterator.java |   6 +-
 .../org/apache/accumulo/test/ImportExportIT.java   |   5 +-
 .../org/apache/accumulo/test/InMemoryMapIT.java    |  12 +-
 .../accumulo/test/InterruptibleScannersIT.java     |   4 +-
 .../accumulo/test/IsolationAndDeepCopyIT.java      |   4 +-
 .../test/MissingWalHeaderCompletesRecoveryIT.java  |   8 +-
 .../org/apache/accumulo/test/NamespacesIT.java     |  11 +-
 .../accumulo/test/NewTableConfigurationIT.java     |  64 +--
 .../java/org/apache/accumulo/test/SampleIT.java    |  36 +-
 .../org/apache/accumulo/test/ShellConfigIT.java    |   8 +-
 .../java/org/apache/accumulo/test/ShellIT.java     |   4 +-
 .../org/apache/accumulo/test/ShellServerIT.java    |  20 +-
 .../accumulo/test/TableConfigurationUpdateIT.java  |   4 +-
 .../apache/accumulo/test/TableOperationsIT.java    |  16 +-
 .../org/apache/accumulo/test/TestBinaryRows.java   |  16 +-
 .../java/org/apache/accumulo/test/TestIngest.java  |  12 +-
 .../ThriftServerBindsBeforeZooKeeperLockIT.java    |  12 +-
 .../org/apache/accumulo/test/TotalQueuedIT.java    |   4 +-
 .../apache/accumulo/test/TransportCachingIT.java   |   4 +-
 .../accumulo/test/UserCompactionStrategyIT.java    |  20 +-
 .../org/apache/accumulo/test/VerifyIngest.java     |   4 +-
 .../org/apache/accumulo/test/VolumeChooserIT.java  |   4 +-
 .../java/org/apache/accumulo/test/VolumeIT.java    |   8 +-
 .../test/constraints/NumericValueConstraint.java   |   4 +-
 .../accumulo/test/functional/AccumuloClientIT.java |   4 +-
 .../functional/BalanceAfterCommsFailureIT.java     |   4 +-
 .../accumulo/test/functional/BloomFilterIT.java    |   4 +-
 .../accumulo/test/functional/BulkFailureIT.java    |   4 +-
 .../accumulo/test/functional/BulkFileIT.java       |   9 +-
 .../accumulo/test/functional/BulkLoadIT.java       |   4 +-
 .../apache/accumulo/test/functional/CleanUpIT.java |   6 +-
 .../test/functional/ConfigurableCompactionIT.java  |   4 +-
 .../accumulo/test/functional/CredentialsIT.java    |   8 +-
 .../test/functional/DeleteEverythingIT.java        |   4 +-
 .../test/functional/DynamicThreadPoolsIT.java      |   4 +-
 .../test/functional/FateConcurrencyIT.java         |   9 +-
 .../test/functional/GarbageCollectorIT.java        |   8 +-
 .../test/functional/HalfDeadTServerIT.java         |   6 +-
 .../accumulo/test/functional/KerberosIT.java       |  84 +--
 .../accumulo/test/functional/KerberosProxyIT.java  |  47 +-
 .../test/functional/KerberosRenewalIT.java         |   4 +-
 .../accumulo/test/functional/LargeRowIT.java       |   4 +-
 .../accumulo/test/functional/NativeMapIT.java      |  24 +-
 .../accumulo/test/functional/PermissionsIT.java    |  16 +-
 .../accumulo/test/functional/ReadWriteIT.java      |  14 +-
 .../test/functional/RegexGroupBalanceIT.java       |   4 +-
 .../apache/accumulo/test/functional/ScanIdIT.java  |   4 +-
 .../accumulo/test/functional/ScanIteratorIT.java   |   8 +-
 .../accumulo/test/functional/ShutdownIT.java       |   8 +-
 .../accumulo/test/functional/SplitRecoveryIT.java  |  20 +-
 .../apache/accumulo/test/functional/SummaryIT.java |  39 +-
 .../functional/TabletStateChangeIteratorIT.java    |  16 +-
 .../accumulo/test/functional/TooManyDeletesIT.java |   8 +-
 .../test/functional/WriteAheadLogEncryptedIT.java  |   4 +-
 .../accumulo/test/functional/WriteLotsIT.java      |   4 +-
 .../accumulo/test/functional/ZombieTServer.java    |   4 +-
 .../CloseWriteAheadLogReferencesIT.java            |   4 +-
 .../test/mapred/AccumuloFileOutputFormatIT.java    |   9 +-
 .../test/mapred/AccumuloInputFormatIT.java         |  15 +-
 .../mapred/AccumuloMultiTableInputFormatIT.java    |  14 +-
 .../test/mapred/AccumuloOutputFormatIT.java        |   3 +-
 .../apache/accumulo/test/mapred/TokenFileIT.java   |   4 +-
 .../test/mapreduce/AccumuloFileOutputFormatIT.java |   9 +-
 .../test/mapreduce/AccumuloInputFormatIT.java      |  25 +-
 .../mapreduce/AccumuloMultiTableInputFormatIT.java |  14 +-
 .../accumulo/test/mapreduce/MapReduceIT.java       |   4 +-
 .../apache/accumulo/test/mapreduce/RowHash.java    |   4 +-
 .../accumulo/test/mapreduce/TokenFileIT.java       |   4 +-
 .../apache/accumulo/test/master/MergeStateIT.java  |  13 +-
 .../accumulo/test/master/SuspendedTabletsIT.java   |  17 +-
 .../test/performance/ContinuousIngest.java         |  12 +-
 .../accumulo/test/performance/NullTserver.java     |   4 +-
 .../test/performance/scan/CollectTabletStats.java  |  16 +-
 .../accumulo/test/proxy/ProxyDurabilityIT.java     |   9 +-
 .../accumulo/test/proxy/SimpleProxyBase.java       |  38 +-
 .../test/replication/CyclicReplicationIT.java      |  16 +-
 .../test/replication/FinishedWorkUpdaterIT.java    |  20 +-
 ...GarbageCollectorCommunicatesWithTServersIT.java |   8 +-
 .../test/replication/KerberosReplicationIT.java    |   8 +-
 .../replication/MultiInstanceReplicationIT.java    |  28 +-
 .../replication/MultiTserverReplicationIT.java     |  19 +-
 .../RemoveCompleteReplicationRecordsIT.java        |   4 +-
 .../accumulo/test/replication/ReplicationIT.java   |  12 +-
 .../accumulo/test/replication/StatusMakerIT.java   |  26 +-
 .../test/replication/UnorderedWorkAssignerIT.java  |   7 +-
 .../UnorderedWorkAssignerReplicationIT.java        |  32 +-
 .../UnusedWalDoesntCloseReplicationStatusIT.java   |   8 +-
 .../apache/accumulo/test/start/KeywordStartIT.java |   8 +-
 .../org/apache/accumulo/test/util/CertUtils.java   |   8 +-
 .../accumulo/test/fate/zookeeper/ZooLockTest.java  |  44 +-
 .../accumulo/test/iterator/AgeOffFilterTest.java   |   4 +-
 .../apache/accumulo/test/util/CertUtilsTest.java   |   4 +-
 571 files changed, 4180 insertions(+), 4124 deletions(-)

diff --cc core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
index e821283,f1ec42d..94f9c08
--- a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
@@@ -132,36 -280,129 +132,36 @@@ public class ClientOpts extends Help 
    public void parseArgs(String programName, String[] args, Object... others) {
      super.parseArgs(programName, args, others);
      startDebugLogging();
 -    startTracing(programName);
 -    updateKerberosCredentials();
    }
  
 -  protected Instance cachedInstance = null;
 -  protected ClientConfiguration cachedClientConfig = null;
 +  private Properties cachedProps = null;
  
 -  synchronized public Instance getInstance() {
 -    if (cachedInstance != null)
 -      return cachedInstance;
 -    if (mock)
 -      return cachedInstance = DeprecationUtil.makeMockInstance(instance);
 -    return cachedInstance = new ZooKeeperInstance(this.getClientConfiguration());
 +  public String getClientConfigFile() {
 +    if (clientConfigFile == null) {
-       URL clientPropsUrl = ClientOpts.class.getClassLoader()
-           .getResource("accumulo-client.properties");
++      URL clientPropsUrl =
++          ClientOpts.class.getClassLoader().getResource("accumulo-client.properties");
 +      if (clientPropsUrl != null) {
 +        clientConfigFile = clientPropsUrl.getFile();
 +      }
 +    }
 +    return clientConfigFile;
    }
  
 -  public String getPrincipal() throws AccumuloSecurityException {
 -    if (null == principal) {
 -      AuthenticationToken token = getToken();
 -      if (null == token) {
 -        throw new AccumuloSecurityException("No principal or authentication token was provided",
 -            SecurityErrorCode.BAD_CREDENTIALS);
 +  public Properties getClientProps() {
 +    if (cachedProps == null) {
 +      cachedProps = new Properties();
 +      if (getClientConfigFile() != null) {
 +        cachedProps = ClientInfoImpl.toProperties(getClientConfigFile());
        }
 -
 -      // In MapReduce, if we create a DelegationToken, the principal is updated from the
 -      // KerberosToken
 -      // used to obtain the DelegationToken.
 -      if (null != principal) {
 -        return principal;
 +      if (principal != null) {
 +        cachedProps.setProperty(ClientProperty.AUTH_PRINCIPAL.getKey(), principal);
        }
 -
 -      // Try to extract the principal automatically from Kerberos
 -      if (token instanceof KerberosToken) {
 -        principal = ((KerberosToken) token).getPrincipal();
 -      } else {
 -        principal = System.getProperty("user.name");
 +      if (securePassword != null) {
 +        ClientProperty.setPassword(cachedProps, securePassword.toString());
        }
 +      getOverrides().forEach((k, v) -> cachedProps.put(k, v));
 +      ClientProperty.validate(cachedProps);
      }
 -    return principal;
 -  }
 -
 -  public void setPrincipal(String principal) {
 -    this.principal = principal;
 -  }
 -
 -  public Password getPassword() {
 -    return password;
 -  }
 -
 -  public void setPassword(Password password) {
 -    this.password = password;
 -  }
 -
 -  public Password getSecurePassword() {
 -    return securePassword;
 -  }
 -
 -  public void setSecurePassword(Password securePassword) {
 -    this.securePassword = securePassword;
 -  }
 -
 -  public String getTokenClassName() {
 -    return tokenClassName;
 -  }
 -
 -  public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
 -    return getInstance().getConnector(getPrincipal(), getToken());
 -  }
 -
 -  public ClientConfiguration getClientConfiguration() throws IllegalArgumentException {
 -    if (cachedClientConfig != null)
 -      return cachedClientConfig;
 -
 -    ClientConfiguration clientConfig;
 -    try {
 -      if (clientConfigFile == null)
 -        clientConfig = ClientConfiguration.loadDefault();
 -      else
 -        clientConfig = ClientConfiguration.fromFile(new File(clientConfigFile));
 -    } catch (Exception e) {
 -      throw new IllegalArgumentException(e);
 -    }
 -    if (sslEnabled)
 -      clientConfig.setProperty(ClientProperty.INSTANCE_RPC_SSL_ENABLED, "true");
 -
 -    if (saslEnabled)
 -      clientConfig.setProperty(ClientProperty.INSTANCE_RPC_SASL_ENABLED, "true");
 -
 -    if (siteFile != null) {
 -      AccumuloConfiguration config = new AccumuloConfiguration() {
 -        Configuration xml = new Configuration();
 -        {
 -          xml.addResource(new Path(siteFile));
 -        }
 -
 -        @Override
 -        public void getProperties(Map<String,String> props, Predicate<String> filter) {
 -          for (Entry<String,String> prop : DefaultConfiguration.getInstance())
 -            if (filter.apply(prop.getKey()))
 -              props.put(prop.getKey(), prop.getValue());
 -          for (Entry<String,String> prop : xml)
 -            if (filter.apply(prop.getKey()))
 -              props.put(prop.getKey(), prop.getValue());
 -        }
 -
 -        @Override
 -        public String get(Property property) {
 -          String value = xml.get(property.getKey());
 -          if (value != null)
 -            return value;
 -          return DefaultConfiguration.getInstance().get(property);
 -        }
 -      };
 -      this.zookeepers = config.get(Property.INSTANCE_ZK_HOST);
 -
 -      String volDir = VolumeConfiguration.getVolumeUris(config)[0];
 -      Path instanceDir = new Path(volDir, "instance_id");
 -      String instanceIDFromFile = ZooUtil.getInstanceIDFromHdfs(instanceDir, config);
 -      if (config.getBoolean(Property.INSTANCE_RPC_SSL_ENABLED))
 -        clientConfig.setProperty(ClientProperty.INSTANCE_RPC_SSL_ENABLED, "true");
 -      return cachedClientConfig =
 -          clientConfig.withInstance(UUID.fromString(instanceIDFromFile)).withZkHosts(zookeepers);
 -    }
 -    return cachedClientConfig = clientConfig.withInstance(instance).withZkHosts(zookeepers);
 +    return cachedProps;
    }
 -
  }
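
The reworked ClientOpts above replaces the Instance/ClientConfiguration plumbing with a plain
java.util.Properties built in a fixed order: values from accumulo-client.properties first, then
the principal, then the secure password, then any command-line overrides. Below is a minimal
sketch of that layering for illustration only; it is not the ClientOpts code, and the "auth.*"
key strings are assumptions standing in for the ClientProperty constants used in the hunk.

  import java.io.FileReader;
  import java.io.IOException;
  import java.util.Map;
  import java.util.Properties;

  public class ClientPropsSketch {
    // Mirrors the precedence shown in getClientProps(): file, then principal,
    // then password, then overrides. Key strings are assumed, not verified.
    static Properties buildClientProps(String propsFile, String principal, String password,
        Map<String,String> overrides) throws IOException {
      Properties props = new Properties();
      if (propsFile != null) {
        try (FileReader reader = new FileReader(propsFile)) {
          props.load(reader); // base values from accumulo-client.properties
        }
      }
      if (principal != null) {
        props.setProperty("auth.principal", principal); // assumed key behind ClientProperty.AUTH_PRINCIPAL
      }
      if (password != null) {
        props.setProperty("auth.type", "password"); // assumed keys behind ClientProperty.setPassword
        props.setProperty("auth.token", password);
      }
      overrides.forEach(props::setProperty); // command-line overrides win last
      return props;
    }
  }

In the real class the merged Properties is then validated via ClientProperty.validate, as the
hunk shows.
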
diff --cc core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
index fe5eeef,f2ec775..d180475
--- a/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
@@@ -42,32 -37,19 +42,32 @@@ import org.apache.hadoop.util.StringUti
   */
  public class BatchWriterConfig implements Writable {
  
-   private static final Long DEFAULT_MAX_MEMORY = ConfigurationTypeHelper
-       .getMemoryAsBytes(BATCH_WRITER_MEMORY_MAX.getDefaultValue());
 -  private static final Long DEFAULT_MAX_MEMORY = 50 * 1024 * 1024l;
++  private static final Long DEFAULT_MAX_MEMORY =
++      ConfigurationTypeHelper.getMemoryAsBytes(BATCH_WRITER_MEMORY_MAX.getDefaultValue());
    private Long maxMemory = null;
  
-   private static final Long DEFAULT_MAX_LATENCY = ConfigurationTypeHelper
-       .getTimeInMillis(BATCH_WRITER_LATENCY_MAX.getDefaultValue());
 -  private static final Long DEFAULT_MAX_LATENCY = 2 * 60 * 1000l;
++  private static final Long DEFAULT_MAX_LATENCY =
++      ConfigurationTypeHelper.getTimeInMillis(BATCH_WRITER_LATENCY_MAX.getDefaultValue());
    private Long maxLatency = null;
  
 -  private static final Long DEFAULT_TIMEOUT = Long.MAX_VALUE;
 +  private static final long DEFAULT_TIMEOUT = getDefaultTimeout();
    private Long timeout = null;
  
-   private static final Integer DEFAULT_MAX_WRITE_THREADS = Integer
-       .parseInt(BATCH_WRITER_THREADS_MAX.getDefaultValue());
 -  private static final Integer DEFAULT_MAX_WRITE_THREADS = 3;
++  private static final Integer DEFAULT_MAX_WRITE_THREADS =
++      Integer.parseInt(BATCH_WRITER_THREADS_MAX.getDefaultValue());
    private Integer maxWriteThreads = null;
  
    private Durability durability = Durability.DEFAULT;
 +  private boolean isDurabilitySet = false;
 +
 +  private static long getDefaultTimeout() {
-     long defVal = ConfigurationTypeHelper
-         .getTimeInMillis(BATCH_WRITER_TIMEOUT_MAX.getDefaultValue());
++    long defVal =
++        ConfigurationTypeHelper.getTimeInMillis(BATCH_WRITER_TIMEOUT_MAX.getDefaultValue());
 +    if (defVal == 0L)
 +      return Long.MAX_VALUE;
 +    else
 +      return defVal;
 +  }
  
    /**
     * Sets the maximum memory to batch before writing. The smaller this value, the more frequently
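
For context on the BatchWriterConfig hunk above: the defaults are no longer hard-coded literals
but are derived from the BATCH_WRITER_* property defaults, with a default timeout of 0 treated
as no timeout (Long.MAX_VALUE). Explicit setters still take precedence; the following is a
hedged usage sketch with made-up values, not code from this commit.

  import java.util.concurrent.TimeUnit;

  import org.apache.accumulo.core.client.BatchWriterConfig;
  import org.apache.accumulo.core.client.Durability;

  public class BatchWriterConfigSketch {
    // Illustrative values only; anything left unset falls back to the derived defaults above.
    static BatchWriterConfig exampleConfig() {
      return new BatchWriterConfig()
          .setMaxMemory(64 * 1024 * 1024)     // bytes buffered client-side before sending
          .setMaxLatency(2, TimeUnit.MINUTES) // flush buffered mutations at least this often
          .setTimeout(5, TimeUnit.MINUTES)    // give up on writes unacknowledged after this long
          .setMaxWriteThreads(4)              // threads used to send mutations to tablet servers
          .setDurability(Durability.FLUSH);   // explicit durability; the diff above adds a flag tracking whether this was set
    }
  }
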
diff --cc core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
index b259a63,7c2a337..520da6c
--- a/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
@@@ -104,15 -139,14 +104,15 @@@ public class MutationsRejectedExceptio
      Map<String,Set<SecurityErrorCode>> result = new HashMap<>();
  
      for (Entry<TabletId,Set<SecurityErrorCode>> entry : hashMap.entrySet()) {
 +      TabletId tabletId = entry.getKey();
-       String tableInfo = Tables.getPrintableTableInfoFromId(context,
-           TableId.of(tabletId.getTableId().toString()));
+       String tableInfo =
 -          Tables.getPrintableTableInfoFromId(instance, entry.getKey().getTableId().toString());
++          Tables.getPrintableTableInfoFromId(context, TableId.of(tabletId.getTableId().toString()));
  
        if (!result.containsKey(tableInfo)) {
 -        result.put(tableInfo, new HashSet<SecurityErrorCode>());
 +        result.put(tableInfo, new HashSet<>());
        }
  
 -      result.get(tableInfo).addAll(hashMap.get(entry.getKey()));
 +      result.get(tableInfo).addAll(hashMap.get(tabletId));
      }
  
      return result.toString();
diff --cc core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
index f4b1d8e,fc60b0c..9e8623c
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
@@@ -196,131 -135,8 +196,131 @@@ public class NewTableConfiguration 
     */
    public NewTableConfiguration enableSampling(SamplerConfiguration samplerConfiguration) {
      requireNonNull(samplerConfiguration);
-     Map<String,String> tmp = new SamplerConfigurationImpl(samplerConfiguration)
-         .toTablePropertiesMap();
 -    SamplerConfigurationImpl.checkDisjoint(properties, samplerConfiguration);
 -    this.samplerConfiguration = samplerConfiguration;
++    Map<String,String> tmp =
++        new SamplerConfigurationImpl(samplerConfiguration).toTablePropertiesMap();
 +    checkDisjoint(properties, tmp, "sampler");
 +    this.samplerProps = tmp;
 +    return this;
 +  }
 +
 +  /**
 +   * Enables creating summary statistics using {@link Summarizer}s for the new table.
 +   *
 +   * @since 2.0.0
 +   */
 +  public NewTableConfiguration enableSummarization(SummarizerConfiguration... configs) {
 +    requireNonNull(configs);
-     Map<String,String> tmp = SummarizerConfigurationUtil
-         .toTablePropertiesMap(Arrays.asList(configs));
++    Map<String,String> tmp =
++        SummarizerConfigurationUtil.toTablePropertiesMap(Arrays.asList(configs));
 +    checkDisjoint(properties, tmp, "summarizer");
 +    summarizerProps = tmp;
 +    return this;
 +  }
 +
 +  /**
 +   * Configures a table's locality groups prior to initial table creation.
 +   *
 +   * Allows locality groups to be set prior to table creation. Additional calls to this method prior
 +   * to table creation will overwrite previous locality group mappings.
 +   *
 +   * @param groups
 +   *          mapping of locality group names to column families in the locality group
 +   *
 +   * @since 2.0.0
 +   *
 +   * @see TableOperations#setLocalityGroups
 +   */
 +  public NewTableConfiguration setLocalityGroups(Map<String,Set<Text>> groups) {
 +    // ensure locality groups do not overlap
 +    LocalityGroupUtil.ensureNonOverlappingGroups(groups);
 +    Map<String,String> tmp = new HashMap<>();
 +    for (Entry<String,Set<Text>> entry : groups.entrySet()) {
 +      Set<Text> colFams = entry.getValue();
 +      String value = LocalityGroupUtil.encodeColumnFamilies(colFams);
 +      tmp.put(Property.TABLE_LOCALITY_GROUP_PREFIX + entry.getKey(), value);
 +    }
 +    tmp.put(Property.TABLE_LOCALITY_GROUPS.getKey(), String.join(",", groups.keySet()));
 +    checkDisjoint(properties, tmp, "locality groups");
 +    localityProps = tmp;
      return this;
    }
 +
 +  /**
 +   * Create a new table with pre-configured splits from the provided input collection.
 +   *
 +   * @param splits
 +   *          A SortedSet of Text values to be used as split points in a newly created table.
 +   * @return this
 +   *
 +   * @since 2.0.0
 +   */
 +  public NewTableConfiguration withSplits(final SortedSet<Text> splits) {
 +    checkArgument(splits != null, "splits set is null");
 +    checkArgument(!splits.isEmpty(), "splits set is empty");
 +    this.splitProps = ImmutableSortedSet.copyOf(splits);
 +    return this;
 +  }
 +
 +  /**
 +   * Configure iterator settings for a table prior to its creation.
 +   *
 +   * Additional calls to this method before table creation will overwrite previous iterator
 +   * settings.
 +   *
 +   * @param setting
 +   *          object specifying the properties of the iterator
 +   *
 +   * @since 2.0.0
 +   *
 +   * @see TableOperations#attachIterator(String, IteratorSetting)
 +   */
 +  public NewTableConfiguration attachIterator(IteratorSetting setting) {
 +    return attachIterator(setting, EnumSet.allOf(IteratorScope.class));
 +  }
 +
 +  /**
 +   * Configure iterator settings for a table prior to its creation.
 +   *
 +   * @param setting
 +   *          object specifying the properties of the iterator
 +   * @param scopes
 +   *          enumerated set of iterator scopes
 +   *
 +   * @since 2.0.0
 +   *
 +   * @see TableOperations#attachIterator(String, IteratorSetting, EnumSet)
 +   */
 +  public NewTableConfiguration attachIterator(IteratorSetting setting,
 +      EnumSet<IteratorScope> scopes) {
 +    Objects.requireNonNull(setting, "setting cannot be null!");
 +    Objects.requireNonNull(scopes, "scopes cannot be null!");
 +    try {
 +      TableOperationsHelper.checkIteratorConflicts(iteratorProps, setting, scopes);
 +    } catch (AccumuloException e) {
 +      throw new IllegalArgumentException("The specified IteratorSetting"
 +          + " conflicts with an iterator already defined on this NewTableConfiguration", e);
 +    }
 +    for (IteratorScope scope : scopes) {
 +      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
 +          scope.name().toLowerCase(), setting.getName());
 +      for (Entry<String,String> prop : setting.getOptions().entrySet()) {
 +        iteratorProps.put(root + ".opt." + prop.getKey(), prop.getValue());
 +      }
 +      iteratorProps.put(root, setting.getPriority() + "," + setting.getIteratorClass());
 +      // verify that the iteratorProps assigned and the properties do not share any keys.
 +      checkDisjoint(properties, iteratorProps, "iterator");
 +    }
 +    return this;
 +  }
 +
 +  /**
 +   * Verify the provided properties are valid table properties.
 +   */
 +  private void checkTableProperties(Map<String,String> props) {
 +    props.keySet().forEach((key) -> {
 +      if (!key.startsWith(Property.TABLE_PREFIX.toString())) {
 +        throw new IllegalArgumentException("'" + key + "' is not a valid table property");
 +      }
 +    });
 +  }
  }
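
The NewTableConfiguration additions above make splits, locality groups, summarizers, and
iterators configurable before the table exists. A hedged usage sketch follows; the split point,
column families, and iterator options are invented for illustration, and the resulting object
would typically be passed to TableOperations.create(String, NewTableConfiguration).

  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;
  import java.util.SortedSet;
  import java.util.TreeSet;

  import org.apache.accumulo.core.client.IteratorSetting;
  import org.apache.accumulo.core.client.admin.NewTableConfiguration;
  import org.apache.hadoop.io.Text;

  public class NewTableConfigSketch {
    static NewTableConfiguration exampleConfig() {
      SortedSet<Text> splits = new TreeSet<>();
      splits.add(new Text("m")); // single, purely illustrative split point

      Set<Text> families = new HashSet<>();
      families.add(new Text("cf1"));
      families.add(new Text("cf2"));
      Map<String,Set<Text>> groups = new HashMap<>();
      groups.put("group1", families); // one locality group over two column families

      IteratorSetting ageoff = new IteratorSetting(30, "ageoff",
          "org.apache.accumulo.core.iterators.user.AgeOffFilter");
      ageoff.addOption("ttl", "3600000"); // age-off after one hour, in milliseconds; illustrative only

      return new NewTableConfiguration()
          .withSplits(splits)        // pre-split at creation time
          .setLocalityGroups(groups) // locality groups applied before any data is written
          .attachIterator(ageoff);   // attaches to all scopes, per the overload above
    }
  }
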
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
index 024536f,6b158f8..1274d1b
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
@@@ -333,8 -368,7 +333,8 @@@ public abstract class AbstractInputForm
     * @return The client configuration for the job
     * @since 1.7.0
     */
-   protected static org.apache.accumulo.core.client.ClientConfiguration getClientConfiguration(
-       JobConf job) {
 -  protected static ClientConfiguration getClientConfiguration(JobConf job) {
++  protected static org.apache.accumulo.core.client.ClientConfiguration
++      getClientConfiguration(JobConf job) {
      return InputConfigurator.getClientConfiguration(CLASS, job);
    }
  
@@@ -359,12 -403,10 +359,12 @@@
     *
     * @param job
     *          the Hadoop job instance to be configured
 -   * @return the {@link InputTableConfig} objects set on the job
 +   * @return the {@link org.apache.accumulo.core.client.mapreduce.InputTableConfig} objects set on
 +   *         the job
     * @since 1.6.0
     */
-   public static Map<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> getInputTableConfigs(
-       JobConf job) {
 -  public static Map<String,InputTableConfig> getInputTableConfigs(JobConf job) {
++  public static Map<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig>
++      getInputTableConfigs(JobConf job) {
      return InputConfigurator.getInputTableConfigs(CLASS, job);
    }
  
@@@ -379,12 -420,10 +379,12 @@@
     *          the Hadoop job instance to be configured
     * @param tableName
     *          the table name for which to grab the config object
 -   * @return the {@link InputTableConfig} for the given table
 +   * @return the {@link org.apache.accumulo.core.client.mapreduce.InputTableConfig} for the given
 +   *         table
     * @since 1.6.0
     */
-   public static org.apache.accumulo.core.client.mapreduce.InputTableConfig getInputTableConfig(
-       JobConf job, String tableName) {
 -  public static InputTableConfig getInputTableConfig(JobConf job, String tableName) {
++  public static org.apache.accumulo.core.client.mapreduce.InputTableConfig
++      getInputTableConfig(JobConf job, String tableName) {
      return InputConfigurator.getInputTableConfig(CLASS, job, tableName);
    }
  
@@@ -480,28 -535,26 +480,29 @@@
        String table = baseSplit.getTableName();
  
        // in case the table name changed, we can still use the previous name for terms of
 -      // configuration,
 -      // but the scanner will use the table id resolved at job setup time
 -      InputTableConfig tableConfig = getInputTableConfig(job, baseSplit.getTableName());
 +      // configuration, but the scanner will use the table id resolved at job setup time
-       org.apache.accumulo.core.client.mapreduce.InputTableConfig tableConfig = getInputTableConfig(
-           job, baseSplit.getTableName());
++      org.apache.accumulo.core.client.mapreduce.InputTableConfig tableConfig =
++          getInputTableConfig(job, baseSplit.getTableName());
 +
 +      ClientContext client = InputConfigurator.client(CLASS, baseSplit, job);
  
 -      log.debug("Creating connector with user: " + principal);
 +      log.debug("Created client with user: " + client.whoami());
        log.debug("Creating scanner for table: " + table);
        log.debug("Authorizations are: " + authorizations);
  
 -      if (baseSplit instanceof BatchInputSplit) {
 +      if (baseSplit instanceof org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit) {
          BatchScanner scanner;
-         org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit multiRangeSplit = (org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit) baseSplit;
 -        BatchInputSplit multiRangeSplit = (BatchInputSplit) baseSplit;
++        org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit multiRangeSplit =
++            (org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit) baseSplit;
  
          try {
            // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit
            // will not span tablets
            int scanThreads = 1;
-           scanner = client.createBatchScanner(baseSplit.getTableName(), authorizations,
-               scanThreads);
 -          scanner = instance.getConnector(principal, token)
 -              .createBatchScanner(baseSplit.getTableName(), authorizations, scanThreads);
++          scanner =
++              client.createBatchScanner(baseSplit.getTableName(), authorizations, scanThreads);
            setupIterators(job, scanner, baseSplit.getTableName(), baseSplit);
 -          if (null != classLoaderContext) {
 +          if (classLoaderContext != null) {
              scanner.setClassLoaderContext(classLoaderContext);
            }
          } catch (Exception e) {
@@@ -532,10 -585,16 +533,10 @@@
  
          try {
            if (isOffline) {
-             scanner = new OfflineScanner(client, TableId.of(baseSplit.getTableId()),
-                 authorizations);
 -            scanner = new OfflineScanner(instance, new Credentials(principal, token),
 -                baseSplit.getTableId(), authorizations);
 -          } else if (DeprecationUtil.isMockInstance(instance)) {
 -            scanner = instance.getConnector(principal, token)
 -                .createScanner(baseSplit.getTableName(), authorizations);
++            scanner =
++                new OfflineScanner(client, TableId.of(baseSplit.getTableId()), authorizations);
            } else {
 -            ClientConfiguration clientConf = getClientConfiguration(job);
 -            ClientContext context =
 -                new ClientContext(instance, new Credentials(principal, token), clientConf);
 -            scanner = new ScannerImpl(context, baseSplit.getTableId(), authorizations);
 +            scanner = new ScannerImpl(client, TableId.of(baseSplit.getTableId()), authorizations);
            }
            if (isIsolated) {
              log.info("Creating isolated scanner");
@@@ -629,32 -693,29 +630,33 @@@
      log.setLevel(logLevel);
      validateOptions(job);
  
 -    Random random = new Random();
 +    Random random = new SecureRandom();
      LinkedList<InputSplit> splits = new LinkedList<>();
-     Map<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> tableConfigs = getInputTableConfigs(
-         job);
 -    Map<String,InputTableConfig> tableConfigs = getInputTableConfigs(job);
 -    for (Map.Entry<String,InputTableConfig> tableConfigEntry : tableConfigs.entrySet()) {
++    Map<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> tableConfigs =
++        getInputTableConfigs(job);
 +
-     for (Map.Entry<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> tableConfigEntry : tableConfigs
-         .entrySet()) {
++    for (Map.Entry<String,
++        org.apache.accumulo.core.client.mapreduce.InputTableConfig> tableConfigEntry : tableConfigs
++            .entrySet()) {
 +
        String tableName = tableConfigEntry.getKey();
-       org.apache.accumulo.core.client.mapreduce.InputTableConfig tableConfig = tableConfigEntry
-           .getValue();
 -      InputTableConfig tableConfig = tableConfigEntry.getValue();
++      org.apache.accumulo.core.client.mapreduce.InputTableConfig tableConfig =
++          tableConfigEntry.getValue();
  
 -      Instance instance = getInstance(job);
 -      String tableId;
 -      // resolve table name to id once, and use id from this point forward
 -      if (DeprecationUtil.isMockInstance(instance)) {
 -        tableId = "";
 -      } else {
 -        try {
 -          tableId = Tables.getTableId(instance, tableName);
 -        } catch (TableNotFoundException e) {
 -          throw new IOException(e);
 -        }
 +      ClientContext client;
 +      try {
 +        client = InputConfigurator.client(CLASS, job);
 +      } catch (AccumuloException | AccumuloSecurityException e) {
 +        throw new IOException(e);
        }
  
 -      Authorizations auths = getScanAuthorizations(job);
 -      String principal = getPrincipal(job);
 -      AuthenticationToken token = getAuthenticationToken(job);
 +      TableId tableId;
 +      // resolve table name to id once, and use id from this point forward
 +      try {
 +        tableId = Tables.getTableId(client, tableName);
 +      } catch (TableNotFoundException e) {
 +        throw new IOException(e);
 +      }
  
        boolean batchScan = InputConfigurator.isBatchScan(CLASS, job);
        boolean supportBatchScan = !(tableConfig.isOfflineScan()
@@@ -734,10 -796,10 +736,11 @@@
              ArrayList<Range> clippedRanges = new ArrayList<>();
              for (Range r : extentRanges.getValue())
                clippedRanges.add(ke.clip(r));
-             org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit split = new org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit(
-                 tableName, tableId, clippedRanges, new String[] {location});
 -
 -            BatchInputSplit split =
 -                new BatchInputSplit(tableName, tableId, clippedRanges, new String[] {location});
 -            SplitUtils.updateSplit(split, instance, tableConfig, principal, token, auths, logLevel);
++            org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit split =
++                new org.apache.accumulo.core.clientImpl.mapred.BatchInputSplit(tableName, tableId,
++                    clippedRanges, new String[] {location});
 +            org.apache.accumulo.core.clientImpl.mapreduce.SplitUtils.updateSplit(split, tableConfig,
 +                logLevel);
  
              splits.add(split);
            } else {
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
index 79ebeeb,94a16ef..6d381af
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
@@@ -154,13 -167,13 +154,13 @@@ public class AccumuloFileOutputFormat e
        Progressable progress) throws IOException {
      // get the path of the temporary output file
      final Configuration conf = job;
-     final AccumuloConfiguration acuConf = FileOutputConfigurator.getAccumuloConfiguration(CLASS,
-         job);
+     final AccumuloConfiguration acuConf =
+         FileOutputConfigurator.getAccumuloConfiguration(CLASS, job);
  
      final String extension = acuConf.get(Property.TABLE_FILE_TYPE);
-     final Path file = new Path(getWorkOutputPath(job),
-         getUniqueName(job, "part") + "." + extension);
+     final Path file =
+         new Path(getWorkOutputPath(job), getUniqueName(job, "part") + "." + extension);
 -    final int visCacheSize = ConfiguratorBase.getVisibilityCacheSize(conf);
 +    final int visCacheSize = FileOutputConfigurator.getVisibilityCacheSize(conf);
  
      return new RecordWriter<Key,Value>() {
        RFileWriter out = null;
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
index 241bf61,1f4642b..bead6ea
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
@@@ -61,12 -57,10 +61,10 @@@ public class AccumuloInputFormat extend
      // Override the log level from the configuration as if the RangeInputSplit has one it's the more
      // correct one to use.
      if (split instanceof org.apache.accumulo.core.client.mapreduce.RangeInputSplit) {
-       // @formatter:off
        org.apache.accumulo.core.client.mapreduce.RangeInputSplit accSplit =
-         (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
-       // @formatter:on
+           (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
        Level level = accSplit.getLogLevel();
 -      if (null != level) {
 +      if (level != null) {
          log.setLevel(level);
        }
      } else {
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
index 8ea4f56,6604bf0..9dde164
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
@@@ -72,33 -68,33 +72,35 @@@ public class AccumuloMultiTableInputFor
    public RecordReader<Key,Value> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
        throws IOException {
      log.setLevel(getLogLevel(job));
-     InputFormatBase.RecordReaderBase<Key,Value> recordReader = new InputFormatBase.RecordReaderBase<Key,Value>() {
 -    RecordReaderBase<Key,Value> recordReader = new RecordReaderBase<Key,Value>() {
++    InputFormatBase.RecordReaderBase<Key,Value> recordReader =
++        new InputFormatBase.RecordReaderBase<Key,Value>() {
  
--      @Override
--      public boolean next(Key key, Value value) throws IOException {
--        if (scannerIterator.hasNext()) {
--          ++numKeysRead;
--          Map.Entry<Key,Value> entry = scannerIterator.next();
--          key.set(currentKey = entry.getKey());
--          value.set(entry.getValue().get());
--          if (log.isTraceEnabled())
--            log.trace("Processing key/value pair: " + DefaultFormatter.formatEntry(entry, true));
--          return true;
--        }
--        return false;
--      }
++          @Override
++          public boolean next(Key key, Value value) throws IOException {
++            if (scannerIterator.hasNext()) {
++              ++numKeysRead;
++              Map.Entry<Key,Value> entry = scannerIterator.next();
++              key.set(currentKey = entry.getKey());
++              value.set(entry.getValue().get());
++              if (log.isTraceEnabled())
++                log.trace(
++                    "Processing key/value pair: " + DefaultFormatter.formatEntry(entry, true));
++              return true;
++            }
++            return false;
++          }
  
--      @Override
--      public Key createKey() {
--        return new Key();
--      }
++          @Override
++          public Key createKey() {
++            return new Key();
++          }
  
--      @Override
--      public Value createValue() {
--        return new Value();
--      }
++          @Override
++          public Value createValue() {
++            return new Value();
++          }
  
--    };
++        };
      recordReader.initialize(split, job);
      return recordReader;
    }
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
index 9082159,ad731b3..b5a891d
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
@@@ -243,10 -248,10 +243,10 @@@ public abstract class AbstractInputForm
     * @see #setConnectorInfo(Job, String, AuthenticationToken)
     * @see #setConnectorInfo(Job, String, String)
     */
 -  protected static AuthenticationToken getAuthenticationToken(JobContext context) {
 +  protected static AuthenticationToken getAuthenticationToken(JobContext job) {
-     AuthenticationToken token = InputConfigurator.getAuthenticationToken(CLASS,
-         job.getConfiguration());
+     AuthenticationToken token =
 -        InputConfigurator.getAuthenticationToken(CLASS, context.getConfiguration());
 -    return ConfiguratorBase.unwrapAuthenticationToken(context, token);
++        InputConfigurator.getAuthenticationToken(CLASS, job.getConfiguration());
 +    return InputConfigurator.unwrapAuthenticationToken(job, token);
    }
  
    /**
@@@ -400,9 -452,8 +400,9 @@@
     * @return The ClientConfiguration
     * @since 1.7.0
     */
-   protected static org.apache.accumulo.core.client.ClientConfiguration getClientConfiguration(
-       JobContext job) {
 -  protected static ClientConfiguration getClientConfiguration(JobContext context) {
 -    return InputConfigurator.getClientConfiguration(CLASS, context.getConfiguration());
++  protected static org.apache.accumulo.core.client.ClientConfiguration
++      getClientConfiguration(JobContext job) {
 +    return InputConfigurator.getClientConfiguration(CLASS, job.getConfiguration());
    }
  
    /**
@@@ -507,8 -576,8 +507,9 @@@
        log.debug("Creating scanner for table: " + table);
        log.debug("Authorizations are: " + authorizations);
  
 -      if (split instanceof BatchInputSplit) {
 -        BatchInputSplit batchSplit = (BatchInputSplit) split;
 +      if (split instanceof org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit) {
-         org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit batchSplit = (org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit) split;
++        org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit batchSplit =
++            (org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit) split;
  
          BatchScanner scanner;
          try {
@@@ -766,10 -849,9 +767,11 @@@
              ArrayList<Range> clippedRanges = new ArrayList<>();
              for (Range r : extentRanges.getValue())
                clippedRanges.add(ke.clip(r));
-             org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit split = new org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit(
-                 tableName, tableId, clippedRanges, new String[] {location});
 -            BatchInputSplit split =
 -                new BatchInputSplit(tableName, tableId, clippedRanges, new String[] {location});
 -            SplitUtils.updateSplit(split, instance, tableConfig, principal, token, auths, logLevel);
++            org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit split =
++                new org.apache.accumulo.core.clientImpl.mapreduce.BatchInputSplit(tableName,
++                    tableId, clippedRanges, new String[] {location});
 +            org.apache.accumulo.core.clientImpl.mapreduce.SplitUtils.updateSplit(split, tableConfig,
 +                logLevel);
  
              splits.add(split);
            } else {
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
index acf6cd1,6aa82ae..d4cf85d
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
@@@ -60,12 -56,10 +60,10 @@@ public class AccumuloInputFormat extend
      // Override the log level from the configuration as if the InputSplit has one it's the more
      // correct one to use.
      if (split instanceof org.apache.accumulo.core.client.mapreduce.RangeInputSplit) {
-       // @formatter:off
        org.apache.accumulo.core.client.mapreduce.RangeInputSplit accSplit =
-         (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
-       // @formatter:on
+           (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
        Level level = accSplit.getLogLevel();
 -      if (null != level) {
 +      if (level != null) {
          log.setLevel(level);
        }
      } else {
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
index 93cc7a6,06e4405..377d5f0
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
@@@ -214,9 -214,9 +214,9 @@@ public class AccumuloOutputFormat exten
     * @see #setConnectorInfo(Job, String, String)
     */
    protected static AuthenticationToken getAuthenticationToken(JobContext context) {
-     AuthenticationToken token = OutputConfigurator.getAuthenticationToken(CLASS,
-         context.getConfiguration());
+     AuthenticationToken token =
+         OutputConfigurator.getAuthenticationToken(CLASS, context.getConfiguration());
 -    return ConfiguratorBase.unwrapAuthenticationToken(context, token);
 +    return OutputConfigurator.unwrapAuthenticationToken(context, token);
    }
  
    /**
diff --cc core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 78e462c,a7a55c9..4c8cabb
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@@ -179,8 -184,7 +179,9 @@@ public class RangeInputSplit extends In
  
      if (in.readBoolean()) {
        int ordinal = in.readInt();
-       this.tokenSource = org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase.TokenSource
-           .values()[ordinal];
 -      this.tokenSource = TokenSource.values()[ordinal];
++      this.tokenSource =
++          org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase.TokenSource
++              .values()[ordinal];
  
        switch (this.tokenSource) {
          case INLINE:
@@@ -355,12 -362,11 +356,12 @@@
     * @deprecated since 1.7.0, use getInstance(ClientConfiguration) instead.
     */
    @Deprecated
 -  public Instance getInstance() {
 -    return getInstance(ClientConfiguration.loadDefault());
 +  public org.apache.accumulo.core.client.Instance getInstance() {
 +    return getInstance(org.apache.accumulo.core.client.ClientConfiguration.loadDefault());
    }
  
-   public org.apache.accumulo.core.client.Instance getInstance(
-       org.apache.accumulo.core.client.ClientConfiguration base) {
 -  public Instance getInstance(ClientConfiguration base) {
++  public org.apache.accumulo.core.client.Instance
++      getInstance(org.apache.accumulo.core.client.ClientConfiguration base) {
      if (null == instanceName) {
        return null;
      }
@@@ -402,12 -411,12 +403,14 @@@
    }
  
    public void setToken(AuthenticationToken token) {
-     this.tokenSource = org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase.TokenSource.INLINE;
 -    this.tokenSource = TokenSource.INLINE;
++    this.tokenSource =
++        org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase.TokenSource.INLINE;
      this.token = token;
    }
  
    public void setToken(String tokenFile) {
-     this.tokenSource = org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase.TokenSource.FILE;
 -    this.tokenSource = TokenSource.FILE;
++    this.tokenSource =
++        org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase.TokenSource.FILE;
      this.tokenFile = tokenFile;
    }
  
diff --cc core/src/main/java/org/apache/accumulo/core/client/summary/CountingSummarizer.java
index 41b1216,0000000..6bb9dcd
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/summary/CountingSummarizer.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/summary/CountingSummarizer.java
@@@ -1,319 -1,0 +1,323 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.summary;
 +
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.function.Consumer;
 +import java.util.function.Function;
 +import java.util.function.UnaryOperator;
 +import java.util.stream.Collectors;
 +
 +import org.apache.accumulo.core.client.summary.summarizers.VisibilitySummarizer;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.commons.lang.mutable.MutableLong;
 +
 +/**
 + * This class counts arbitrary keys while defending against too many keys and keys that are too
 + * long.
 + *
 + * <p>
 + * During collection and summarization this class will use the functions from {@link #converter()}
 + * and {@link #encoder()}. For each key/value the function from {@link #converter()} will be called
 + * to create zero or more counter objects. A counter associated with each counter object will be
 + * incremented, as long as there are not too many counters and the counter object is not too long.
 + *
 + * <p>
 + * When {@link Summarizer.Collector#summarize(Summarizer.StatisticConsumer)} is called, the function
 + * from {@link #encoder()} will be used to convert counter objects to strings. These strings will be
 + * used to emit statistics. Overriding {@link #encoder()} is optional. One reason to override is if
 + * the counter object contains binary or special data. For example, a function that base64 encodes
 + * counter objects could be created.
 + *
 + * <p>
 + * If the counter key type is mutable, then consider overriding {@link #copier()}.
 + *
 + * <p>
 + * The function returned by {@link #converter()} will be called frequently and should be very
 + * efficient. The function returned by {@link #encoder()} will be called less frequently and can be
 + * more expensive. The reason these two functions exist is to avoid the conversion to string for
 + * each key value, if that conversion is unnecessary.
 + *
 + * <p>
 + * Below is an example implementation that counts column visibilities. This example avoids
 + * converting column visibility to string for each key/value. This example shows the source code for
 + * {@link VisibilitySummarizer}.
 + *
 + * <pre>
 + * <code>
 + *   public class VisibilitySummarizer extends CountingSummarizer&lt;ByteSequence&gt; {
 + *     &#064;Override
 + *     protected UnaryOperator&lt;ByteSequence&gt; copier() {
 + *       // ByteSequences are mutable, so override and provide a copy function
 + *       return ArrayByteSequence::new;
 + *     }
 + *
 + *     &#064;Override
 + *     protected Converter&lt;ByteSequence&gt; converter() {
 + *       return (key, val, consumer) -&gt; consumer.accept(key.getColumnVisibilityData());
 + *     }
 + *   }
 + * </code>
 + * </pre>
 + *
 + * @param <K>
 + *          The counter key type. This type must have good implementations of
 + *          {@link Object#hashCode()} and {@link Object#equals(Object)}.
 + * @see CounterSummary
 + * @since 2.0.0
 + */
 +public abstract class CountingSummarizer<K> implements Summarizer {
 +
 +  /**
 +   * A configuration option for specifying the maximum number of unique counters an instance of this
 +   * summarizer should track. If not specified, a default of {@value #MAX_COUNTER_DEFAULT} will be
 +   * used.
 +   */
 +  public static final String MAX_COUNTERS_OPT = "maxCounters";
 +
 +  /**
 +   * A configuration option for specifying the maximum length of an individual counter key. If not
 +   * specified, a default of {@value #MAX_CKL_DEFAULT} will be used.
 +   */
 +  public static final String MAX_COUNTER_LEN_OPT = "maxCounterLen";
 +
 +  /**
 +   * A configuration option that determines whether delete keys are ignored. If set to true, then
 +   * delete keys will not be passed to the {@link Converter} and the statistic
 +   * {@value #DELETES_IGNORED_STAT} will track the number of deletes ignored. This option defaults
 +   * to {@value #INGNORE_DELETES_DEFAULT}.
 +   */
 +  public static final String INGNORE_DELETES_OPT = "ignoreDeletes";
 +
 +  /**
 +   * This prefixes all counters when emitting statistics in
 +   * {@link Summarizer.Collector#summarize(Summarizer.StatisticConsumer)}.
 +   */
 +  public static final String COUNTER_STAT_PREFIX = "c:";
 +
 +  /**
 +   * This is the name of the statistic that tracks how many counter objects were ignored because
 +   * the number of unique counters was exceeded. The max number of unique counters is specified by
 +   * {@link #MAX_COUNTERS_OPT}.
 +   */
 +  public static final String TOO_MANY_STAT = "tooMany";
 +
 +  /**
 +   * This is the name of the statistic that tracks how many counter objects were ignored because
 +   * they were too long. The maximum length is specified by {@link #MAX_COUNTER_LEN_OPT}.
 +   */
 +  public static final String TOO_LONG_STAT = "tooLong";
 +
 +  /**
 +   * This is the name of the statistic that tracks the total number of counter objects emitted by
 +   * the {@link Converter}. This includes emitted counter objects that were ignored.
 +   */
 +  public static final String EMITTED_STAT = "emitted";
 +
 +  /**
 +   * This is the name of the statistic that tracks the total number of delete keys seen. This
 +   * statistic is only incremented when the {@value #INGNORE_DELETES_OPT} option is set to true.
 +   */
 +  public static final String DELETES_IGNORED_STAT = "deletesIgnored";
 +
 +  /**
 +   * This tracks the total number of key/values seen by the {@link Summarizer.Collector}.
 +   */
 +  public static final String SEEN_STAT = "seen";
 +
 +  // this default can not be changed as persisted summary data depends on it. See the documentation
 +  // about persistence in the Summarizer class javadoc.
 +  public static final String MAX_COUNTER_DEFAULT = "1024";
 +
 +  // this default can not be changed as persisted summary data depends on it
 +  public static final String MAX_CKL_DEFAULT = "128";
 +
 +  // this default can not be changed as persisted summary data depends on it
 +  public static final String INGNORE_DELETES_DEFAULT = "true";
 +
-   private static final String[] ALL_STATS = {TOO_LONG_STAT, TOO_MANY_STAT, EMITTED_STAT, SEEN_STAT,
-       DELETES_IGNORED_STAT};
++  private static final String[] ALL_STATS =
++      {TOO_LONG_STAT, TOO_MANY_STAT, EMITTED_STAT, SEEN_STAT, DELETES_IGNORED_STAT};
 +
 +  private int maxCounters;
 +  private int maxCounterKeyLen;
 +  private boolean ignoreDeletes;
 +
 +  private void init(SummarizerConfiguration conf) {
-     maxCounters = Integer
-         .parseInt(conf.getOptions().getOrDefault(MAX_COUNTERS_OPT, MAX_COUNTER_DEFAULT));
-     maxCounterKeyLen = Integer
-         .parseInt(conf.getOptions().getOrDefault(MAX_COUNTER_LEN_OPT, MAX_CKL_DEFAULT));
++    maxCounters =
++        Integer.parseInt(conf.getOptions().getOrDefault(MAX_COUNTERS_OPT, MAX_COUNTER_DEFAULT));
++    maxCounterKeyLen =
++        Integer.parseInt(conf.getOptions().getOrDefault(MAX_COUNTER_LEN_OPT, MAX_CKL_DEFAULT));
 +    ignoreDeletes = Boolean
 +        .parseBoolean(conf.getOptions().getOrDefault(INGNORE_DELETES_OPT, INGNORE_DELETES_DEFAULT));
 +  }
 +
 +  /**
 +   * A function that converts key values to zero or more counter objects.
 +   *
 +   * @since 2.0.0
 +   */
 +  public interface Converter<K> {
 +    /**
 +     * @param consumer
 +     *          emit counter objects derived from key and value to this consumer
 +     */
 +    void convert(Key k, Value v, Consumer<K> consumer);
 +  }
 +
 +  /**
 +   *
 +   * @return A function that is used to convert each key value to zero or more counter objects. Each
 +   *         function returned should be independent.
 +   */
 +  protected abstract Converter<K> converter();
 +
 +  /**
 +   * @return A function that is used to convert counter objects to String. The default function
 +   *         calls {@link Object#toString()} on the counter object.
 +   */
 +  protected Function<K,String> encoder() {
 +    return Object::toString;
 +  }
 +
 +  /**
 +   * Override this if your counter key type is mutable.
 +   *
 +   * @return a function that is used to copy the counter object. This function is only used when
 +   *         the collector has never seen the counter object before. In this case the collector may
 +   *         need to copy the counter object before using it as a map key. The default
 +   *         implementation is the {@link UnaryOperator#identity()} function.
 +   */
 +  protected UnaryOperator<K> copier() {
 +    return UnaryOperator.identity();
 +  }
 +
 +  @Override
 +  public Collector collector(SummarizerConfiguration sc) {
 +    init(sc);
 +    return new Collector() {
 +
 +      // Map used for computing the summary incrementally. The counter object is used as the map
 +      // key, which is more efficient than converting it to a String for each Key. The
 +      // conversion to String is deferred until the summary is requested.
 +
 +      private Map<K,MutableLong> counters = new HashMap<>();
 +      private long tooMany = 0;
 +      private long tooLong = 0;
 +      private long seen = 0;
 +      private long emitted = 0;
 +      private long deleted = 0;
 +      private Converter<K> converter = converter();
 +      private Function<K,String> encoder = encoder();
 +      private UnaryOperator<K> copier = copier();
 +
 +      private void incrementCounter(K counter) {
 +        emitted++;
 +
 +        MutableLong ml = counters.get(counter);
 +        if (ml == null) {
 +          if (counters.size() >= maxCounters) {
 +            // no need to store this counter in the map and get() it... just use instance variable
 +            tooMany++;
 +          } else {
 +            // we have never seen this key before, check if it's too long
 +            if (encoder.apply(counter).length() >= maxCounterKeyLen) {
 +              tooLong++;
 +            } else {
 +              counters.put(copier.apply(counter), new MutableLong(1));
 +            }
 +          }
 +        } else {
 +          // using mutable long allows calling put() to be avoided
 +          ml.increment();
 +        }
 +      }
 +
 +      @Override
 +      public void accept(Key k, Value v) {
 +        seen++;
 +        if (ignoreDeletes && k.isDeleted()) {
 +          deleted++;
 +        } else {
 +          converter.convert(k, v, this::incrementCounter);
 +        }
 +      }
 +
 +      @Override
 +      public void summarize(StatisticConsumer sc) {
 +        StringBuilder sb = new StringBuilder(COUNTER_STAT_PREFIX);
 +
 +        for (Entry<K,MutableLong> entry : counters.entrySet()) {
 +          sb.setLength(COUNTER_STAT_PREFIX.length());
 +          sb.append(encoder.apply(entry.getKey()));
 +          sc.accept(sb.toString(), entry.getValue().longValue());
 +        }
 +
 +        sc.accept(TOO_MANY_STAT, tooMany);
 +        sc.accept(TOO_LONG_STAT, tooLong);
 +        sc.accept(EMITTED_STAT, emitted);
 +        sc.accept(SEEN_STAT, seen);
 +        sc.accept(DELETES_IGNORED_STAT, deleted);
 +      }
 +    };
 +  }
 +
 +  @Override
 +  public Combiner combiner(SummarizerConfiguration sc) {
 +    init(sc);
 +    return (summary1, summary2) -> {
 +
 +      for (String key : ALL_STATS) {
 +        summary1.merge(key, summary2.getOrDefault(key, 0L), Long::sum);
 +      }
 +
 +      for (Entry<String,Long> entry : summary2.entrySet()) {
 +        String k2 = entry.getKey();
 +        Long v2 = entry.getValue();
 +
 +        if (k2.startsWith(COUNTER_STAT_PREFIX)) {
 +          summary1.merge(k2, v2, Long::sum);
 +        }
 +      }
 +
 +      if (summary1.size() - ALL_STATS.length > maxCounters) {
 +        // find the keys with the lowest counts to remove
-         List<String> keysToRemove = summary1.entrySet().stream()
-             .filter(e -> e.getKey().startsWith(COUNTER_STAT_PREFIX)) // filter out non counters
-             .sorted((e1, e2) -> Long.compare(e2.getValue(), e1.getValue())) // sort descending by
-                                                                             // count
-             .skip(maxCounters) // skip most frequent
-             .map(Entry::getKey).collect(Collectors.toList()); // collect the least frequent
-                                                               // counters in a list
++        List<String> keysToRemove =
++            summary1.entrySet().stream().filter(e -> e.getKey().startsWith(COUNTER_STAT_PREFIX)) // filter
++                                                                                                 // out
++                                                                                                 // non
++                                                                                                 // counters
++                .sorted((e1, e2) -> Long.compare(e2.getValue(), e1.getValue())) // sort descending
++                                                                                // by
++                                                                                // count
++                .skip(maxCounters) // skip most frequent
++                .map(Entry::getKey).collect(Collectors.toList()); // collect the least frequent
++                                                                  // counters in a list
 +
 +        long removedCount = 0;
 +        for (String key : keysToRemove) {
 +          removedCount += summary1.remove(key);
 +        }
 +
 +        summary1.merge(TOO_MANY_STAT, removedCount, Long::sum);
 +      }
 +    };
 +  }
 +}
diff --cc core/src/main/java/org/apache/accumulo/core/client/summary/SummarizerConfiguration.java
index ab2a2bd,0000000..acc7ab3
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/summary/SummarizerConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/summary/SummarizerConfiguration.java
@@@ -1,298 -1,0 +1,298 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.summary;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +
 +import org.apache.accumulo.core.summary.SummarizerConfigurationUtil;
 +
 +import com.google.common.base.Preconditions;
 +import com.google.common.collect.ImmutableMap;
 +import com.google.common.hash.Hasher;
 +import com.google.common.hash.Hashing;
 +
 +/**
 + * This class encapsulates the configuration needed to instantiate a {@link Summarizer}. It also
 + * provides methods and documentation for setting the table properties that configure a Summarizer.
 + *
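 + * <p>
 + * A brief, illustrative sketch (the summarizer class name and option below are hypothetical):
 + *
 + * <pre>
 + * {@code
 + *   SummarizerConfiguration sc = SummarizerConfiguration.builder("com.example.MySummarizer")
 + *       .addOption("maxCounters", "2048").build();
 + *   Map<String,String> tableProps = sc.toTableProperties();
 + * }
 + * </pre>
 + *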
 + * @since 2.0.0
 + */
 +public class SummarizerConfiguration {
 +
 +  private final String className;
 +  private final Map<String,String> options;
 +  private int hashCode = 0;
 +  private final String configId;
 +
 +  private SummarizerConfiguration(String className, String configId, Map<String,String> options) {
 +    this.className = className;
 +    this.options = ImmutableMap.copyOf(options);
 +
 +    if (configId == null) {
 +      ArrayList<String> keys = new ArrayList<>(this.options.keySet());
 +      Collections.sort(keys);
 +      Hasher hasher = Hashing.murmur3_32().newHasher();
 +      hasher.putString(className, UTF_8);
 +      for (String key : keys) {
 +        hasher.putString(key, UTF_8);
 +        hasher.putString(options.get(key), UTF_8);
 +      }
 +
 +      this.configId = hasher.hash().toString();
 +    } else {
 +      this.configId = configId;
 +    }
 +  }
 +
 +  /**
 +   * @return the name of a class that implements {@link Summarizer}.
 +   */
 +  public String getClassName() {
 +    return className;
 +  }
 +
 +  /**
 +   * @return custom options for a {@link Summarizer}
 +   */
 +  public Map<String,String> getOptions() {
 +    return options;
 +  }
 +
 +  /**
 +   * The propertyId is used when creating table properties for a summarizer. It is not used for
 +   * equality or hashCode for this class.
 +   */
 +  public String getPropertyId() {
 +    return configId;
 +  }
 +
 +  @Override
 +  public String toString() {
 +    return className + " " + configId + " " + options;
 +  }
 +
 +  /**
 +   * Compares the classname and options to determine equality.
 +   */
 +  @Override
 +  public boolean equals(Object o) {
 +    if (o instanceof SummarizerConfiguration) {
 +      SummarizerConfiguration osc = (SummarizerConfiguration) o;
 +      return className.equals(osc.className) && options.equals(osc.options);
 +    }
 +
 +    return false;
 +  }
 +
 +  /**
 +   * Hashes the classname and options to create a hashcode.
 +   */
 +  @Override
 +  public int hashCode() {
 +    if (hashCode == 0) {
 +      hashCode = 31 * options.hashCode() + className.hashCode();
 +    }
 +    return hashCode;
 +  }
 +
 +  /**
 +   * Converts this configuration to Accumulo per table properties. The returned map has the
 +   * following key values. The {@code <configId>} below is from {@link #getPropertyId()}. The
 +   * {@code <optionKey>} and {@code <optionValue>} below are derived from the key values of
 +   * {@link #getOptions()}.
 +   *
 +   * <pre>
 +   * {@code
 +   *   table.summarizer.<configId>=<classname>
 +   *   table.summarizer.<configId>.opt.<optionKey1>=<optionValue1>
 +   *   table.summarizer.<configId>.opt.<optionKey2>=<optionValue2>
 +   *      .
 +   *      .
 +   *      .
 +   *   table.summarizer.<configId>.opt.<optionKeyN>=<optionValueN>
 +   * }
 +   * </pre>
 +   */
 +  public Map<String,String> toTableProperties() {
 +    return SummarizerConfigurationUtil.toTablePropertiesMap(Collections.singletonList(this));
 +  }
 +
 +  /**
 +   * Encodes each configuration in the same way as {@link #toTableProperties()}.
 +   *
 +   * @throws IllegalArgumentException
 +   *           when there are duplicate values for {@link #getPropertyId()}
 +   */
 +  public static Map<String,String> toTableProperties(SummarizerConfiguration... configurations) {
 +    return SummarizerConfigurationUtil.toTablePropertiesMap(Arrays.asList(configurations));
 +  }
 +
 +  /**
 +   * Encodes each configuration in the same way as {@link #toTableProperties()}.
 +   *
 +   * @throws IllegalArgumentException
 +   *           when there are duplicate values for {@link #getPropertyId()}
 +   */
-   public static Map<String,String> toTableProperties(
-       Collection<SummarizerConfiguration> configurations) {
++  public static Map<String,String>
++      toTableProperties(Collection<SummarizerConfiguration> configurations) {
 +    return SummarizerConfigurationUtil.toTablePropertiesMap(new ArrayList<>(configurations));
 +  }
 +
 +  /**
 +   * Decodes table properties with the prefix {@code table.summarizer} into
 +   * {@link SummarizerConfiguration} objects. Table properties with prefixes other than
 +   * {@code table.summarizer} are ignored.
 +   */
 +  public static Collection<SummarizerConfiguration> fromTableProperties(Map<String,String> props) {
 +    return fromTableProperties(props.entrySet());
 +  }
 +
 +  /**
 +   * @see #fromTableProperties(Map)
 +   */
-   public static Collection<SummarizerConfiguration> fromTableProperties(
-       Iterable<Entry<String,String>> props) {
++  public static Collection<SummarizerConfiguration>
++      fromTableProperties(Iterable<Entry<String,String>> props) {
 +    return SummarizerConfigurationUtil.getSummarizerConfigs(props);
 +  }
 +
 +  /**
 +   * @since 2.0.0
 +   */
 +  public static class Builder {
 +    private String className;
 +    private ImmutableMap.Builder<String,String> imBuilder;
 +    private String configId = null;
 +
 +    private Builder(String className) {
 +      this.className = className;
 +      this.imBuilder = ImmutableMap.builder();
 +    }
 +
 +    /**
 +     * Sets the id used when generating table properties. Setting this is optional. If not set, an
 +     * id is generated using hashing that will likely be unique.
 +     *
 +     * @param propId
 +     *          This id is used when converting a {@link SummarizerConfiguration} to table
 +     *          properties. Since tables can have multiple summarizers, make sure it is unique.
 +     *
 +     * @see SummarizerConfiguration#toTableProperties()
 +     */
 +    public Builder setPropertyId(String propId) {
 +      Preconditions.checkArgument(propId.matches("\\w+"), "Config Id %s is not alphanum", propId);
 +      this.configId = propId;
 +      return this;
 +    }
 +
 +    /**
 +     * Adds an option that Summarizers can use when constructing Collectors and Combiners.
 +     *
 +     * @return this
 +     *
 +     * @see SummarizerConfiguration#getOptions()
 +     */
 +    public Builder addOption(String key, String value) {
 +      Preconditions.checkArgument(key.matches("\\w+"), "Option Id %s is not alphanum", key);
 +      imBuilder.put(key, value);
 +      return this;
 +    }
 +
 +    /**
 +     * Adds an option that Summarizers can use when constructing Collectors and Combiners.
 +     *
 +     * @return this
 +     *
 +     * @see SummarizerConfiguration#getOptions()
 +     */
 +    public Builder addOption(String key, long value) {
 +      return addOption(key, Long.toString(value));
 +    }
 +
 +    /**
 +     * Convenience method for adding multiple options. The following
 +     *
 +     * <pre>
 +     * {@code builder.addOptions("opt1","val1","opt2","val2","opt3","val3")}
 +     * </pre>
 +     *
 +     * <p>
 +     * is equivalent to
 +     *
 +     * <pre>
 +     * {@code
 +     *   builder.addOption("opt1","val1");
 +     *   builder.addOption("opt2","val2");
 +     *   builder.addOption("opt3","val3");
 +     * }
 +     * </pre>
 +     *
 +     * @param keyValuePairs
 +     *          This array must have an even and positive number of elements.
 +     * @return this
 +     * @see SummarizerConfiguration#getOptions()
 +     */
 +    public Builder addOptions(String... keyValuePairs) {
 +      Preconditions.checkArgument(keyValuePairs.length % 2 == 0 && keyValuePairs.length > 0,
 +          "Require an even, positive number of arguments, got %s", keyValuePairs.length);
 +      for (int i = 1; i < keyValuePairs.length; i += 2) {
 +        addOption(keyValuePairs[i - 1], keyValuePairs[i]);
 +      }
 +      return this;
 +    }
 +
 +    /**
 +     * @param options
 +     *          Each entry in the map is passed to {@link #addOption(String, String)}
 +     * @return this
 +     *
 +     * @see SummarizerConfiguration#getOptions()
 +     */
 +    public Builder addOptions(Map<String,String> options) {
 +      options.entrySet().forEach(e -> addOption(e.getKey(), e.getValue()));
 +      return this;
 +    }
 +
 +    public SummarizerConfiguration build() {
 +      return new SummarizerConfiguration(className, configId, imBuilder.build());
 +    }
 +  }
 +
 +  /**
 +   * Call this method to initiate a chain of fluent method calls that creates an immutable
 +   * {@link SummarizerConfiguration}.
 +   *
 +   * @param className
 +   *          The fully qualified name of a class that implements {@link Summarizer}.
 +   */
 +  public static Builder builder(String className) {
 +    return new Builder(className);
 +  }
 +
 +  /**
 +   * @see #builder(String)
 +   */
 +  public static Builder builder(Class<? extends Summarizer> clazz) {
 +    return new Builder(clazz.getName());
 +  }
 +}
diff --cc core/src/main/java/org/apache/accumulo/core/client/summary/summarizers/AuthorizationSummarizer.java
index efee030,0000000..f9b02e0
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/client/summary/summarizers/AuthorizationSummarizer.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/summary/summarizers/AuthorizationSummarizer.java
@@@ -1,122 -1,0 +1,122 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.accumulo.core.client.summary.summarizers;
 +
 +import java.util.HashSet;
 +import java.util.LinkedHashMap;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.function.Consumer;
 +
 +import org.apache.accumulo.core.client.admin.TableOperations;
 +import org.apache.accumulo.core.client.summary.CountingSummarizer;
 +import org.apache.accumulo.core.data.ArrayByteSequence;
 +import org.apache.accumulo.core.data.ByteSequence;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.accumulo.core.security.ColumnVisibility.Node;
 +
 +/**
 + * Counts unique authorizations in column visibility labels. Leverages the super class to defend
 + * against tracking too many unique authorizations. This class is useful for discovering what
 + * authorizations are present when the expected number of authorizations is small.
 + *
 + * <p>
 + * As an example, assume a data set of three keys with the column visibilities
 + * {@code (A&C)|(A&D)}, {@code A&B}, and {@code C|E}. For this input the summarizer would output
 + * {@code c:A=2}, {@code c:B=1}, {@code c:C=2}, {@code c:D=1}, and {@code c:E=1}. Notice that even
 + * though {@code A} occurred 3 times in total, it is only counted once per column visibility.
 + *
 + * <p>
 + * See the superclass documentation for more information about usage and configuration.
 + *
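 + * <p>
 + * A minimal sketch of enabling this summarizer on a table (the {@code tableOps} instance and the
 + * table name are hypothetical):
 + *
 + * <pre>
 + * {@code
 + *   SummarizerConfiguration sc =
 + *       SummarizerConfiguration.builder(AuthorizationSummarizer.class).build();
 + *   tableOps.addSummarizers("exampleTable", sc);
 + * }
 + * </pre>
 + *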
 + * @since 2.0.0
 + *
 + * @see VisibilitySummarizer
 + * @see TableOperations#addSummarizers(String,
 + *      org.apache.accumulo.core.client.summary.SummarizerConfiguration...)
 + * @see TableOperations#summaries(String)
 + */
 +public class AuthorizationSummarizer extends CountingSummarizer<ByteSequence> {
 +
 +  @Override
 +  protected Converter<ByteSequence> converter() {
 +    return new AuthsConverter();
 +  }
 +
 +  private static class AuthsConverter implements Converter<ByteSequence> {
 +
 +    final int MAX_ENTRIES = 1000;
-     private Map<ByteSequence,Set<ByteSequence>> cache = new LinkedHashMap<ByteSequence,Set<ByteSequence>>(
-         MAX_ENTRIES + 1, .75F, true) {
-       private static final long serialVersionUID = 1L;
++    private Map<ByteSequence,Set<ByteSequence>> cache =
++        new LinkedHashMap<ByteSequence,Set<ByteSequence>>(MAX_ENTRIES + 1, .75F, true) {
++          private static final long serialVersionUID = 1L;
 +
-       // This method is called just after a new entry has been added
-       @Override
-       public boolean removeEldestEntry(Map.Entry<ByteSequence,Set<ByteSequence>> eldest) {
-         return size() > MAX_ENTRIES;
-       }
-     };
++          // This method is called just after a new entry has been added
++          @Override
++          public boolean removeEldestEntry(Map.Entry<ByteSequence,Set<ByteSequence>> eldest) {
++            return size() > MAX_ENTRIES;
++          }
++        };
 +
 +    @Override
 +    public void convert(Key k, Value v, Consumer<ByteSequence> consumer) {
 +      ByteSequence vis = k.getColumnVisibilityData();
 +
 +      if (vis.length() > 0) {
 +        Set<ByteSequence> auths = cache.get(vis);
 +        if (auths == null) {
 +          auths = findAuths(vis);
 +          cache.put(new ArrayByteSequence(vis), auths);
 +        }
 +
 +        for (ByteSequence auth : auths) {
 +          consumer.accept(auth);
 +        }
 +      }
 +    }
 +
 +    private Set<ByteSequence> findAuths(ByteSequence vis) {
 +      HashSet<ByteSequence> auths = new HashSet<>();
 +      byte[] expression = vis.toArray();
 +      Node root = new ColumnVisibility(expression).getParseTree();
 +
 +      findAuths(root, expression, auths);
 +
 +      return auths;
 +    }
 +
 +    private void findAuths(Node node, byte[] expression, HashSet<ByteSequence> auths) {
 +      switch (node.getType()) {
 +        case AND:
 +        case OR:
 +          for (Node child : node.getChildren()) {
 +            findAuths(child, expression, auths);
 +          }
 +          break;
 +        case TERM:
 +          auths.add(node.getTerm(expression));
 +          break;
 +        case EMPTY:
 +          break;
 +        default:
 +          throw new IllegalArgumentException("Unknown node type " + node.getType());
 +      }
 +    }
 +  }
 +}
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ActiveCompactionImpl.java
index ea31889,0000000..64d9b66
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ActiveCompactionImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ActiveCompactionImpl.java
@@@ -1,110 -1,0 +1,110 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Map;
 +
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.admin.ActiveCompaction;
 +import org.apache.accumulo.core.data.TabletId;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.dataImpl.TabletIdImpl;
 +import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
 +
 +/**
 + * @since 1.6.0
 + */
 +public class ActiveCompactionImpl extends ActiveCompaction {
 +
 +  private org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction tac;
 +  private ClientContext context;
 +
 +  ActiveCompactionImpl(ClientContext context,
 +      org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction tac) {
 +    this.tac = tac;
 +    this.context = context;
 +  }
 +
 +  @Override
 +  public String getTable() throws TableNotFoundException {
 +    return Tables.getTableName(context, new KeyExtent(tac.getExtent()).getTableId());
 +  }
 +
 +  @Override
 +  public TabletId getTablet() {
 +    return new TabletIdImpl(new KeyExtent(tac.getExtent()));
 +  }
 +
 +  @Override
 +  public long getAge() {
 +    return tac.getAge();
 +  }
 +
 +  @Override
 +  public List<String> getInputFiles() {
 +    return tac.getInputFiles();
 +  }
 +
 +  @Override
 +  public String getOutputFile() {
 +    return tac.getOutputFile();
 +  }
 +
 +  @Override
 +  public CompactionType getType() {
 +    return CompactionType.valueOf(tac.getType().name());
 +  }
 +
 +  @Override
 +  public CompactionReason getReason() {
 +    return CompactionReason.valueOf(tac.getReason().name());
 +  }
 +
 +  @Override
 +  public String getLocalityGroup() {
 +    return tac.getLocalityGroup();
 +  }
 +
 +  @Override
 +  public long getEntriesRead() {
 +    return tac.getEntriesRead();
 +  }
 +
 +  @Override
 +  public long getEntriesWritten() {
 +    return tac.getEntriesWritten();
 +  }
 +
 +  @Override
 +  public List<IteratorSetting> getIterators() {
 +    ArrayList<IteratorSetting> ret = new ArrayList<>();
 +
 +    for (IterInfo ii : tac.getSsiList()) {
-       IteratorSetting settings = new IteratorSetting(ii.getPriority(), ii.getIterName(),
-           ii.getClassName());
++      IteratorSetting settings =
++          new IteratorSetting(ii.getPriority(), ii.getIterName(), ii.getClassName());
 +      Map<String,String> options = tac.getSsio().get(ii.getIterName());
 +      settings.addOptions(options);
 +
 +      ret.add(settings);
 +    }
 +
 +    return ret;
 +  }
 +}
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ClientConfConverter.java
index 50f5d21,0000000..0550c72
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientConfConverter.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientConfConverter.java
@@@ -1,293 -1,0 +1,293 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.Map;
 +import java.util.Properties;
 +import java.util.function.Predicate;
 +
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.ClientProperty;
 +import org.apache.accumulo.core.conf.CredentialProviderFactoryShim;
 +import org.apache.accumulo.core.conf.DefaultConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.rpc.SaslConnectionParams;
 +import org.apache.hadoop.security.authentication.util.KerberosName;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +public class ClientConfConverter {
 +
 +  private static final Logger log = LoggerFactory.getLogger(ClientConfConverter.class);
 +  private static Map<String,String> confProps = new HashMap<>();
 +  private static Map<String,String> propsConf = new HashMap<>();
 +
 +  @SuppressWarnings("deprecation")
 +  private static void init() {
 +    propsConf.put(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_ZK_HOST
 +            .getKey());
 +    propsConf.put(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_ZK_TIMEOUT
 +            .getKey());
 +    propsConf.put(ClientProperty.SSL_ENABLED.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_RPC_SSL_ENABLED
 +            .getKey());
 +    propsConf.put(ClientProperty.SSL_KEYSTORE_PATH.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.RPC_SSL_KEYSTORE_PATH
 +            .getKey());
 +    propsConf.put(ClientProperty.SSL_KEYSTORE_TYPE.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.RPC_SSL_KEYSTORE_TYPE
 +            .getKey());
 +    propsConf.put(ClientProperty.SSL_KEYSTORE_PASSWORD.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.RPC_SSL_KEYSTORE_PASSWORD
 +            .getKey());
 +    propsConf.put(ClientProperty.SSL_TRUSTSTORE_PATH.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.RPC_SSL_TRUSTSTORE_PATH
 +            .getKey());
 +    propsConf.put(ClientProperty.SSL_TRUSTSTORE_TYPE.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.RPC_SSL_TRUSTSTORE_TYPE
 +            .getKey());
 +    propsConf.put(ClientProperty.SSL_TRUSTSTORE_PASSWORD.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.RPC_SSL_TRUSTSTORE_PASSWORD
 +            .getKey());
 +    propsConf.put(ClientProperty.SSL_USE_JSSE.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.RPC_USE_JSSE.getKey());
 +    propsConf.put(ClientProperty.SASL_ENABLED.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_RPC_SASL_ENABLED
 +            .getKey());
 +    propsConf.put(ClientProperty.SASL_QOP.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.RPC_SASL_QOP.getKey());
 +    propsConf.put(ClientProperty.SASL_KERBEROS_SERVER_PRIMARY.getKey(),
 +        org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.KERBEROS_SERVER_PRIMARY
 +            .getKey());
 +
 +    for (Map.Entry<String,String> entry : propsConf.entrySet()) {
 +      confProps.put(entry.getValue(), entry.getKey());
 +    }
 +  }
 +
 +  static {
 +    init();
 +  }
 +
 +  @SuppressWarnings("deprecation")
-   public static org.apache.accumulo.core.client.ClientConfiguration toClientConf(
-       Properties properties) {
-     org.apache.accumulo.core.client.ClientConfiguration config = org.apache.accumulo.core.client.ClientConfiguration
-         .create();
++  public static org.apache.accumulo.core.client.ClientConfiguration
++      toClientConf(Properties properties) {
++    org.apache.accumulo.core.client.ClientConfiguration config =
++        org.apache.accumulo.core.client.ClientConfiguration.create();
 +    for (Object keyObj : properties.keySet()) {
 +      String propKey = (String) keyObj;
 +      String val = properties.getProperty(propKey);
 +      String confKey = propsConf.get(propKey);
 +      if (confKey == null) {
 +        config.setProperty(propKey, val);
 +      } else {
 +        config.setProperty(confKey, val);
 +      }
 +      if (propKey.equals(ClientProperty.SSL_KEYSTORE_PATH.getKey())) {
 +        config.setProperty(
 +            org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH,
 +            "true");
 +      }
 +    }
 +    return config;
 +  }
 +
 +  @SuppressWarnings("deprecation")
-   public static Properties toProperties(
-       org.apache.accumulo.core.client.ClientConfiguration clientConf) {
++  public static Properties
++      toProperties(org.apache.accumulo.core.client.ClientConfiguration clientConf) {
 +    Properties props = new Properties();
 +    Iterator<String> clientConfIter = clientConf.getKeys();
 +    while (clientConfIter.hasNext()) {
 +      String confKey = clientConfIter.next();
 +      String val = clientConf.getString(confKey);
 +      String propKey = confProps.get(confKey);
 +      if (propKey == null) {
 +        if (!confKey.equals(
 +            org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH
 +                .getKey())) {
 +          props.setProperty(confKey, val);
 +        }
 +      } else {
 +        props.setProperty(propKey, val);
 +      }
 +    }
 +    return props;
 +  }
 +
 +  public static Properties toProperties(AccumuloConfiguration config) {
 +    return toProperties(toClientConf(config));
 +  }
 +
 +  public static AccumuloConfiguration toAccumuloConf(Properties properties) {
 +    return toAccumuloConf(toClientConf(properties));
 +  }
 +
 +  /**
 +   * A utility method for converting client configuration to a standard configuration object for use
 +   * internally.
 +   *
 +   * @param config
 +   *          the original config
 +   * @return the client configuration presented in the form of an {@link AccumuloConfiguration}
 +   */
 +  @SuppressWarnings("deprecation")
-   public static AccumuloConfiguration toAccumuloConf(
-       final org.apache.accumulo.core.client.ClientConfiguration config) {
++  public static AccumuloConfiguration
++      toAccumuloConf(final org.apache.accumulo.core.client.ClientConfiguration config) {
 +
 +    final AccumuloConfiguration defaults = DefaultConfiguration.getInstance();
 +
 +    return new AccumuloConfiguration() {
 +
 +      @Override
 +      public String get(Property property) {
 +        final String key = property.getKey();
 +
 +        // Attempt to load sensitive properties from a CredentialProvider, if configured
 +        if (property.isSensitive()) {
 +          org.apache.hadoop.conf.Configuration hadoopConf = getHadoopConfiguration();
 +          if (hadoopConf != null) {
-             char[] value = CredentialProviderFactoryShim.getValueFromCredentialProvider(hadoopConf,
-                 key);
++            char[] value =
++                CredentialProviderFactoryShim.getValueFromCredentialProvider(hadoopConf, key);
 +            if (value != null) {
 +              log.trace("Loaded sensitive value for {} from CredentialProvider", key);
 +              return new String(value);
 +            } else {
 +              log.trace("Tried to load sensitive value for {} from CredentialProvider, "
 +                  + "but none was found", key);
 +            }
 +          }
 +        }
 +
 +        if (config.containsKey(key))
 +          return config.getString(key);
 +        else {
 +          // Reconstitute the server kerberos property from the client config
 +          if (property == Property.GENERAL_KERBEROS_PRINCIPAL) {
 +            if (config.containsKey(
 +                org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.KERBEROS_SERVER_PRIMARY
 +                    .getKey())) {
 +              // Avoid providing a realm since we don't know what it is...
 +              return config.getString(
 +                  org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.KERBEROS_SERVER_PRIMARY
 +                      .getKey())
 +                  + "/_HOST@" + SaslConnectionParams.getDefaultRealm();
 +            }
 +          }
 +          return defaults.get(property);
 +        }
 +      }
 +
 +      @Override
 +      public void getProperties(Map<String,String> props, Predicate<String> filter) {
 +        defaults.getProperties(props, filter);
 +
 +        Iterator<String> keyIter = config.getKeys();
 +        while (keyIter.hasNext()) {
 +          String key = keyIter.next();
 +          if (filter.test(key))
 +            props.put(key, config.getString(key));
 +        }
 +
 +        // Two client props that don't exist on the server config. Client doesn't need to know about
 +        // the Kerberos instance from the principal, but servers do.
 +        // Automatically reconstruct the server property when converting a client config.
 +        if (props.containsKey(
 +            org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.KERBEROS_SERVER_PRIMARY
 +                .getKey())) {
 +          final String serverPrimary = props.remove(
 +              org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.KERBEROS_SERVER_PRIMARY
 +                  .getKey());
 +          if (filter.test(Property.GENERAL_KERBEROS_PRINCIPAL.getKey())) {
 +            // Use the _HOST expansion. It should be unnecessary in "client land".
 +            props.put(Property.GENERAL_KERBEROS_PRINCIPAL.getKey(),
 +                serverPrimary + "/_HOST@" + SaslConnectionParams.getDefaultRealm());
 +          }
 +        }
 +
 +        // Attempt to load sensitive properties from a CredentialProvider, if configured
 +        org.apache.hadoop.conf.Configuration hadoopConf = getHadoopConfiguration();
 +        if (hadoopConf != null) {
 +          for (String key : CredentialProviderFactoryShim.getKeys(hadoopConf)) {
 +            if (!Property.isValidPropertyKey(key) || !Property.isSensitive(key)) {
 +              continue;
 +            }
 +            if (filter.test(key)) {
-               char[] value = CredentialProviderFactoryShim
-                   .getValueFromCredentialProvider(hadoopConf, key);
++              char[] value =
++                  CredentialProviderFactoryShim.getValueFromCredentialProvider(hadoopConf, key);
 +              if (value != null) {
 +                props.put(key, new String(value));
 +              }
 +            }
 +          }
 +        }
 +      }
 +
 +      private org.apache.hadoop.conf.Configuration getHadoopConfiguration() {
-         String credProviderPaths = config
-             .getString(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
++        String credProviderPaths =
++            config.getString(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
 +        if (credProviderPaths != null && !credProviderPaths.isEmpty()) {
 +          org.apache.hadoop.conf.Configuration hConf = new org.apache.hadoop.conf.Configuration();
 +          hConf.set(CredentialProviderFactoryShim.CREDENTIAL_PROVIDER_PATH, credProviderPaths);
 +          return hConf;
 +        }
 +
 +        log.trace("Did not find credential provider configuration in ClientConfiguration");
 +
 +        return null;
 +      }
 +    };
 +  }
 +
 +  @SuppressWarnings("deprecation")
-   public static org.apache.accumulo.core.client.ClientConfiguration toClientConf(
-       AccumuloConfiguration conf) {
-     org.apache.accumulo.core.client.ClientConfiguration clientConf = org.apache.accumulo.core.client.ClientConfiguration
-         .create();
++  public static org.apache.accumulo.core.client.ClientConfiguration
++      toClientConf(AccumuloConfiguration conf) {
++    org.apache.accumulo.core.client.ClientConfiguration clientConf =
++        org.apache.accumulo.core.client.ClientConfiguration.create();
 +
 +    // Servers will only have the full principal in their configuration -- parse the
 +    // primary and realm from it.
 +    final String serverPrincipal = conf.get(Property.GENERAL_KERBEROS_PRINCIPAL);
 +
 +    final KerberosName krbName;
 +    if (serverPrincipal != null && !serverPrincipal.isEmpty()) {
 +      krbName = new KerberosName(serverPrincipal);
 +      clientConf.setProperty(
 +          org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.KERBEROS_SERVER_PRIMARY,
 +          krbName.getServiceName());
 +    }
 +
 +    HashSet<String> clientKeys = new HashSet<>();
 +    for (org.apache.accumulo.core.client.ClientConfiguration.ClientProperty prop : org.apache.accumulo.core.client.ClientConfiguration.ClientProperty
 +        .values()) {
 +      clientKeys.add(prop.getKey());
 +    }
 +
 +    String key;
 +    for (Map.Entry<String,String> entry : conf) {
 +      key = entry.getKey();
 +      if (clientKeys.contains(key)) {
 +        clientConf.setProperty(key, entry.getValue());
 +      }
 +    }
 +    return clientConf;
 +  }
 +}
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java
index 7451d1c,0000000..01474b2
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java
@@@ -1,824 -1,0 +1,824 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.nio.file.Path;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.Objects;
 +import java.util.Properties;
 +import java.util.concurrent.TimeUnit;
 +import java.util.function.Function;
 +import java.util.function.Supplier;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloClient;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.BatchDeleter;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.ConditionalWriter;
 +import org.apache.accumulo.core.client.ConditionalWriterConfig;
 +import org.apache.accumulo.core.client.Durability;
 +import org.apache.accumulo.core.client.MultiTableBatchWriter;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.TableOfflineException;
 +import org.apache.accumulo.core.client.admin.InstanceOperations;
 +import org.apache.accumulo.core.client.admin.NamespaceOperations;
 +import org.apache.accumulo.core.client.admin.ReplicationOperations;
 +import org.apache.accumulo.core.client.admin.SecurityOperations;
 +import org.apache.accumulo.core.client.admin.TableOperations;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.ClientProperty;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.rpc.SaslConnectionParams;
 +import org.apache.accumulo.core.rpc.SslConnectionParams;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.securityImpl.thrift.TCredentials;
 +import org.apache.accumulo.core.singletons.SingletonManager;
 +import org.apache.accumulo.core.singletons.SingletonReservation;
 +import org.apache.accumulo.core.util.OpTimer;
 +import org.apache.accumulo.fate.zookeeper.ZooCache;
 +import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil;
 +import org.apache.hadoop.conf.Configuration;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.base.Suppliers;
 +
 +/**
 + * This class represents any essential configuration and credentials needed to initiate RPC
 + * operations throughout the code. It is intended to represent a shared object that contains these
 + * things from when the client was first constructed. It is not public API, and is only an internal
 + * representation of the context in which a client is executing RPCs. If additional parameters are
 + * added to the public API that need to be used in the internals of Accumulo, they should be added
 + * to this object for later retrieval, rather than as a separate parameter. Any state in this object
 + * should be available at the time of its construction.
 + */
 +public class ClientContext implements AccumuloClient {
 +
 +  private static final Logger log = LoggerFactory.getLogger(ClientContext.class);
 +
 +  private ClientInfo info;
 +  private String instanceId;
 +  private final ZooCache zooCache;
 +
 +  private Credentials creds;
 +  private BatchWriterConfig batchWriterConfig;
 +  private AccumuloConfiguration serverConf;
 +  private Configuration hadoopConf;
 +
 +  // These fields are very frequently accessed (each time a connection is created) and expensive to
 +  // compute, so cache them.
 +  private Supplier<Long> timeoutSupplier;
 +  private Supplier<SaslConnectionParams> saslSupplier;
 +  private Supplier<SslConnectionParams> sslSupplier;
 +  private TCredentials rpcCreds;
 +
 +  private volatile boolean closed = false;
 +
 +  private SecurityOperations secops = null;
 +  private TableOperationsImpl tableops = null;
 +  private NamespaceOperations namespaceops = null;
 +  private InstanceOperations instanceops = null;
 +  private ReplicationOperations replicationops = null;
 +  private SingletonReservation singletonReservation;
 +
 +  private void ensureOpen() {
 +    if (closed) {
 +      throw new IllegalStateException("This client was closed.");
 +    }
 +  }
 +
 +  private static <T> Supplier<T> memoizeWithExpiration(Supplier<T> s) {
 +    // This insanity exists to make modernizer plugin happy. We are living in the future now.
 +    return () -> Suppliers.memoizeWithExpiration(s::get, 100, TimeUnit.MILLISECONDS).get();
 +  }
 +
 +  public ClientContext(Properties clientProperties) {
 +    this(ClientInfo.from(clientProperties));
 +  }
 +
 +  public ClientContext(SingletonReservation reservation, ClientInfo info) {
 +    this(reservation, info, ClientConfConverter.toAccumuloConf(info.getProperties()));
 +  }
 +
 +  public ClientContext(ClientInfo info) {
 +    this(info, ClientConfConverter.toAccumuloConf(info.getProperties()));
 +  }
 +
 +  public ClientContext(ClientInfo info, AccumuloConfiguration serverConf) {
 +    this(SingletonReservation.noop(), info, serverConf);
 +  }
 +
 +  public ClientContext(SingletonReservation reservation, ClientInfo info,
 +      AccumuloConfiguration serverConf) {
 +    this.info = info;
 +    this.hadoopConf = info.getHadoopConf();
-     zooCache = new ZooCacheFactory().getZooCache(info.getZooKeepers(),
-         info.getZooKeepersSessionTimeOut());
++    zooCache =
++        new ZooCacheFactory().getZooCache(info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
 +    this.serverConf = serverConf;
 +    timeoutSupplier = memoizeWithExpiration(
 +        () -> getConfiguration().getTimeInMillis(Property.GENERAL_RPC_TIMEOUT));
 +    sslSupplier = memoizeWithExpiration(() -> SslConnectionParams.forClient(getConfiguration()));
 +    saslSupplier = memoizeWithExpiration(
 +        () -> SaslConnectionParams.from(getConfiguration(), getCredentials().getToken()));
 +    this.singletonReservation = Objects.requireNonNull(reservation);
 +    this.tableops = new TableOperationsImpl(this);
 +    this.namespaceops = new NamespaceOperationsImpl(this, tableops);
 +  }
 +
 +  /**
 +   * Retrieve the instance used to construct this context
 +   *
 +   * @deprecated since 2.0.0
 +   */
 +  @Deprecated
 +  public org.apache.accumulo.core.client.Instance getDeprecatedInstance() {
 +    final ClientContext context = this;
 +    return new org.apache.accumulo.core.client.Instance() {
 +      @Override
 +      public String getRootTabletLocation() {
 +        return context.getRootTabletLocation();
 +      }
 +
 +      @Override
 +      public List<String> getMasterLocations() {
 +        return context.getMasterLocations();
 +      }
 +
 +      @Override
 +      public String getInstanceID() {
 +        return context.getInstanceID();
 +      }
 +
 +      @Override
 +      public String getInstanceName() {
 +        return context.getInstanceName();
 +      }
 +
 +      @Override
 +      public String getZooKeepers() {
 +        return context.getZooKeepers();
 +      }
 +
 +      @Override
 +      public int getZooKeepersSessionTimeOut() {
 +        return context.getZooKeepersSessionTimeOut();
 +      }
 +
 +      @Override
 +      public org.apache.accumulo.core.client.Connector getConnector(String principal,
 +          AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
 +        return org.apache.accumulo.core.client.Connector.from(context);
 +      }
 +    };
 +  }
 +
 +  /**
 +   * Retrieve the credentials used to construct this context
 +   */
 +  public synchronized Credentials getCredentials() {
 +    ensureOpen();
 +    if (creds == null) {
 +      creds = new Credentials(info.getPrincipal(), info.getAuthenticationToken());
 +    }
 +    return creds;
 +  }
 +
 +  public String getPrincipal() {
 +    ensureOpen();
 +    return getCredentials().getPrincipal();
 +  }
 +
 +  public AuthenticationToken getAuthenticationToken() {
 +    ensureOpen();
 +    return getCredentials().getToken();
 +  }
 +
 +  public Properties getProperties() {
 +    ensureOpen();
 +    return info.getProperties();
 +  }
 +
 +  /**
 +   * Update the credentials in the current context after changing the current user's password or
 +   * other auth token
 +   */
 +  public synchronized void setCredentials(Credentials newCredentials) {
 +    checkArgument(newCredentials != null, "newCredentials is null");
 +    ensureOpen();
 +    creds = newCredentials;
 +    rpcCreds = null;
 +  }
 +
 +  /**
 +   * Retrieve the configuration used to construct this context
 +   */
 +  public AccumuloConfiguration getConfiguration() {
 +    ensureOpen();
 +    return serverConf;
 +  }
 +
 +  /**
 +   * Retrieve the hadoop configuration
 +   */
 +  public Configuration getHadoopConf() {
 +    ensureOpen();
 +    return this.hadoopConf;
 +  }
 +
 +  /**
 +   * Retrieve the universal RPC client timeout from the configuration
 +   */
 +  public long getClientTimeoutInMillis() {
 +    ensureOpen();
 +    return timeoutSupplier.get();
 +  }
 +
 +  /**
 +   * Retrieve SSL/TLS configuration to initiate an RPC connection to a server
 +   */
 +  public SslConnectionParams getClientSslParams() {
 +    ensureOpen();
 +    return sslSupplier.get();
 +  }
 +
 +  /**
 +   * Retrieve SASL configuration to initiate an RPC connection to a server
 +   */
 +  public SaslConnectionParams getSaslParams() {
 +    ensureOpen();
 +    return saslSupplier.get();
 +  }
 +
 +  public BatchWriterConfig getBatchWriterConfig() {
 +    ensureOpen();
 +    if (batchWriterConfig == null) {
 +      Properties props = info.getProperties();
 +      batchWriterConfig = new BatchWriterConfig();
 +      Long maxMemory = ClientProperty.BATCH_WRITER_MEMORY_MAX.getBytes(props);
 +      if (maxMemory != null) {
 +        batchWriterConfig.setMaxMemory(maxMemory);
 +      }
 +      Long maxLatency = ClientProperty.BATCH_WRITER_LATENCY_MAX.getTimeInMillis(props);
 +      if (maxLatency != null) {
 +        batchWriterConfig.setMaxLatency(maxLatency, TimeUnit.SECONDS);
 +      }
 +      Long timeout = ClientProperty.BATCH_WRITER_TIMEOUT_MAX.getTimeInMillis(props);
 +      if (timeout != null) {
 +        batchWriterConfig.setTimeout(timeout, TimeUnit.SECONDS);
 +      }
 +      String durability = ClientProperty.BATCH_WRITER_DURABILITY.getValue(props);
 +      if (!durability.isEmpty()) {
 +        batchWriterConfig.setDurability(Durability.valueOf(durability.toUpperCase()));
 +      }
 +    }
 +    return batchWriterConfig;
 +  }
 +
 +  /**
 +   * Serialize the credentials just before initiating the RPC call
 +   */
 +  public synchronized TCredentials rpcCreds() {
 +    ensureOpen();
 +    if (getCredentials().getToken().isDestroyed()) {
 +      rpcCreds = null;
 +    }
 +
 +    if (rpcCreds == null) {
 +      rpcCreds = getCredentials().toThrift(getInstanceID());
 +    }
 +
 +    return rpcCreds;
 +  }
 +
 +  /**
 +   * Returns the location of the tablet server that is serving the root tablet.
 +   *
 +   * @return location in "hostname:port" form
 +   */
 +  public String getRootTabletLocation() {
 +    ensureOpen();
 +    String zRootLocPath = getZooKeeperRoot() + RootTable.ZROOT_TABLET_LOCATION;
 +
 +    OpTimer timer = null;
 +
 +    if (log.isTraceEnabled()) {
 +      log.trace("tid={} Looking up root tablet location in zookeeper.",
 +          Thread.currentThread().getId());
 +      timer = new OpTimer().start();
 +    }
 +
 +    byte[] loc = zooCache.get(zRootLocPath);
 +
 +    if (timer != null) {
 +      timer.stop();
 +      log.trace("tid={} Found root tablet at {} in {}", Thread.currentThread().getId(),
 +          (loc == null ? "null" : new String(loc, UTF_8)),
 +          String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
 +    }
 +
 +    if (loc == null) {
 +      return null;
 +    }
 +
 +    return new String(loc, UTF_8).split("\\|")[0];
 +  }
 +
 +  /**
 +   * Returns the location(s) of the accumulo master and any redundant servers.
 +   *
 +   * @return a list of locations in "hostname:port" form
 +   */
 +  public List<String> getMasterLocations() {
 +    ensureOpen();
 +    String masterLocPath = getZooKeeperRoot() + Constants.ZMASTER_LOCK;
 +
 +    OpTimer timer = null;
 +
 +    if (log.isTraceEnabled()) {
 +      log.trace("tid={} Looking up master location in zookeeper.", Thread.currentThread().getId());
 +      timer = new OpTimer().start();
 +    }
 +
 +    byte[] loc = ZooUtil.getLockData(zooCache, masterLocPath);
 +
 +    if (timer != null) {
 +      timer.stop();
 +      log.trace("tid={} Found master at {} in {}", Thread.currentThread().getId(),
 +          (loc == null ? "null" : new String(loc, UTF_8)),
 +          String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
 +    }
 +
 +    if (loc == null) {
 +      return Collections.emptyList();
 +    }
 +
 +    return Collections.singletonList(new String(loc, UTF_8));
 +  }
 +
 +  /**
 +   * Returns a unique string that identifies this instance of accumulo.
 +   *
 +   * @return a UUID
 +   */
 +  public String getInstanceID() {
 +    ensureOpen();
 +    final String instanceName = info.getInstanceName();
 +    if (instanceId == null) {
 +      // want the instance id to be stable for the life of this instance object,
 +      // so only get it once
 +      String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName;
 +      byte[] iidb = zooCache.get(instanceNamePath);
 +      if (iidb == null) {
 +        throw new RuntimeException(
 +            "Instance name " + instanceName + " does not exist in zookeeper. "
 +                + "Run \"accumulo org.apache.accumulo.server.util.ListInstances\" to see a list.");
 +      }
 +      instanceId = new String(iidb, UTF_8);
 +    }
 +
 +    if (zooCache.get(Constants.ZROOT + "/" + instanceId) == null) {
 +      if (instanceName == null)
 +        throw new RuntimeException("Instance id " + instanceId + " does not exist in zookeeper");
 +      throw new RuntimeException("Instance id " + instanceId + " pointed to by the name "
 +          + instanceName + " does not exist in zookeeper");
 +    }
 +
 +    return instanceId;
 +  }
 +
 +  public String getZooKeeperRoot() {
 +    ensureOpen();
 +    return ZooUtil.getRoot(getInstanceID());
 +  }
 +
 +  /**
 +   * Returns the instance name given at system initialization time.
 +   *
 +   * @return current instance name
 +   */
 +  public String getInstanceName() {
 +    ensureOpen();
 +    return info.getInstanceName();
 +  }
 +
 +  /**
 +   * Returns a comma-separated list of zookeeper servers the instance is using.
 +   *
 +   * @return the zookeeper servers this instance is using in "hostname:port" form
 +   */
 +  public String getZooKeepers() {
 +    ensureOpen();
 +    return info.getZooKeepers();
 +  }
 +
 +  /**
 +   * Returns the zookeeper connection timeout.
 +   *
 +   * @return the configured timeout to connect to zookeeper
 +   */
 +  public int getZooKeepersSessionTimeOut() {
 +    ensureOpen();
 +    return info.getZooKeepersSessionTimeOut();
 +  }
 +
 +  public ZooCache getZooCache() {
 +    ensureOpen();
 +    return zooCache;
 +  }
 +
 +  TableId getTableId(String tableName) throws TableNotFoundException {
 +    TableId tableId = Tables.getTableId(this, tableName);
 +    if (Tables.getTableState(this, tableId) == TableState.OFFLINE)
 +      throw new TableOfflineException(Tables.getTableOfflineMsg(this, tableId));
 +    return tableId;
 +  }
 +
 +  @Override
 +  public BatchScanner createBatchScanner(String tableName, Authorizations authorizations,
 +      int numQueryThreads) throws TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(authorizations != null, "authorizations is null");
 +    ensureOpen();
 +    return new TabletServerBatchReader(this, getTableId(tableName), authorizations,
 +        numQueryThreads);
 +  }
 +
 +  @Override
 +  public BatchScanner createBatchScanner(String tableName, Authorizations authorizations)
 +      throws TableNotFoundException {
-     Integer numQueryThreads = ClientProperty.BATCH_SCANNER_NUM_QUERY_THREADS
-         .getInteger(getProperties());
++    Integer numQueryThreads =
++        ClientProperty.BATCH_SCANNER_NUM_QUERY_THREADS.getInteger(getProperties());
 +    Objects.requireNonNull(numQueryThreads);
 +    ensureOpen();
 +    return createBatchScanner(tableName, authorizations, numQueryThreads);
 +  }
 +
 +  @Override
 +  public BatchScanner createBatchScanner(String tableName)
 +      throws TableNotFoundException, AccumuloSecurityException, AccumuloException {
 +    Authorizations auths = securityOperations().getUserAuthorizations(getPrincipal());
 +    return createBatchScanner(tableName, auths);
 +  }
 +
 +  @Override
 +  public BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations,
 +      int numQueryThreads, BatchWriterConfig config) throws TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(authorizations != null, "authorizations is null");
 +    ensureOpen();
 +    return new TabletServerBatchDeleter(this, getTableId(tableName), authorizations,
 +        numQueryThreads, config.merge(getBatchWriterConfig()));
 +  }
 +
 +  @Override
 +  public BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations,
 +      int numQueryThreads) throws TableNotFoundException {
 +    ensureOpen();
 +    return createBatchDeleter(tableName, authorizations, numQueryThreads, new BatchWriterConfig());
 +  }
 +
 +  @Override
 +  public BatchWriter createBatchWriter(String tableName, BatchWriterConfig config)
 +      throws TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    ensureOpen();
 +    // we used to allow null inputs for bw config
 +    if (config == null) {
 +      config = new BatchWriterConfig();
 +    }
 +    return new BatchWriterImpl(this, getTableId(tableName), config.merge(getBatchWriterConfig()));
 +  }
 +
 +  @Override
 +  public BatchWriter createBatchWriter(String tableName) throws TableNotFoundException {
 +    return createBatchWriter(tableName, new BatchWriterConfig());
 +  }
 +
 +  @Override
 +  public MultiTableBatchWriter createMultiTableBatchWriter(BatchWriterConfig config) {
 +    ensureOpen();
 +    return new MultiTableBatchWriterImpl(this, config.merge(getBatchWriterConfig()));
 +  }
 +
 +  @Override
 +  public MultiTableBatchWriter createMultiTableBatchWriter() {
 +    return createMultiTableBatchWriter(new BatchWriterConfig());
 +  }
 +
 +  @Override
 +  public ConditionalWriter createConditionalWriter(String tableName, ConditionalWriterConfig config)
 +      throws TableNotFoundException {
 +    ensureOpen();
 +    return new ConditionalWriterImpl(this, getTableId(tableName), config);
 +  }
 +
 +  @Override
 +  public Scanner createScanner(String tableName, Authorizations authorizations)
 +      throws TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(authorizations != null, "authorizations is null");
 +    ensureOpen();
 +    Scanner scanner = new ScannerImpl(this, getTableId(tableName), authorizations);
 +    Integer batchSize = ClientProperty.SCANNER_BATCH_SIZE.getInteger(getProperties());
 +    if (batchSize != null) {
 +      scanner.setBatchSize(batchSize);
 +    }
 +    return scanner;
 +  }
 +
 +  @Override
 +  public Scanner createScanner(String tableName)
 +      throws TableNotFoundException, AccumuloSecurityException, AccumuloException {
 +    Authorizations auths = securityOperations().getUserAuthorizations(getPrincipal());
 +    return createScanner(tableName, auths);
 +  }
 +
 +  @Override
 +  public String whoami() {
 +    ensureOpen();
 +    return getCredentials().getPrincipal();
 +  }
 +
 +  @Override
 +  public synchronized TableOperations tableOperations() {
 +    ensureOpen();
 +    return tableops;
 +  }
 +
 +  @Override
 +  public synchronized NamespaceOperations namespaceOperations() {
 +    ensureOpen();
 +    return namespaceops;
 +  }
 +
 +  @Override
 +  public synchronized SecurityOperations securityOperations() {
 +    ensureOpen();
 +    if (secops == null)
 +      secops = new SecurityOperationsImpl(this);
 +
 +    return secops;
 +  }
 +
 +  @Override
 +  public synchronized InstanceOperations instanceOperations() {
 +    ensureOpen();
 +    if (instanceops == null)
 +      instanceops = new InstanceOperationsImpl(this);
 +
 +    return instanceops;
 +  }
 +
 +  @Override
 +  public synchronized ReplicationOperations replicationOperations() {
 +    ensureOpen();
 +    if (replicationops == null) {
 +      replicationops = new ReplicationOperationsImpl(this);
 +    }
 +
 +    return replicationops;
 +  }
 +
 +  @Override
 +  public Properties properties() {
 +    ensureOpen();
 +    Properties result = new Properties();
 +    getProperties().forEach((key, value) -> {
 +      if (!key.equals(ClientProperty.AUTH_TOKEN.getKey())) {
 +        result.setProperty((String) key, (String) value);
 +      }
 +    });
 +    return result;
 +  }
 +
 +  public AuthenticationToken token() {
 +    ensureOpen();
 +    return getAuthenticationToken();
 +  }
 +
 +  @Override
 +  public void close() {
 +    closed = true;
 +    singletonReservation.close();
 +  }
 +
 +  public static class ClientBuilderImpl<T>
 +      implements InstanceArgs<T>, PropertyOptions<T>, AuthenticationArgs<T>, ConnectionOptions<T>,
 +      SslOptions<T>, SaslOptions<T>, ClientFactory<T>, FromOptions<T> {
 +
 +    private Properties properties = new Properties();
 +    private AuthenticationToken token = null;
 +    private Function<ClientBuilderImpl<T>,T> builderFunction;
 +
 +    public ClientBuilderImpl(Function<ClientBuilderImpl<T>,T> builderFunction) {
 +      this.builderFunction = builderFunction;
 +    }
 +
 +    private ClientInfo getClientInfo() {
 +      if (token != null) {
 +        ClientProperty.validate(properties, false);
 +        return new ClientInfoImpl(properties, token);
 +      }
 +      ClientProperty.validate(properties);
 +      return new ClientInfoImpl(properties);
 +    }
 +
 +    @Override
 +    public T build() {
 +      return builderFunction.apply(this);
 +    }
 +
 +    public static AccumuloClient buildClient(ClientBuilderImpl<AccumuloClient> cbi) {
 +      SingletonReservation reservation = SingletonManager.getClientReservation();
 +      try {
 +        // ClientContext closes reservation unless a RuntimeException is thrown
 +        return new ClientContext(reservation, cbi.getClientInfo());
 +      } catch (RuntimeException e) {
 +        reservation.close();
 +        throw e;
 +      }
 +    }
 +
 +    public static Properties buildProps(ClientBuilderImpl<Properties> cbi) {
 +      ClientProperty.validate(cbi.properties);
 +      return cbi.properties;
 +    }
 +
 +    @Override
 +    public AuthenticationArgs<T> to(CharSequence instanceName, CharSequence zookeepers) {
 +      setProperty(ClientProperty.INSTANCE_NAME, instanceName);
 +      setProperty(ClientProperty.INSTANCE_ZOOKEEPERS, zookeepers);
 +      return this;
 +    }
 +
 +    @Override
 +    public SslOptions<T> truststore(CharSequence path) {
 +      setProperty(ClientProperty.SSL_TRUSTSTORE_PATH, path);
 +      return this;
 +    }
 +
 +    @Override
 +    public SslOptions<T> truststore(CharSequence path, CharSequence password, CharSequence type) {
 +      setProperty(ClientProperty.SSL_TRUSTSTORE_PATH, path);
 +      setProperty(ClientProperty.SSL_TRUSTSTORE_PASSWORD, password);
 +      setProperty(ClientProperty.SSL_TRUSTSTORE_TYPE, type);
 +      return this;
 +    }
 +
 +    @Override
 +    public SslOptions<T> keystore(CharSequence path) {
 +      setProperty(ClientProperty.SSL_KEYSTORE_PATH, path);
 +      return this;
 +    }
 +
 +    @Override
 +    public SslOptions<T> keystore(CharSequence path, CharSequence password, CharSequence type) {
 +      setProperty(ClientProperty.SSL_KEYSTORE_PATH, path);
 +      setProperty(ClientProperty.SSL_KEYSTORE_PASSWORD, password);
 +      setProperty(ClientProperty.SSL_KEYSTORE_TYPE, type);
 +      return this;
 +    }
 +
 +    @Override
 +    public SslOptions<T> useJsse() {
 +      setProperty(ClientProperty.SSL_USE_JSSE, "true");
 +      return this;
 +    }
 +
 +    @Override
 +    public ConnectionOptions<T> zkTimeout(int timeout) {
 +      ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.setTimeInMillis(properties, (long) timeout);
 +      return this;
 +    }
 +
 +    @Override
 +    public SslOptions<T> useSsl() {
 +      setProperty(ClientProperty.SSL_ENABLED, "true");
 +      return this;
 +    }
 +
 +    @Override
 +    public SaslOptions<T> useSasl() {
 +      setProperty(ClientProperty.SASL_ENABLED, "true");
 +      return this;
 +    }
 +
 +    @Override
 +    public ConnectionOptions<T> batchWriterConfig(BatchWriterConfig batchWriterConfig) {
 +      ClientProperty.BATCH_WRITER_MEMORY_MAX.setBytes(properties, batchWriterConfig.getMaxMemory());
 +      ClientProperty.BATCH_WRITER_LATENCY_MAX.setTimeInMillis(properties,
 +          batchWriterConfig.getMaxLatency(TimeUnit.MILLISECONDS));
 +      ClientProperty.BATCH_WRITER_TIMEOUT_MAX.setTimeInMillis(properties,
 +          batchWriterConfig.getTimeout(TimeUnit.MILLISECONDS));
 +      setProperty(ClientProperty.BATCH_WRITER_THREADS_MAX, batchWriterConfig.getMaxWriteThreads());
 +      setProperty(ClientProperty.BATCH_WRITER_DURABILITY,
 +          batchWriterConfig.getDurability().toString());
 +      return this;
 +    }
 +
 +    @Override
 +    public ConnectionOptions<T> batchScannerQueryThreads(int numQueryThreads) {
 +      setProperty(ClientProperty.BATCH_SCANNER_NUM_QUERY_THREADS, numQueryThreads);
 +      return this;
 +    }
 +
 +    @Override
 +    public ConnectionOptions<T> scannerBatchSize(int batchSize) {
 +      setProperty(ClientProperty.SCANNER_BATCH_SIZE, batchSize);
 +      return this;
 +    }
 +
 +    @Override
 +    public SaslOptions<T> primary(CharSequence kerberosServerPrimary) {
 +      setProperty(ClientProperty.SASL_KERBEROS_SERVER_PRIMARY, kerberosServerPrimary);
 +      return this;
 +    }
 +
 +    @Override
 +    public SaslOptions<T> qop(CharSequence qualityOfProtection) {
 +      setProperty(ClientProperty.SASL_QOP, qualityOfProtection);
 +      return this;
 +    }
 +
 +    @Override
 +    public FromOptions<T> from(String propertiesFilePath) {
 +      return from(ClientInfoImpl.toProperties(propertiesFilePath));
 +    }
 +
 +    @Override
 +    public FromOptions<T> from(Path propertiesFile) {
 +      return from(ClientInfoImpl.toProperties(propertiesFile));
 +    }
 +
 +    @Override
 +    public FromOptions<T> from(Properties properties) {
 +      this.properties = properties;
 +      return this;
 +    }
 +
 +    @Override
 +    public ConnectionOptions<T> as(CharSequence username, CharSequence password) {
 +      setProperty(ClientProperty.AUTH_PRINCIPAL, username);
 +      ClientProperty.setPassword(properties, password);
 +      return this;
 +    }
 +
 +    @Override
 +    public ConnectionOptions<T> as(CharSequence principal, Path keyTabFile) {
 +      setProperty(ClientProperty.AUTH_PRINCIPAL, principal);
 +      ClientProperty.setKerberosKeytab(properties, keyTabFile.toString());
 +      return this;
 +    }
 +
 +    @Override
 +    public ConnectionOptions<T> as(CharSequence principal, AuthenticationToken token) {
 +      if (token.isDestroyed()) {
 +        throw new IllegalArgumentException("AuthenticationToken has been destroyed");
 +      }
 +      setProperty(ClientProperty.AUTH_PRINCIPAL, principal.toString());
 +      ClientProperty.setAuthenticationToken(properties, token);
 +      this.token = token;
 +      return this;
 +    }
 +
 +    public void setProperty(ClientProperty property, CharSequence value) {
 +      properties.setProperty(property.getKey(), value.toString());
 +    }
 +
 +    public void setProperty(ClientProperty property, Long value) {
 +      setProperty(property, Long.toString(value));
 +    }
 +
 +    public void setProperty(ClientProperty property, Integer value) {
 +      setProperty(property, Integer.toString(value));
 +    }
 +  }
 +}
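
To show how the fluent builder above is meant to be used, a minimal sketch; it assumes the
public entry point org.apache.accumulo.core.client.Accumulo.newClient() and uses placeholder
instance, zookeeper, and credential values:

    import org.apache.accumulo.core.client.Accumulo;
    import org.apache.accumulo.core.client.AccumuloClient;

    class ClientBuilderSketch {
      public static void main(String[] args) throws Exception {
        try (AccumuloClient client = Accumulo.newClient()
            .to("myInstance", "zkhost1:2181,zkhost2:2181")
            .as("someUser", "somePassword")
            .zkTimeout(30_000) // milliseconds, stored via INSTANCE_ZOOKEEPERS_TIMEOUT
            .batchScannerQueryThreads(4)
            .build()) {
          System.out.println("connected as " + client.whoami());
        }
      }
    }
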
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ConditionalWriterImpl.java
index 11752ea,0000000..bd2617a
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ConditionalWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ConditionalWriterImpl.java
@@@ -1,834 -1,0 +1,834 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
 +
 +import java.nio.ByteBuffer;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.Comparator;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.NoSuchElementException;
 +import java.util.concurrent.BlockingQueue;
 +import java.util.concurrent.DelayQueue;
 +import java.util.concurrent.Delayed;
 +import java.util.concurrent.LinkedBlockingQueue;
 +import java.util.concurrent.ScheduledThreadPoolExecutor;
 +import java.util.concurrent.ThreadPoolExecutor;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.ConditionalWriter;
 +import org.apache.accumulo.core.client.ConditionalWriterConfig;
 +import org.apache.accumulo.core.client.Durability;
 +import org.apache.accumulo.core.client.TableDeletedException;
 +import org.apache.accumulo.core.client.TableOfflineException;
 +import org.apache.accumulo.core.client.TimedOutException;
 +import org.apache.accumulo.core.clientImpl.TabletLocator.TabletServerMutations;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.data.ByteSequence;
 +import org.apache.accumulo.core.data.Condition;
 +import org.apache.accumulo.core.data.ConditionalMutation;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.dataImpl.thrift.TCMResult;
 +import org.apache.accumulo.core.dataImpl.thrift.TCMStatus;
 +import org.apache.accumulo.core.dataImpl.thrift.TCondition;
 +import org.apache.accumulo.core.dataImpl.thrift.TConditionalMutation;
 +import org.apache.accumulo.core.dataImpl.thrift.TConditionalSession;
 +import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
 +import org.apache.accumulo.core.dataImpl.thrift.TMutation;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.rpc.ThriftUtil;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.accumulo.core.security.VisibilityEvaluator;
 +import org.apache.accumulo.core.security.VisibilityParseException;
 +import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
 +import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 +import org.apache.accumulo.core.trace.TraceUtil;
 +import org.apache.accumulo.core.trace.thrift.TInfo;
 +import org.apache.accumulo.core.util.BadArgumentException;
 +import org.apache.accumulo.core.util.ByteBufferUtil;
 +import org.apache.accumulo.core.util.HostAndPort;
 +import org.apache.accumulo.core.util.NamingThreadFactory;
 +import org.apache.accumulo.fate.util.LoggingRunnable;
 +import org.apache.accumulo.fate.zookeeper.ZooLock;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.LockID;
 +import org.apache.commons.collections.map.LRUMap;
 +import org.apache.commons.lang.mutable.MutableLong;
 +import org.apache.hadoop.io.Text;
 +import org.apache.htrace.Trace;
 +import org.apache.thrift.TApplicationException;
 +import org.apache.thrift.TException;
 +import org.apache.thrift.TServiceClient;
 +import org.apache.thrift.transport.TTransportException;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +class ConditionalWriterImpl implements ConditionalWriter {
 +
-   private static ThreadPoolExecutor cleanupThreadPool = new ThreadPoolExecutor(1, 1, 3,
-       TimeUnit.SECONDS, new LinkedBlockingQueue<>(), r -> {
++  private static ThreadPoolExecutor cleanupThreadPool =
++      new ThreadPoolExecutor(1, 1, 3, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), r -> {
 +        Thread t = new Thread(r, "Conditional Writer Cleanup Thread");
 +        t.setDaemon(true);
 +        return t;
 +      });
 +
 +  static {
 +    cleanupThreadPool.allowCoreThreadTimeOut(true);
 +  }
 +
 +  private static final Logger log = LoggerFactory.getLogger(ConditionalWriterImpl.class);
 +
 +  private static final int MAX_SLEEP = 30000;
 +
 +  private Authorizations auths;
 +  private VisibilityEvaluator ve;
 +  @SuppressWarnings("unchecked")
 +  private Map<Text,Boolean> cache = Collections.synchronizedMap(new LRUMap(1000));
 +  private final ClientContext context;
 +  private TabletLocator locator;
 +  private final TableId tableId;
 +  private long timeout;
 +  private final Durability durability;
 +  private final String classLoaderContext;
 +
 +  private static class ServerQueue {
 +    BlockingQueue<TabletServerMutations<QCMutation>> queue = new LinkedBlockingQueue<>();
 +    boolean taskQueued = false;
 +  }
 +
 +  private Map<String,ServerQueue> serverQueues;
 +  private DelayQueue<QCMutation> failedMutations = new DelayQueue<>();
 +  private ScheduledThreadPoolExecutor threadPool;
 +
 +  private class RQIterator implements Iterator<Result> {
 +
 +    private BlockingQueue<Result> rq;
 +    private int count;
 +
 +    public RQIterator(BlockingQueue<Result> resultQueue, int count) {
 +      this.rq = resultQueue;
 +      this.count = count;
 +    }
 +
 +    @Override
 +    public boolean hasNext() {
 +      return count > 0;
 +    }
 +
 +    @Override
 +    public Result next() {
 +      if (count <= 0)
 +        throw new NoSuchElementException();
 +
 +      try {
 +        Result result = rq.poll(1, TimeUnit.SECONDS);
 +        while (result == null) {
 +
 +          if (threadPool.isShutdown()) {
 +            throw new NoSuchElementException("ConditionalWriter closed");
 +          }
 +
 +          result = rq.poll(1, TimeUnit.SECONDS);
 +        }
 +        count--;
 +        return result;
 +      } catch (InterruptedException e) {
 +        throw new RuntimeException(e);
 +      }
 +    }
 +
 +    @Override
 +    public void remove() {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +  }
 +
 +  private static class QCMutation extends ConditionalMutation implements Delayed {
 +    private BlockingQueue<Result> resultQueue;
 +    private long resetTime;
 +    private long delay = 50;
 +    private long entryTime;
 +
 +    QCMutation(ConditionalMutation cm, BlockingQueue<Result> resultQueue, long entryTime) {
 +      super(cm);
 +      this.resultQueue = resultQueue;
 +      this.entryTime = entryTime;
 +    }
 +
 +    @Override
 +    public int compareTo(Delayed o) {
 +      QCMutation oqcm = (QCMutation) o;
 +      return Long.compare(resetTime, oqcm.resetTime);
 +    }
 +
 +    @Override
 +    public int hashCode() {
 +      return super.hashCode();
 +    }
 +
 +    @Override
 +    public boolean equals(Object o) {
 +      if (o instanceof QCMutation)
 +        return compareTo((QCMutation) o) == 0;
 +      return false;
 +    }
 +
 +    @Override
 +    public long getDelay(TimeUnit unit) {
 +      return unit.convert(delay - (System.currentTimeMillis() - resetTime), TimeUnit.MILLISECONDS);
 +    }
 +
 +    void resetDelay() {
 +      delay = Math.min(delay * 2, MAX_SLEEP);
 +      resetTime = System.currentTimeMillis();
 +    }
 +
 +    void queueResult(Result result) {
 +      resultQueue.add(result);
 +    }
 +  }
 +
 +  private ServerQueue getServerQueue(String location) {
 +    ServerQueue serverQueue;
 +    synchronized (serverQueues) {
 +      serverQueue = serverQueues.get(location);
 +      if (serverQueue == null) {
 +
 +        serverQueue = new ServerQueue();
 +        serverQueues.put(location, serverQueue);
 +      }
 +    }
 +    return serverQueue;
 +  }
 +
 +  private class CleanupTask implements Runnable {
 +    private List<SessionID> sessions;
 +
 +    CleanupTask(List<SessionID> activeSessions) {
 +      this.sessions = activeSessions;
 +    }
 +
 +    @Override
 +    public void run() {
 +      TabletClientService.Iface client = null;
 +
 +      for (SessionID sid : sessions) {
 +        if (!sid.isActive())
 +          continue;
 +
 +        TInfo tinfo = TraceUtil.traceInfo();
 +        try {
 +          client = getClient(sid.location);
 +          client.closeConditionalUpdate(tinfo, sid.sessionID);
 +        } catch (Exception e) {} finally {
 +          ThriftUtil.returnClient((TServiceClient) client);
 +        }
 +
 +      }
 +    }
 +  }
 +
 +  private void queueRetry(List<QCMutation> mutations, HostAndPort server) {
 +
 +    if (timeout < Long.MAX_VALUE) {
 +
 +      long time = System.currentTimeMillis();
 +
 +      ArrayList<QCMutation> mutations2 = new ArrayList<>(mutations.size());
 +
 +      for (QCMutation qcm : mutations) {
 +        qcm.resetDelay();
 +        if (time + qcm.getDelay(TimeUnit.MILLISECONDS) > qcm.entryTime + timeout) {
 +          TimedOutException toe;
 +          if (server != null)
 +            toe = new TimedOutException(Collections.singleton(server.toString()));
 +          else
 +            toe = new TimedOutException("Conditional mutation timed out");
 +
 +          qcm.queueResult(new Result(toe, qcm, (server == null ? null : server.toString())));
 +        } else {
 +          mutations2.add(qcm);
 +        }
 +      }
 +
 +      if (mutations2.size() > 0)
 +        failedMutations.addAll(mutations2);
 +
 +    } else {
 +      for (QCMutation qcm : mutations)
 +        qcm.resetDelay();
 +      failedMutations.addAll(mutations);
 +    }
 +  }
 +
 +  private void queue(List<QCMutation> mutations) {
 +    List<QCMutation> failures = new ArrayList<>();
 +    Map<String,TabletServerMutations<QCMutation>> binnedMutations = new HashMap<>();
 +
 +    try {
 +      locator.binMutations(context, mutations, binnedMutations, failures);
 +
 +      if (failures.size() == mutations.size())
 +        if (!Tables.exists(context, tableId))
 +          throw new TableDeletedException(tableId.canonical());
 +        else if (Tables.getTableState(context, tableId) == TableState.OFFLINE)
 +          throw new TableOfflineException(Tables.getTableOfflineMsg(context, tableId));
 +
 +    } catch (Exception e) {
 +      for (QCMutation qcm : mutations)
 +        qcm.queueResult(new Result(e, qcm, null));
 +
 +      // do not want to queue anything that was put in before binMutations() failed
 +      failures.clear();
 +      binnedMutations.clear();
 +    }
 +
 +    if (failures.size() > 0)
 +      queueRetry(failures, null);
 +
 +    for (Entry<String,TabletServerMutations<QCMutation>> entry : binnedMutations.entrySet()) {
 +      queue(entry.getKey(), entry.getValue());
 +    }
 +
 +  }
 +
 +  private void queue(String location, TabletServerMutations<QCMutation> mutations) {
 +
 +    ServerQueue serverQueue = getServerQueue(location);
 +
 +    synchronized (serverQueue) {
 +      serverQueue.queue.add(mutations);
 +      // never execute more than one task per server
 +      if (!serverQueue.taskQueued) {
 +        threadPool.execute(new LoggingRunnable(log, Trace.wrap(new SendTask(location))));
 +        serverQueue.taskQueued = true;
 +      }
 +    }
 +
 +  }
 +
 +  private void reschedule(SendTask task) {
 +    ServerQueue serverQueue = getServerQueue(task.location);
 +    // Just finished processing work for this server; we could either reschedule it or process
 +    // more of its work immediately. This code reschedules the server for processing later...
 +    // there may be other queues with more data that need to be processed, and it gives the
 +    // current server time to build up more data. The thinking is that rescheduling instead of
 +    // processing immediately will result in bigger batches and less RPC overhead.
 +
 +    synchronized (serverQueue) {
 +      if (serverQueue.queue.size() > 0)
 +        threadPool.execute(new LoggingRunnable(log, Trace.wrap(task)));
 +      else
 +        serverQueue.taskQueued = false;
 +    }
 +
 +  }
 +
 +  private TabletServerMutations<QCMutation> dequeue(String location) {
 +    BlockingQueue<TabletServerMutations<QCMutation>> queue = getServerQueue(location).queue;
 +
 +    ArrayList<TabletServerMutations<QCMutation>> mutations = new ArrayList<>();
 +    queue.drainTo(mutations);
 +
 +    if (mutations.size() == 0)
 +      return null;
 +
 +    if (mutations.size() == 1) {
 +      return mutations.get(0);
 +    } else {
 +      // merge multiple requests to a single tablet server
 +      TabletServerMutations<QCMutation> tsm = mutations.get(0);
 +
 +      for (int i = 1; i < mutations.size(); i++) {
 +        for (Entry<KeyExtent,List<QCMutation>> entry : mutations.get(i).getMutations().entrySet()) {
 +          tsm.getMutations().computeIfAbsent(entry.getKey(), k -> new ArrayList<>())
 +              .addAll(entry.getValue());
 +        }
 +      }
 +
 +      return tsm;
 +    }
 +  }
 +
 +  ConditionalWriterImpl(ClientContext context, TableId tableId, ConditionalWriterConfig config) {
 +    this.context = context;
 +    this.auths = config.getAuthorizations();
 +    this.ve = new VisibilityEvaluator(config.getAuthorizations());
 +    this.threadPool = new ScheduledThreadPoolExecutor(config.getMaxWriteThreads(),
 +        new NamingThreadFactory(this.getClass().getSimpleName()));
 +    this.locator = new SyncingTabletLocator(context, tableId);
 +    this.serverQueues = new HashMap<>();
 +    this.tableId = tableId;
 +    this.timeout = config.getTimeout(TimeUnit.MILLISECONDS);
 +    this.durability = config.getDurability();
 +    this.classLoaderContext = config.getClassLoaderContext();
 +
 +    Runnable failureHandler = () -> {
 +      List<QCMutation> mutations = new ArrayList<>();
 +      failedMutations.drainTo(mutations);
 +      if (mutations.size() > 0)
 +        queue(mutations);
 +    };
 +
 +    failureHandler = new LoggingRunnable(log, failureHandler);
 +
 +    threadPool.scheduleAtFixedRate(failureHandler, 250, 250, TimeUnit.MILLISECONDS);
 +  }
 +
 +  @Override
 +  public Iterator<Result> write(Iterator<ConditionalMutation> mutations) {
 +
 +    BlockingQueue<Result> resultQueue = new LinkedBlockingQueue<>();
 +
 +    List<QCMutation> mutationList = new ArrayList<>();
 +
 +    int count = 0;
 +
 +    long entryTime = System.currentTimeMillis();
 +
 +    mloop: while (mutations.hasNext()) {
 +      ConditionalMutation mut = mutations.next();
 +      count++;
 +
 +      if (mut.getConditions().size() == 0)
 +        throw new IllegalArgumentException(
 +            "ConditionalMutation had no conditions " + new String(mut.getRow(), UTF_8));
 +
 +      for (Condition cond : mut.getConditions()) {
 +        if (!isVisible(cond.getVisibility())) {
 +          resultQueue.add(new Result(Status.INVISIBLE_VISIBILITY, mut, null));
 +          continue mloop;
 +        }
 +      }
 +
 +      // copy the mutation so that even if the caller changes it, it will not matter
 +      mutationList.add(new QCMutation(mut, resultQueue, entryTime));
 +    }
 +
 +    queue(mutationList);
 +
 +    return new RQIterator(resultQueue, count);
 +
 +  }
 +
 +  private class SendTask implements Runnable {
 +
 +    String location;
 +
 +    public SendTask(String location) {
 +      this.location = location;
 +
 +    }
 +
 +    @Override
 +    public void run() {
 +      try {
 +        TabletServerMutations<QCMutation> mutations = dequeue(location);
 +        if (mutations != null)
 +          sendToServer(HostAndPort.fromString(location), mutations);
 +      } finally {
 +        reschedule(this);
 +      }
 +    }
 +  }
 +
 +  private static class CMK {
 +
 +    QCMutation cm;
 +    KeyExtent ke;
 +
 +    public CMK(KeyExtent ke, QCMutation cm) {
 +      this.ke = ke;
 +      this.cm = cm;
 +    }
 +  }
 +
 +  private static class SessionID {
 +    HostAndPort location;
 +    String lockId;
 +    long sessionID;
 +    boolean reserved;
 +    long lastAccessTime;
 +    long ttl;
 +
 +    boolean isActive() {
 +      return System.currentTimeMillis() - lastAccessTime < ttl * .95;
 +    }
 +  }
 +
 +  private HashMap<HostAndPort,SessionID> cachedSessionIDs = new HashMap<>();
 +
 +  private SessionID reserveSessionID(HostAndPort location, TabletClientService.Iface client,
 +      TInfo tinfo) throws ThriftSecurityException, TException {
 +    // avoid the cost of repeatedly making RPCs to create sessions; reuse sessions when possible
 +    synchronized (cachedSessionIDs) {
 +      SessionID sid = cachedSessionIDs.get(location);
 +      if (sid != null) {
 +        if (sid.reserved)
 +          throw new IllegalStateException();
 +
 +        if (!sid.isActive()) {
 +          cachedSessionIDs.remove(location);
 +        } else {
 +          sid.reserved = true;
 +          return sid;
 +        }
 +      }
 +    }
 +
 +    TConditionalSession tcs = client.startConditionalUpdate(tinfo, context.rpcCreds(),
 +        ByteBufferUtil.toByteBuffers(auths.getAuthorizations()), tableId.canonical(),
 +        DurabilityImpl.toThrift(durability), this.classLoaderContext);
 +
 +    synchronized (cachedSessionIDs) {
 +      SessionID sid = new SessionID();
 +      sid.reserved = true;
 +      sid.sessionID = tcs.sessionId;
 +      sid.lockId = tcs.tserverLock;
 +      sid.ttl = tcs.ttl;
 +      sid.location = location;
 +      if (cachedSessionIDs.put(location, sid) != null)
 +        throw new IllegalStateException();
 +
 +      return sid;
 +    }
 +
 +  }
 +
 +  private void invalidateSessionID(HostAndPort location) {
 +    synchronized (cachedSessionIDs) {
 +      cachedSessionIDs.remove(location);
 +    }
 +
 +  }
 +
 +  private void unreserveSessionID(HostAndPort location) {
 +    synchronized (cachedSessionIDs) {
 +      SessionID sid = cachedSessionIDs.get(location);
 +      if (sid != null) {
 +        if (!sid.reserved)
 +          throw new IllegalStateException();
 +        sid.reserved = false;
 +        sid.lastAccessTime = System.currentTimeMillis();
 +      }
 +    }
 +  }
 +
 +  List<SessionID> getActiveSessions() {
 +    ArrayList<SessionID> activeSessions = new ArrayList<>();
 +    for (SessionID sid : cachedSessionIDs.values())
 +      if (sid.isActive())
 +        activeSessions.add(sid);
 +    return activeSessions;
 +  }
 +
 +  private TabletClientService.Iface getClient(HostAndPort location) throws TTransportException {
 +    TabletClientService.Iface client;
 +    if (timeout < context.getClientTimeoutInMillis())
 +      client = ThriftUtil.getTServerClient(location, context, timeout);
 +    else
 +      client = ThriftUtil.getTServerClient(location, context);
 +    return client;
 +  }
 +
 +  private void sendToServer(HostAndPort location, TabletServerMutations<QCMutation> mutations) {
 +    TabletClientService.Iface client = null;
 +
 +    TInfo tinfo = TraceUtil.traceInfo();
 +
 +    Map<Long,CMK> cmidToCm = new HashMap<>();
 +    MutableLong cmid = new MutableLong(0);
 +
 +    SessionID sessionId = null;
 +
 +    try {
 +      Map<TKeyExtent,List<TConditionalMutation>> tmutations = new HashMap<>();
 +
 +      CompressedIterators compressedIters = new CompressedIterators();
 +      convertMutations(mutations, cmidToCm, cmid, tmutations, compressedIters);
 +
 +      // getClient() must be called after convertMutations(), so that if getClient() throws a
 +      // TException, cmidToCm is already populated and the catch blocks can handle the mutations
 +      client = getClient(location);
 +
 +      List<TCMResult> tresults = null;
 +      while (tresults == null) {
 +        try {
 +          sessionId = reserveSessionID(location, client, tinfo);
 +          tresults = client.conditionalUpdate(tinfo, sessionId.sessionID, tmutations,
 +              compressedIters.getSymbolTable());
 +        } catch (NoSuchScanIDException nssie) {
 +          sessionId = null;
 +          invalidateSessionID(location);
 +        }
 +      }
 +
 +      HashSet<KeyExtent> extentsToInvalidate = new HashSet<>();
 +
 +      ArrayList<QCMutation> ignored = new ArrayList<>();
 +
 +      for (TCMResult tcmResult : tresults) {
 +        if (tcmResult.status == TCMStatus.IGNORED) {
 +          CMK cmk = cmidToCm.get(tcmResult.cmid);
 +          ignored.add(cmk.cm);
 +          extentsToInvalidate.add(cmk.ke);
 +        } else {
 +          QCMutation qcm = cmidToCm.get(tcmResult.cmid).cm;
 +          qcm.queueResult(new Result(fromThrift(tcmResult.status), qcm, location.toString()));
 +        }
 +      }
 +
 +      for (KeyExtent ke : extentsToInvalidate) {
 +        locator.invalidateCache(ke);
 +      }
 +
 +      queueRetry(ignored, location);
 +
 +    } catch (ThriftSecurityException tse) {
-       AccumuloSecurityException ase = new AccumuloSecurityException(
-           context.getCredentials().getPrincipal(), tse.getCode(),
-           Tables.getPrintableTableInfoFromId(context, tableId), tse);
++      AccumuloSecurityException ase =
++          new AccumuloSecurityException(context.getCredentials().getPrincipal(), tse.getCode(),
++              Tables.getPrintableTableInfoFromId(context, tableId), tse);
 +      queueException(location, cmidToCm, ase);
 +    } catch (TTransportException e) {
 +      locator.invalidateCache(context, location.toString());
 +      invalidateSession(location, cmidToCm, sessionId);
 +    } catch (TApplicationException tae) {
 +      queueException(location, cmidToCm, new AccumuloServerException(location.toString(), tae));
 +    } catch (TException e) {
 +      locator.invalidateCache(context, location.toString());
 +      invalidateSession(location, cmidToCm, sessionId);
 +    } catch (Exception e) {
 +      queueException(location, cmidToCm, e);
 +    } finally {
 +      if (sessionId != null)
 +        unreserveSessionID(location);
 +      ThriftUtil.returnClient((TServiceClient) client);
 +    }
 +  }
 +
 +  private void queueRetry(Map<Long,CMK> cmidToCm, HostAndPort location) {
 +    ArrayList<QCMutation> ignored = new ArrayList<>();
 +    for (CMK cmk : cmidToCm.values())
 +      ignored.add(cmk.cm);
 +    queueRetry(ignored, location);
 +  }
 +
 +  private void queueException(HostAndPort location, Map<Long,CMK> cmidToCm, Exception e) {
 +    for (CMK cmk : cmidToCm.values())
 +      cmk.cm.queueResult(new Result(e, cmk.cm, location.toString()));
 +  }
 +
 +  private void invalidateSession(HostAndPort location, Map<Long,CMK> cmidToCm,
 +      SessionID sessionId) {
 +    if (sessionId == null) {
 +      queueRetry(cmidToCm, location);
 +    } else {
 +      try {
 +        invalidateSession(sessionId, location);
 +        for (CMK cmk : cmidToCm.values())
 +          cmk.cm.queueResult(new Result(Status.UNKNOWN, cmk.cm, location.toString()));
 +      } catch (Exception e2) {
 +        queueException(location, cmidToCm, e2);
 +      }
 +    }
 +  }
 +
 +  /**
 +   * The purpose of this code is to ensure that a conditional mutation will not execute when its
 +   * status is unknown. This allows a user to read the row when the status is unknown and not have
 +   * to worry about the tserver applying the mutation after the scan.
 +   *
 +   * <p>
 +   * If a conditional mutation is taking a long time to process, then this method will wait for it
 +   * to finish... unless this exceeds timeout.
 +   */
 +  private void invalidateSession(SessionID sessionId, HostAndPort location)
 +      throws AccumuloException {
 +
 +    long sleepTime = 50;
 +
 +    long startTime = System.currentTimeMillis();
 +
 +    LockID lid = new LockID(context.getZooKeeperRoot() + Constants.ZTSERVERS, sessionId.lockId);
 +
 +    while (true) {
 +      if (!ZooLock.isLockHeld(context.getZooCache(), lid)) {
 +        // ACCUMULO-1152 added a tserver lock check to the tablet location cache, so this
 +        // invalidation prevents future attempts to contact the tserver even if it has gone
 +        // zombie and is still running without a lock
 +        locator.invalidateCache(context, location.toString());
 +        return;
 +      }
 +
 +      try {
 +        // if the mutation is currently processing, this method will block until it is done or
 +        // times out
 +        invalidateSession(sessionId.sessionID, location);
 +
 +        return;
 +      } catch (TApplicationException tae) {
 +        throw new AccumuloServerException(location.toString(), tae);
 +      } catch (TException e) {
 +        locator.invalidateCache(context, location.toString());
 +      }
 +
 +      if ((System.currentTimeMillis() - startTime) + sleepTime > timeout)
 +        throw new TimedOutException(Collections.singleton(location.toString()));
 +
 +      sleepUninterruptibly(sleepTime, TimeUnit.MILLISECONDS);
 +      sleepTime = Math.min(2 * sleepTime, MAX_SLEEP);
 +
 +    }
 +
 +  }
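
As a user-facing illustration of the guarantee described above (a mutation left in the UNKNOWN
state will not be applied later, so its row can safely be re-read), a minimal sketch; the table
name, row, and column values are placeholders:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.ConditionalWriter;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Condition;
    import org.apache.accumulo.core.data.ConditionalMutation;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;

    class UnknownStatusSketch {
      static void writeAndRecover(AccumuloClient client, ConditionalWriter cw) throws Exception {
        // only set the value if the column currently has no value
        ConditionalMutation cm = new ConditionalMutation("row1", new Condition("fam", "qual"));
        cm.put("fam", "qual", "value1");

        ConditionalWriter.Result result = cw.write(cm);
        if (result.getStatus() == ConditionalWriter.Status.UNKNOWN) {
          // the session invalidation above ensures the mutation can no longer be applied,
          // so re-reading the row reveals whether it actually went in
          try (Scanner scanner = client.createScanner("mytable", Authorizations.EMPTY)) {
            scanner.setRange(Range.exact("row1"));
            for (Entry<Key,Value> e : scanner) {
              System.out.println(e.getKey() + " -> " + e.getValue());
            }
          }
        }
      }
    }
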
 +
 +  private void invalidateSession(long sessionId, HostAndPort location) throws TException {
 +    TabletClientService.Iface client = null;
 +
 +    TInfo tinfo = TraceUtil.traceInfo();
 +
 +    try {
 +      client = getClient(location);
 +      client.invalidateConditionalUpdate(tinfo, sessionId);
 +    } finally {
 +      ThriftUtil.returnClient((TServiceClient) client);
 +    }
 +  }
 +
 +  private Status fromThrift(TCMStatus status) {
 +    switch (status) {
 +      case ACCEPTED:
 +        return Status.ACCEPTED;
 +      case REJECTED:
 +        return Status.REJECTED;
 +      case VIOLATED:
 +        return Status.VIOLATED;
 +      default:
 +        throw new IllegalArgumentException(status.toString());
 +    }
 +  }
 +
 +  private void convertMutations(TabletServerMutations<QCMutation> mutations, Map<Long,CMK> cmidToCm,
 +      MutableLong cmid, Map<TKeyExtent,List<TConditionalMutation>> tmutations,
 +      CompressedIterators compressedIters) {
 +
 +    for (Entry<KeyExtent,List<QCMutation>> entry : mutations.getMutations().entrySet()) {
 +      TKeyExtent tke = entry.getKey().toThrift();
 +      ArrayList<TConditionalMutation> tcondMutations = new ArrayList<>();
 +
 +      List<QCMutation> condMutations = entry.getValue();
 +
 +      for (QCMutation cm : condMutations) {
 +        TMutation tm = cm.toThrift();
 +
 +        List<TCondition> conditions = convertConditions(cm, compressedIters);
 +
 +        cmidToCm.put(cmid.longValue(), new CMK(entry.getKey(), cm));
 +        TConditionalMutation tcm = new TConditionalMutation(conditions, tm, cmid.longValue());
 +        cmid.increment();
 +        tcondMutations.add(tcm);
 +      }
 +
 +      tmutations.put(tke, tcondMutations);
 +    }
 +  }
 +
-   private static final Comparator<Long> TIMESTAMP_COMPARATOR = Comparator
-       .nullsFirst(Comparator.reverseOrder());
++  private static final Comparator<Long> TIMESTAMP_COMPARATOR =
++      Comparator.nullsFirst(Comparator.reverseOrder());
 +
-   static final Comparator<Condition> CONDITION_COMPARATOR = Comparator
-       .comparing(Condition::getFamily).thenComparing(Condition::getQualifier)
-       .thenComparing(Condition::getVisibility)
-       .thenComparing(Condition::getTimestamp, TIMESTAMP_COMPARATOR);
++  static final Comparator<Condition> CONDITION_COMPARATOR =
++      Comparator.comparing(Condition::getFamily).thenComparing(Condition::getQualifier)
++          .thenComparing(Condition::getVisibility)
++          .thenComparing(Condition::getTimestamp, TIMESTAMP_COMPARATOR);
 +
 +  private List<TCondition> convertConditions(ConditionalMutation cm,
 +      CompressedIterators compressedIters) {
 +    List<TCondition> conditions = new ArrayList<>(cm.getConditions().size());
 +
 +    // sort conditions in order to get better lookup performance; sort on the client side so the
 +    // tserver does not have to do it
 +    Condition[] ca = cm.getConditions().toArray(new Condition[cm.getConditions().size()]);
 +    Arrays.sort(ca, CONDITION_COMPARATOR);
 +
 +    for (Condition cond : ca) {
 +      long ts = 0;
 +      boolean hasTs = false;
 +
 +      if (cond.getTimestamp() != null) {
 +        ts = cond.getTimestamp();
 +        hasTs = true;
 +      }
 +
 +      ByteBuffer iters = compressedIters.compress(cond.getIterators());
 +
 +      TCondition tc = new TCondition(ByteBufferUtil.toByteBuffers(cond.getFamily()),
 +          ByteBufferUtil.toByteBuffers(cond.getQualifier()),
 +          ByteBufferUtil.toByteBuffers(cond.getVisibility()), ts, hasTs,
 +          ByteBufferUtil.toByteBuffers(cond.getValue()), iters);
 +
 +      conditions.add(tc);
 +    }
 +
 +    return conditions;
 +  }
 +
 +  private boolean isVisible(ByteSequence cv) {
 +    Text testVis = new Text(cv.toArray());
 +    if (testVis.getLength() == 0)
 +      return true;
 +
 +    Boolean b = cache.get(testVis);
 +    if (b != null)
 +      return b;
 +
 +    try {
 +      boolean bb = ve.evaluate(new ColumnVisibility(testVis));
 +      cache.put(new Text(testVis), bb);
 +      return bb;
 +    } catch (VisibilityParseException | BadArgumentException e) {
 +      return false;
 +    }
 +  }
 +
 +  @Override
 +  public Result write(ConditionalMutation mutation) {
 +    return write(Collections.singleton(mutation).iterator()).next();
 +  }
 +
 +  @Override
 +  public void close() {
 +    threadPool.shutdownNow();
 +    cleanupThreadPool.execute(new CleanupTask(getActiveSessions()));
 +  }
 +
 +}
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ConnectorImpl.java
index 6a6aa4a,0000000..f98ba9a
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ConnectorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ConnectorImpl.java
@@@ -1,176 -1,0 +1,176 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.BatchDeleter;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.ConditionalWriter;
 +import org.apache.accumulo.core.client.ConditionalWriterConfig;
 +import org.apache.accumulo.core.client.MultiTableBatchWriter;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.admin.InstanceOperations;
 +import org.apache.accumulo.core.client.admin.NamespaceOperations;
 +import org.apache.accumulo.core.client.admin.ReplicationOperations;
 +import org.apache.accumulo.core.client.admin.SecurityOperations;
 +import org.apache.accumulo.core.client.admin.TableOperations;
 +import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.singletons.SingletonManager;
 +import org.apache.accumulo.core.singletons.SingletonManager.Mode;
 +import org.apache.accumulo.core.trace.TraceUtil;
 +
 +/**
 + * This class now delegates to {@link ClientContext}, except for the methods which were not copied
 + * over to it.
 + */
 +@Deprecated
 +public class ConnectorImpl extends org.apache.accumulo.core.client.Connector {
 +
-   private static final String SYSTEM_TOKEN_NAME = "org.apache.accumulo.server.security."
-       + "SystemCredentials$SystemToken";
++  private static final String SYSTEM_TOKEN_NAME =
++      "org.apache.accumulo.server.security." + "SystemCredentials$SystemToken";
 +  private final ClientContext context;
 +
 +  public ConnectorImpl(ClientContext context) throws AccumuloSecurityException, AccumuloException {
 +    this.context = context;
 +    SingletonManager.setMode(Mode.CONNECTOR);
 +    if (context.getCredentials().getToken().isDestroyed())
 +      throw new AccumuloSecurityException(context.getCredentials().getPrincipal(),
 +          SecurityErrorCode.TOKEN_EXPIRED);
 +    // Skip fail fast for system services; string literal for class name, to avoid dependency on
 +    // server jar
 +    final String tokenClassName = context.getCredentials().getToken().getClass().getName();
 +    if (!SYSTEM_TOKEN_NAME.equals(tokenClassName)) {
 +      ServerClient.executeVoid(context, iface -> {
 +        if (!iface.authenticate(TraceUtil.traceInfo(), context.rpcCreds()))
 +          throw new AccumuloSecurityException("Authentication failed, access denied",
 +              SecurityErrorCode.BAD_CREDENTIALS);
 +      });
 +    }
 +  }
 +
 +  public ClientContext getAccumuloClient() {
 +    return context;
 +  }
 +
 +  @Override
 +  @Deprecated
 +  public org.apache.accumulo.core.client.Instance getInstance() {
 +    return context.getDeprecatedInstance();
 +  }
 +
 +  @Override
 +  public BatchScanner createBatchScanner(String tableName, Authorizations authorizations,
 +      int numQueryThreads) throws TableNotFoundException {
 +    return context.createBatchScanner(tableName, authorizations, numQueryThreads);
 +  }
 +
 +  @Override
 +  public BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations,
 +      int numQueryThreads, long maxMemory, long maxLatency, int maxWriteThreads)
 +      throws TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(authorizations != null, "authorizations is null");
 +    return new TabletServerBatchDeleter(context, context.getTableId(tableName), authorizations,
 +        numQueryThreads, new BatchWriterConfig().setMaxMemory(maxMemory)
 +            .setMaxLatency(maxLatency, TimeUnit.MILLISECONDS).setMaxWriteThreads(maxWriteThreads));
 +  }
 +
 +  @Override
 +  public BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations,
 +      int numQueryThreads, BatchWriterConfig config) throws TableNotFoundException {
 +    return context.createBatchDeleter(tableName, authorizations, numQueryThreads, config);
 +  }
 +
 +  @Override
 +  public BatchWriter createBatchWriter(String tableName, long maxMemory, long maxLatency,
 +      int maxWriteThreads) throws TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    return new BatchWriterImpl(context, context.getTableId(tableName),
 +        new BatchWriterConfig().setMaxMemory(maxMemory)
 +            .setMaxLatency(maxLatency, TimeUnit.MILLISECONDS).setMaxWriteThreads(maxWriteThreads));
 +  }
 +
 +  @Override
 +  public BatchWriter createBatchWriter(String tableName, BatchWriterConfig config)
 +      throws TableNotFoundException {
 +    return context.createBatchWriter(tableName, config);
 +  }
 +
 +  @Override
 +  public MultiTableBatchWriter createMultiTableBatchWriter(long maxMemory, long maxLatency,
 +      int maxWriteThreads) {
 +    return new MultiTableBatchWriterImpl(context, new BatchWriterConfig().setMaxMemory(maxMemory)
 +        .setMaxLatency(maxLatency, TimeUnit.MILLISECONDS).setMaxWriteThreads(maxWriteThreads));
 +  }
 +
 +  @Override
 +  public MultiTableBatchWriter createMultiTableBatchWriter(BatchWriterConfig config) {
 +    return context.createMultiTableBatchWriter(config);
 +  }
 +
 +  @Override
 +  public ConditionalWriter createConditionalWriter(String tableName, ConditionalWriterConfig config)
 +      throws TableNotFoundException {
 +    return context.createConditionalWriter(tableName, config);
 +  }
 +
 +  @Override
 +  public Scanner createScanner(String tableName, Authorizations authorizations)
 +      throws TableNotFoundException {
 +    return context.createScanner(tableName, authorizations);
 +  }
 +
 +  @Override
 +  public String whoami() {
 +    return context.whoami();
 +  }
 +
 +  @Override
 +  public TableOperations tableOperations() {
 +    return context.tableOperations();
 +  }
 +
 +  @Override
 +  public NamespaceOperations namespaceOperations() {
 +    return context.namespaceOperations();
 +  }
 +
 +  @Override
 +  public SecurityOperations securityOperations() {
 +    return context.securityOperations();
 +  }
 +
 +  @Override
 +  public InstanceOperations instanceOperations() {
 +    return context.instanceOperations();
 +  }
 +
 +  @Override
 +  public ReplicationOperations replicationOperations() {
 +    return context.replicationOperations();
 +  }
 +
 +}
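
For code that still needs the deprecated Connector API, a small hedged sketch of bridging from
an AccumuloClient built with the new builder; the connection values are placeholders and the
entry point Accumulo.newClient() is assumed:

    import org.apache.accumulo.core.client.Accumulo;
    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.Connector;

    class ConnectorBridgeSketch {
      @SuppressWarnings("deprecation")
      public static void main(String[] args) throws Exception {
        try (AccumuloClient client = Accumulo.newClient()
            .to("myInstance", "zkhost:2181").as("someUser", "somePassword").build()) {
          // mirrors the Connector.from(context) call in ClientContext above
          Connector conn = Connector.from(client);
          System.out.println(conn.whoami());
        }
      }
    }
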
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/Credentials.java
index a2bd280,0000000..eaf39cd
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/Credentials.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/Credentials.java
@@@ -1,176 -1,0 +1,174 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.nio.ByteBuffer;
 +import java.util.Base64;
 +
 +import org.apache.accumulo.core.client.AccumuloClient;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 +import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
 +import org.apache.accumulo.core.securityImpl.thrift.TCredentials;
 +
 +/**
 + * A wrapper for internal use. This class carries the instance, principal, and authentication token
 + * for use in the public API, in a non-serialized form. This is important, so that the
 + * authentication token carried in an {@link AccumuloClient} can be destroyed, invalidating future
 + * RPC operations from that {@link AccumuloClient}.
 + * <p>
 + * See ACCUMULO-1312
 + *
 + * @since 1.6.0
 + */
 +public class Credentials {
 +
 +  private String principal;
 +  private AuthenticationToken token;
 +
 +  /**
 +   * Creates a new credentials object.
 +   *
 +   * @param principal
 +   *          unique identifier for the entity (e.g. a user or service) authorized for these
 +   *          credentials
 +   * @param token
 +   *          authentication token used to prove that the principal for these credentials has been
 +   *          properly verified
 +   */
 +  public Credentials(String principal, AuthenticationToken token) {
 +    this.principal = principal;
 +    this.token = token;
 +  }
 +
 +  /**
 +   * Gets the principal.
 +   *
 +   * @return unique identifier for the entity (e.g. a user or service) authorized for these
 +   *         credentials
 +   */
 +  public String getPrincipal() {
 +    return principal;
 +  }
 +
 +  /**
 +   * Gets the authentication token.
 +   *
 +   * @return authentication token used to prove that the principal for these credentials has been
 +   *         properly verified
 +   */
 +  public AuthenticationToken getToken() {
 +    return token;
 +  }
 +
 +  /**
 +   * Converts the current object to the relevant thrift type. The object returned from this contains
 +   * a non-destroyable version of the {@link AuthenticationToken}, so this should be used just
 +   * before placing on the wire, and references to it should be tightly controlled.
 +   *
 +   * @param instanceID
 +   *          Accumulo instance ID
 +   * @return Thrift credentials
 +   * @throws RuntimeException
 +   *           if the authentication token has been destroyed (expired)
 +   */
 +  public TCredentials toThrift(String instanceID) {
 +    TCredentials tCreds = new TCredentials(getPrincipal(), getToken().getClass().getName(),
 +        ByteBuffer.wrap(AuthenticationTokenSerializer.serialize(getToken())), instanceID);
 +    if (getToken().isDestroyed())
 +      throw new RuntimeException("Token has been destroyed",
 +          new AccumuloSecurityException(getPrincipal(), SecurityErrorCode.TOKEN_EXPIRED));
 +    return tCreds;
 +  }
 +
 +  /**
 +   * Converts a given thrift object to our internal Credentials representation.
 +   *
 +   * @param serialized
 +   *          a Thrift encoded set of credentials
 +   * @return a new Credentials instance; destroy the token when you're done.
 +   */
 +  public static Credentials fromThrift(TCredentials serialized) {
 +    return new Credentials(serialized.getPrincipal(), AuthenticationTokenSerializer
 +        .deserialize(serialized.getTokenClassName(), serialized.getToken()));
 +  }
 +
 +  /**
 +   * Converts the current object to a serialized form. The object returned from this contains a
 +   * non-destroyable version of the {@link AuthenticationToken}, so references to it should be
 +   * tightly controlled.
 +   *
 +   * @return serialized form of these credentials
 +   */
 +  public final String serialize() {
 +    return (getPrincipal() == null ? "-"
 +        : Base64.getEncoder().encodeToString(getPrincipal().getBytes(UTF_8)))
 +        + ":"
 +        + (getToken() == null ? "-"
 +            : Base64.getEncoder().encodeToString(getToken().getClass().getName().getBytes(UTF_8)))
-         + ":"
-         + (getToken() == null ? "-"
-             : Base64.getEncoder()
-                 .encodeToString(AuthenticationTokenSerializer.serialize(getToken())));
++        + ":" + (getToken() == null ? "-" : Base64.getEncoder()
++            .encodeToString(AuthenticationTokenSerializer.serialize(getToken())));
 +  }
 +
 +  /**
 +   * Converts the serialized form to an instance of {@link Credentials}. The original serialized
 +   * form will not be affected.
 +   *
 +   * @param serializedForm
 +   *          serialized form of credentials
 +   * @return deserialized credentials
 +   */
 +  public static final Credentials deserialize(String serializedForm) {
 +    String[] split = serializedForm.split(":", 3);
-     String principal = split[0].equals("-") ? null
-         : new String(Base64.getDecoder().decode(split[0]), UTF_8);
-     String tokenType = split[1].equals("-") ? null
-         : new String(Base64.getDecoder().decode(split[1]), UTF_8);
++    String principal =
++        split[0].equals("-") ? null : new String(Base64.getDecoder().decode(split[0]), UTF_8);
++    String tokenType =
++        split[1].equals("-") ? null : new String(Base64.getDecoder().decode(split[1]), UTF_8);
 +    AuthenticationToken token = null;
 +    if (!split[2].equals("-")) {
 +      byte[] tokenBytes = Base64.getDecoder().decode(split[2]);
 +      token = AuthenticationTokenSerializer.deserialize(tokenType, tokenBytes);
 +    }
 +    return new Credentials(principal, token);
 +  }
 +
 +  @Override
 +  public int hashCode() {
 +    return getPrincipal() == null ? 0 : getPrincipal().hashCode();
 +  }
 +
 +  @Override
 +  public boolean equals(Object obj) {
 +    if (obj == null || !(obj instanceof Credentials))
 +      return false;
 +    Credentials other = Credentials.class.cast(obj);
 +    boolean pEq = getPrincipal() == null ? (other.getPrincipal() == null)
 +        : (getPrincipal().equals(other.getPrincipal()));
 +    if (!pEq)
 +      return false;
 +    return getToken() == null ? (other.getToken() == null) : (getToken().equals(other.getToken()));
 +  }
 +
 +  @Override
 +  public String toString() {
 +    return getClass().getName() + ":" + getPrincipal() + ":"
 +        + (getToken() == null ? null : getToken().getClass().getName()) + ":<hidden>";
 +  }
 +}
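
As a quick illustration of the serialize/deserialize pair above, here is a minimal sketch using a made-up principal and password; note that Credentials is internal (clientImpl) API, so this is for illustration rather than application code:

    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.clientImpl.Credentials;

    public class CredentialsRoundTrip {
      public static void main(String[] args) {
        Credentials creds = new Credentials("alice", new PasswordToken("secret"));
        // base64(principal) ":" base64(token class name) ":" base64(serialized token)
        String wire = creds.serialize();
        Credentials restored = Credentials.deserialize(wire);
        // equals() compares principal and token, so the round trip is lossless
        System.out.println(creds.equals(restored));   // prints true
      }
    }
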
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/DelegationTokenImpl.java
index b0dc992,0000000..d2d055e
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/DelegationTokenImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/DelegationTokenImpl.java
@@@ -1,150 -1,0 +1,150 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static java.util.Objects.requireNonNull;
 +
 +import java.io.DataInput;
 +import java.io.DataOutput;
 +import java.io.IOException;
 +import java.util.Collections;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.client.security.tokens.DelegationToken;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.security.Credentials;
 +import org.apache.hadoop.security.UserGroupInformation;
 +import org.apache.hadoop.security.token.Token;
 +import org.apache.hadoop.security.token.TokenIdentifier;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +public class DelegationTokenImpl extends PasswordToken implements DelegationToken {
 +  private static final Logger log = LoggerFactory.getLogger(DelegationTokenImpl.class);
 +
 +  public static final String SERVICE_NAME = "AccumuloDelegationToken";
 +
 +  private AuthenticationTokenIdentifier identifier;
 +
 +  public DelegationTokenImpl() {
 +    super();
 +  }
 +
 +  public DelegationTokenImpl(byte[] delegationTokenPassword,
 +      AuthenticationTokenIdentifier identifier) {
 +    requireNonNull(delegationTokenPassword);
 +    requireNonNull(identifier);
 +    setPassword(delegationTokenPassword);
 +    this.identifier = identifier;
 +  }
 +
 +  public DelegationTokenImpl(String instanceID, UserGroupInformation user,
 +      AuthenticationTokenIdentifier identifier) {
 +    requireNonNull(instanceID);
 +    requireNonNull(user);
 +    requireNonNull(identifier);
 +
 +    Credentials creds = user.getCredentials();
-     Token<? extends TokenIdentifier> token = creds
-         .getToken(new Text(SERVICE_NAME + "-" + instanceID));
++    Token<? extends TokenIdentifier> token =
++        creds.getToken(new Text(SERVICE_NAME + "-" + instanceID));
 +    if (token == null) {
 +      throw new IllegalArgumentException(
 +          "Did not find Accumulo delegation token in provided UserGroupInformation");
 +    }
 +    setPasswordFromToken(token, identifier);
 +  }
 +
 +  public DelegationTokenImpl(Token<? extends TokenIdentifier> token,
 +      AuthenticationTokenIdentifier identifier) {
 +    requireNonNull(token);
 +    requireNonNull(identifier);
 +    setPasswordFromToken(token, identifier);
 +  }
 +
 +  private void setPasswordFromToken(Token<? extends TokenIdentifier> token,
 +      AuthenticationTokenIdentifier identifier) {
 +    if (!AuthenticationTokenIdentifier.TOKEN_KIND.equals(token.getKind())) {
 +      String msg = "Expected an AuthenticationTokenIdentifier but got a " + token.getKind();
 +      log.error(msg);
 +      throw new IllegalArgumentException(msg);
 +    }
 +
 +    setPassword(token.getPassword());
 +    this.identifier = identifier;
 +  }
 +
 +  /**
 +   * The identifier for this token; it may be null.
 +   */
 +  public AuthenticationTokenIdentifier getIdentifier() {
 +    return identifier;
 +  }
 +
 +  /**
 +   * The service name used to identify the {@link Token}
 +   */
 +  public Text getServiceName() {
 +    requireNonNull(identifier);
 +    return new Text(SERVICE_NAME + "-" + identifier.getInstanceId());
 +  }
 +
 +  @Override
 +  public void init(Properties properties) {
 +    // Encourage use of UserGroupInformation as entry point
 +  }
 +
 +  @Override
 +  public Set<TokenProperty> getProperties() {
 +    // Encourage use of UserGroupInformation as entry point
 +    return Collections.emptySet();
 +  }
 +
 +  @Override
 +  public void write(DataOutput out) throws IOException {
 +    super.write(out);
 +    identifier.write(out);
 +  }
 +
 +  @Override
 +  public void readFields(DataInput in) throws IOException {
 +    super.readFields(in);
 +    identifier = new AuthenticationTokenIdentifier();
 +    identifier.readFields(in);
 +  }
 +
 +  @Override
 +  public DelegationTokenImpl clone() {
 +    DelegationTokenImpl copy = (DelegationTokenImpl) super.clone();
 +    copy.setPassword(getPassword());
 +    copy.identifier = new AuthenticationTokenIdentifier(identifier);
 +    return copy;
 +  }
 +
 +  @Override
 +  public int hashCode() {
 +    return super.hashCode() ^ identifier.hashCode();
 +  }
 +
 +  @Override
 +  public boolean equals(Object obj) {
 +    // We assume we can cast obj to DelegationToken because the super.equals(obj) check ensures obj
 +    // is of the same type as this
 +    return super.equals(obj) && identifier.equals(((DelegationTokenImpl) obj).identifier);
 +  }
 +
 +}
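
DelegationTokenImpl instances are normally obtained through the public API rather than constructed directly; a hedged sketch, assuming an already-authenticated client on a Kerberos-enabled instance:

    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;

    public class DelegationTokenSketch {
      // 'client' must already be authenticated via Kerberos; only then can the
      // master issue a delegation token for use by workers without credentials.
      static AuthenticationToken fetchDelegationToken(AccumuloClient client) throws Exception {
        return client.securityOperations().getDelegationToken(new DelegationTokenConfig());
      }
    }
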
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/InstanceOperationsImpl.java
index 2754874,0000000..a452ce4
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/InstanceOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/InstanceOperationsImpl.java
@@@ -1,241 -1,0 +1,241 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.UUID;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.admin.ActiveCompaction;
 +import org.apache.accumulo.core.client.admin.ActiveScan;
 +import org.apache.accumulo.core.client.admin.InstanceOperations;
 +import org.apache.accumulo.core.clientImpl.thrift.ConfigurationType;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.rpc.ThriftUtil;
 +import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 +import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
 +import org.apache.accumulo.core.trace.TraceUtil;
 +import org.apache.accumulo.core.util.AddressUtil;
 +import org.apache.accumulo.core.util.HostAndPort;
 +import org.apache.accumulo.core.util.LocalityGroupUtil;
 +import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
 +import org.apache.accumulo.fate.zookeeper.ZooCache;
 +import org.apache.thrift.TException;
 +import org.apache.thrift.transport.TTransport;
 +import org.apache.thrift.transport.TTransportException;
 +import org.slf4j.LoggerFactory;
 +
 +/**
 + * Provides a class for administering the accumulo instance
 + */
 +public class InstanceOperationsImpl implements InstanceOperations {
 +  private final ClientContext context;
 +
 +  public InstanceOperationsImpl(ClientContext context) {
 +    checkArgument(context != null, "context is null");
 +    this.context = context;
 +  }
 +
 +  @Override
 +  public void setProperty(final String property, final String value)
 +      throws AccumuloException, AccumuloSecurityException, IllegalArgumentException {
 +    checkArgument(property != null, "property is null");
 +    checkArgument(value != null, "value is null");
 +    MasterClient.executeVoid(context, client -> client.setSystemProperty(TraceUtil.traceInfo(),
 +        context.rpcCreds(), property, value));
 +    checkLocalityGroups(property);
 +  }
 +
 +  @Override
 +  public void removeProperty(final String property)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(property != null, "property is null");
 +    MasterClient.executeVoid(context,
 +        client -> client.removeSystemProperty(TraceUtil.traceInfo(), context.rpcCreds(), property));
 +    checkLocalityGroups(property);
 +  }
 +
 +  void checkLocalityGroups(String propChanged) throws AccumuloSecurityException, AccumuloException {
 +    if (LocalityGroupUtil.isLocalityGroupProperty(propChanged)) {
 +      try {
 +        LocalityGroupUtil.checkLocalityGroups(getSystemConfiguration().entrySet());
 +      } catch (LocalityGroupConfigurationError | RuntimeException e) {
 +        LoggerFactory.getLogger(this.getClass()).warn("Changing '" + propChanged
 +            + "' resulted in bad locality group config. This may be a transient situation since "
 +            + "the config spreads over multiple properties. Setting properties in a different "
 +            + "order may help. Even though this warning was displayed, the property was updated. "
 +            + "Please check your config to ensure consistency.", e);
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public Map<String,String> getSystemConfiguration()
 +      throws AccumuloException, AccumuloSecurityException {
 +    return ServerClient.execute(context, client -> client.getConfiguration(TraceUtil.traceInfo(),
 +        context.rpcCreds(), ConfigurationType.CURRENT));
 +  }
 +
 +  @Override
 +  public Map<String,String> getSiteConfiguration()
 +      throws AccumuloException, AccumuloSecurityException {
 +    return ServerClient.execute(context, client -> client.getConfiguration(TraceUtil.traceInfo(),
 +        context.rpcCreds(), ConfigurationType.SITE));
 +  }
 +
 +  @Override
 +  public List<String> getTabletServers() {
 +    ZooCache cache = context.getZooCache();
 +    String path = context.getZooKeeperRoot() + Constants.ZTSERVERS;
 +    List<String> results = new ArrayList<>();
 +    for (String candidate : cache.getChildren(path)) {
 +      List<String> children = cache.getChildren(path + "/" + candidate);
 +      if (children != null && children.size() > 0) {
 +        List<String> copy = new ArrayList<>(children);
 +        Collections.sort(copy);
 +        byte[] data = cache.get(path + "/" + candidate + "/" + copy.get(0));
 +        if (data != null && !"master".equals(new String(data, UTF_8))) {
 +          results.add(candidate);
 +        }
 +      }
 +    }
 +    return results;
 +  }
 +
 +  @Override
 +  public List<ActiveScan> getActiveScans(String tserver)
 +      throws AccumuloException, AccumuloSecurityException {
 +    final HostAndPort parsedTserver = HostAndPort.fromString(tserver);
 +    Client client = null;
 +    try {
 +      client = ThriftUtil.getTServerClient(parsedTserver, context);
 +
 +      List<ActiveScan> as = new ArrayList<>();
 +      for (org.apache.accumulo.core.tabletserver.thrift.ActiveScan activeScan : client
 +          .getActiveScans(TraceUtil.traceInfo(), context.rpcCreds())) {
 +        try {
 +          as.add(new ActiveScanImpl(context, activeScan));
 +        } catch (TableNotFoundException e) {
 +          throw new AccumuloException(e);
 +        }
 +      }
 +      return as;
 +    } catch (TTransportException e) {
 +      throw new AccumuloException(e);
 +    } catch (ThriftSecurityException e) {
 +      throw new AccumuloSecurityException(e.user, e.code, e);
 +    } catch (TException e) {
 +      throw new AccumuloException(e);
 +    } finally {
 +      if (client != null)
 +        ThriftUtil.returnClient(client);
 +    }
 +  }
 +
 +  @Override
 +  public boolean testClassLoad(final String className, final String asTypeName)
 +      throws AccumuloException, AccumuloSecurityException {
 +    return ServerClient.execute(context, client -> client.checkClass(TraceUtil.traceInfo(),
 +        context.rpcCreds(), className, asTypeName));
 +  }
 +
 +  @Override
 +  public List<ActiveCompaction> getActiveCompactions(String tserver)
 +      throws AccumuloException, AccumuloSecurityException {
 +    final HostAndPort parsedTserver = HostAndPort.fromString(tserver);
 +    Client client = null;
 +    try {
 +      client = ThriftUtil.getTServerClient(parsedTserver, context);
 +
 +      List<ActiveCompaction> as = new ArrayList<>();
 +      for (org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction activeCompaction : client
 +          .getActiveCompactions(TraceUtil.traceInfo(), context.rpcCreds())) {
 +        as.add(new ActiveCompactionImpl(context, activeCompaction));
 +      }
 +      return as;
 +    } catch (TTransportException e) {
 +      throw new AccumuloException(e);
 +    } catch (ThriftSecurityException e) {
 +      throw new AccumuloSecurityException(e.user, e.code, e);
 +    } catch (TException e) {
 +      throw new AccumuloException(e);
 +    } finally {
 +      if (client != null)
 +        ThriftUtil.returnClient(client);
 +    }
 +  }
 +
 +  @Override
 +  public void ping(String tserver) throws AccumuloException {
 +    TTransport transport = null;
 +    try {
 +      transport = ThriftUtil.createTransport(AddressUtil.parseAddress(tserver, false), context);
-       TabletClientService.Client client = ThriftUtil
-           .createClient(new TabletClientService.Client.Factory(), transport);
++      TabletClientService.Client client =
++          ThriftUtil.createClient(new TabletClientService.Client.Factory(), transport);
 +      client.getTabletServerStatus(TraceUtil.traceInfo(), context.rpcCreds());
 +    } catch (TException e) {
 +      throw new AccumuloException(e);
 +    } finally {
 +      if (transport != null) {
 +        transport.close();
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public void waitForBalance() throws AccumuloException {
 +    try {
 +      MasterClient.executeVoid(context, client -> client.waitForBalance(TraceUtil.traceInfo()));
 +    } catch (AccumuloSecurityException ex) {
 +      // should never happen
 +      throw new RuntimeException("Unexpected exception thrown", ex);
 +    }
 +
 +  }
 +
 +  /**
 +   * Given a zooCache and instanceId, look up the instance name.
 +   */
 +  public static String lookupInstanceName(ZooCache zooCache, UUID instanceId) {
 +    checkArgument(zooCache != null, "zooCache is null");
 +    checkArgument(instanceId != null, "instanceId is null");
 +    for (String name : zooCache.getChildren(Constants.ZROOT + Constants.ZINSTANCES)) {
 +      String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + name;
 +      byte[] bytes = zooCache.get(instanceNamePath);
 +      UUID iid = UUID.fromString(new String(bytes, UTF_8));
 +      if (iid.equals(instanceId)) {
 +        return name;
 +      }
 +    }
 +    return null;
 +  }
 +
 +  @Override
 +  public String getInstanceID() {
 +
 +    return context.getInstanceID();
 +  }
 +}
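
A minimal sketch of the same operations through the public InstanceOperations interface; 'client' is assumed to be an open AccumuloClient, and the property value is only an example:

    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.admin.ActiveScan;
    import org.apache.accumulo.core.client.admin.InstanceOperations;

    public class InstanceOpsSketch {
      static void dumpActiveScans(AccumuloClient client) throws Exception {
        InstanceOperations iops = client.instanceOperations();
        // System-wide default; per-table or per-namespace settings still override it
        iops.setProperty("table.durability", "flush");
        for (String tserver : iops.getTabletServers()) {
          for (ActiveScan scan : iops.getActiveScans(tserver)) {
            System.out.println(tserver + " is scanning " + scan.getTable());
          }
        }
      }
    }
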
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/NamespaceOperationsHelper.java
index 80ec3e9,0000000..79c7dfd
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/NamespaceOperationsHelper.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/NamespaceOperationsHelper.java
@@@ -1,234 -1,0 +1,234 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import java.util.EnumSet;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.TreeMap;
 +import java.util.TreeSet;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.NamespaceNotFoundException;
 +import org.apache.accumulo.core.client.admin.NamespaceOperations;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 +
 +public abstract class NamespaceOperationsHelper implements NamespaceOperations {
 +
 +  @Override
 +  public String systemNamespace() {
 +    return Namespace.ACCUMULO.name();
 +  }
 +
 +  @Override
 +  public String defaultNamespace() {
 +    return Namespace.DEFAULT.name();
 +  }
 +
 +  @Override
 +  public void attachIterator(String namespace, IteratorSetting setting)
 +      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
 +    attachIterator(namespace, setting, EnumSet.allOf(IteratorScope.class));
 +  }
 +
 +  @Override
 +  public void attachIterator(String namespace, IteratorSetting setting,
 +      EnumSet<IteratorScope> scopes)
 +      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
 +    checkIteratorConflicts(namespace, setting, scopes);
 +    for (IteratorScope scope : scopes) {
 +      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
 +          scope.name().toLowerCase(), setting.getName());
 +      for (Entry<String,String> prop : setting.getOptions().entrySet()) {
 +        this.setProperty(namespace, root + ".opt." + prop.getKey(), prop.getValue());
 +      }
 +      this.setProperty(namespace, root, setting.getPriority() + "," + setting.getIteratorClass());
 +    }
 +  }
 +
 +  @Override
 +  public void removeIterator(String namespace, String name, EnumSet<IteratorScope> scopes)
 +      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
 +    if (!exists(namespace))
 +      throw new NamespaceNotFoundException(null, namespace, null);
 +    Map<String,String> copy = new TreeMap<>();
 +    for (Entry<String,String> property : this.getProperties(namespace)) {
 +      copy.put(property.getKey(), property.getValue());
 +    }
 +    for (IteratorScope scope : scopes) {
 +      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
 +          scope.name().toLowerCase(), name);
 +      for (Entry<String,String> property : copy.entrySet()) {
 +        if (property.getKey().equals(root) || property.getKey().startsWith(root + ".opt."))
 +          this.removeProperty(namespace, property.getKey());
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public IteratorSetting getIteratorSetting(String namespace, String name, IteratorScope scope)
 +      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
 +    if (!exists(namespace))
 +      throw new NamespaceNotFoundException(null, namespace, null);
 +    int priority = -1;
 +    String classname = null;
 +    Map<String,String> settings = new HashMap<>();
 +
-     String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
-         scope.name().toLowerCase(), name);
++    String root =
++        String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase(), name);
 +    String opt = root + ".opt.";
 +    for (Entry<String,String> property : this.getProperties(namespace)) {
 +      if (property.getKey().equals(root)) {
 +        String[] parts = property.getValue().split(",");
 +        if (parts.length != 2) {
 +          throw new AccumuloException("Bad value for iterator setting: " + property.getValue());
 +        }
 +        priority = Integer.parseInt(parts[0]);
 +        classname = parts[1];
 +      } else if (property.getKey().startsWith(opt)) {
 +        settings.put(property.getKey().substring(opt.length()), property.getValue());
 +      }
 +    }
 +    if (priority <= 0 || classname == null) {
 +      return null;
 +    }
 +    return new IteratorSetting(priority, name, classname, settings);
 +  }
 +
 +  @Override
 +  public Map<String,EnumSet<IteratorScope>> listIterators(String namespace)
 +      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
 +    if (!exists(namespace))
 +      throw new NamespaceNotFoundException(null, namespace, null);
 +    Map<String,EnumSet<IteratorScope>> result = new TreeMap<>();
 +    for (Entry<String,String> property : this.getProperties(namespace)) {
 +      String name = property.getKey();
 +      String[] parts = name.split("\\.");
 +      if (parts.length == 4) {
 +        if (parts[0].equals("table") && parts[1].equals("iterator")) {
 +          IteratorScope scope = IteratorScope.valueOf(parts[2]);
 +          if (!result.containsKey(parts[3]))
 +            result.put(parts[3], EnumSet.noneOf(IteratorScope.class));
 +          result.get(parts[3]).add(scope);
 +        }
 +      }
 +    }
 +    return result;
 +  }
 +
 +  @Override
 +  public void checkIteratorConflicts(String namespace, IteratorSetting setting,
 +      EnumSet<IteratorScope> scopes)
 +      throws AccumuloException, NamespaceNotFoundException, AccumuloSecurityException {
 +    if (!exists(namespace))
 +      throw new NamespaceNotFoundException(null, namespace, null);
 +    for (IteratorScope scope : scopes) {
-       String scopeStr = String.format("%s%s", Property.TABLE_ITERATOR_PREFIX,
-           scope.name().toLowerCase());
++      String scopeStr =
++          String.format("%s%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase());
 +      String nameStr = String.format("%s.%s", scopeStr, setting.getName());
 +      String optStr = String.format("%s.opt.", nameStr);
 +      Map<String,String> optionConflicts = new TreeMap<>();
 +      for (Entry<String,String> property : this.getProperties(namespace)) {
 +        if (property.getKey().startsWith(scopeStr)) {
 +          if (property.getKey().equals(nameStr))
 +            throw new AccumuloException(new IllegalArgumentException("iterator name conflict for "
 +                + setting.getName() + ": " + property.getKey() + "=" + property.getValue()));
 +          if (property.getKey().startsWith(optStr))
 +            optionConflicts.put(property.getKey(), property.getValue());
 +          if (property.getKey().contains(".opt."))
 +            continue;
 +          String[] parts = property.getValue().split(",");
 +          if (parts.length != 2)
 +            throw new AccumuloException("Bad value for existing iterator setting: "
 +                + property.getKey() + "=" + property.getValue());
 +          try {
 +            if (Integer.parseInt(parts[0]) == setting.getPriority())
 +              throw new AccumuloException(new IllegalArgumentException(
 +                  "iterator priority conflict: " + property.getKey() + "=" + property.getValue()));
 +          } catch (NumberFormatException e) {
 +            throw new AccumuloException("Bad value for existing iterator setting: "
 +                + property.getKey() + "=" + property.getValue());
 +          }
 +        }
 +      }
 +      if (optionConflicts.size() > 0)
 +        throw new AccumuloException(new IllegalArgumentException(
 +            "iterator options conflict for " + setting.getName() + ": " + optionConflicts));
 +    }
 +  }
 +
 +  @Override
 +  public int addConstraint(String namespace, String constraintClassName)
 +      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
 +    TreeSet<Integer> constraintNumbers = new TreeSet<>();
 +    TreeMap<String,Integer> constraintClasses = new TreeMap<>();
 +    int i;
 +    for (Entry<String,String> property : this.getProperties(namespace)) {
 +      if (property.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.toString())) {
 +        try {
 +          i = Integer.parseInt(
 +              property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length()));
 +        } catch (NumberFormatException e) {
 +          throw new AccumuloException("Bad key for existing constraint: " + property);
 +        }
 +        constraintNumbers.add(i);
 +        constraintClasses.put(property.getValue(), i);
 +      }
 +    }
 +    i = 1;
 +    while (constraintNumbers.contains(i))
 +      i++;
 +    if (constraintClasses.containsKey(constraintClassName))
 +      throw new AccumuloException(
 +          "Constraint " + constraintClassName + " already exists for namespace " + namespace
 +              + " with number " + constraintClasses.get(constraintClassName));
 +    this.setProperty(namespace, Property.TABLE_CONSTRAINT_PREFIX.toString() + i,
 +        constraintClassName);
 +    return i;
 +  }
 +
 +  @Override
 +  public void removeConstraint(String namespace, int number)
 +      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
 +    this.removeProperty(namespace, Property.TABLE_CONSTRAINT_PREFIX.toString() + number);
 +  }
 +
 +  @Override
 +  public Map<String,Integer> listConstraints(String namespace)
 +      throws AccumuloException, NamespaceNotFoundException, AccumuloSecurityException {
 +    Map<String,Integer> constraints = new TreeMap<>();
 +    for (Entry<String,String> property : this.getProperties(namespace)) {
 +      if (property.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.toString())) {
 +        if (constraints.containsKey(property.getValue()))
 +          throw new AccumuloException("Same constraint configured twice: " + property.getKey() + "="
 +              + Property.TABLE_CONSTRAINT_PREFIX + constraints.get(property.getValue()) + "="
 +              + property.getKey());
 +        try {
 +          constraints.put(property.getValue(), Integer.parseInt(
 +              property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length())));
 +        } catch (NumberFormatException e) {
 +          throw new AccumuloException("Bad key for existing constraint: " + property);
 +        }
 +      }
 +    }
 +    return constraints;
 +  }
 +}
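
The helper above expands iterator settings into table.iterator.<scope>.<name> properties (with options under .opt.) on the namespace; a short sketch, assuming a namespace called "myns" and using AgeOffFilter's "ttl" option as the example payload:

    import java.util.EnumSet;

    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.admin.NamespaceOperations;
    import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
    import org.apache.accumulo.core.iterators.user.AgeOffFilter;

    public class NamespaceIteratorSketch {
      static void addAgeOff(AccumuloClient client) throws Exception {
        NamespaceOperations nsOps = client.namespaceOperations();
        if (!nsOps.exists("myns")) {
          nsOps.create("myns");
        }
        // Stored as table.iterator.<scope>.ageoff plus table.iterator.<scope>.ageoff.opt.ttl
        IteratorSetting ageOff = new IteratorSetting(25, "ageoff", AgeOffFilter.class);
        ageOff.addOption("ttl", "3600000");   // one hour, in milliseconds
        nsOps.attachIterator("myns", ageOff,
            EnumSet.of(IteratorScope.scan, IteratorScope.minc, IteratorScope.majc));
      }
    }
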
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
index 843c72c,0000000..9abe346
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
@@@ -1,364 -1,0 +1,365 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
 +
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map.Entry;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.SampleNotPresentException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.sample.SamplerConfiguration;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.ConfigurationCopy;
 +import org.apache.accumulo.core.conf.IterConfigUtil;
 +import org.apache.accumulo.core.conf.IterLoad;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.crypto.CryptoServiceFactory;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyValue;
 +import org.apache.accumulo.core.data.PartialKey;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.file.FileOperations;
 +import org.apache.accumulo.core.file.FileSKVIterator;
 +import org.apache.accumulo.core.iterators.IteratorEnvironment;
 +import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.iterators.system.MultiIterator;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
 +import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.accumulo.core.util.LocalityGroupUtil;
 +import org.apache.accumulo.core.util.SystemIteratorUtil;
 +import org.apache.accumulo.core.volume.VolumeConfiguration;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.io.Text;
 +
 +class OfflineIterator implements Iterator<Entry<Key,Value>> {
 +
 +  static class OfflineIteratorEnvironment implements IteratorEnvironment {
 +
 +    private final Authorizations authorizations;
 +    private AccumuloConfiguration conf;
 +    private boolean useSample;
 +    private SamplerConfiguration sampleConf;
 +
 +    public OfflineIteratorEnvironment(Authorizations auths, AccumuloConfiguration acuTableConf,
 +        boolean useSample, SamplerConfiguration samplerConf) {
 +      this.authorizations = auths;
 +      this.conf = acuTableConf;
 +      this.useSample = useSample;
 +      this.sampleConf = samplerConf;
 +    }
 +
 +    @Deprecated
 +    @Override
 +    public AccumuloConfiguration getConfig() {
 +      return conf;
 +    }
 +
 +    @Override
 +    public IteratorScope getIteratorScope() {
 +      return IteratorScope.scan;
 +    }
 +
 +    @Override
 +    public boolean isFullMajorCompaction() {
 +      return false;
 +    }
 +
 +    @Override
 +    public boolean isUserCompaction() {
 +      return false;
 +    }
 +
 +    private ArrayList<SortedKeyValueIterator<Key,Value>> topLevelIterators = new ArrayList<>();
 +
 +    @Deprecated
 +    @Override
 +    public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
 +      topLevelIterators.add(iter);
 +    }
 +
 +    @Override
 +    public Authorizations getAuthorizations() {
 +      return authorizations;
 +    }
 +
 +    SortedKeyValueIterator<Key,Value> getTopLevelIterator(SortedKeyValueIterator<Key,Value> iter) {
 +      if (topLevelIterators.isEmpty())
 +        return iter;
 +      ArrayList<SortedKeyValueIterator<Key,Value>> allIters = new ArrayList<>(topLevelIterators);
 +      allIters.add(iter);
 +      return new MultiIterator(allIters, false);
 +    }
 +
 +    @Override
 +    public boolean isSamplingEnabled() {
 +      return useSample;
 +    }
 +
 +    @Override
 +    public SamplerConfiguration getSamplerConfiguration() {
 +      return sampleConf;
 +    }
 +
 +    @Override
 +    public IteratorEnvironment cloneWithSamplingEnabled() {
 +      if (sampleConf == null)
 +        throw new SampleNotPresentException();
 +      return new OfflineIteratorEnvironment(authorizations, conf, true, sampleConf);
 +    }
 +  }
 +
 +  private SortedKeyValueIterator<Key,Value> iter;
 +  private Range range;
 +  private KeyExtent currentExtent;
 +  private TableId tableId;
 +  private Authorizations authorizations;
 +  private ClientContext context;
 +  private ScannerOptions options;
 +  private ArrayList<SortedKeyValueIterator<Key,Value>> readers;
 +  private AccumuloConfiguration config;
 +
 +  public OfflineIterator(ScannerOptions options, ClientContext context,
 +      Authorizations authorizations, Text table, Range range) {
 +    this.options = new ScannerOptions(options);
 +    this.context = context;
 +    this.range = range;
 +
 +    if (this.options.fetchedColumns.size() > 0) {
-       this.range = range.bound(this.options.fetchedColumns.first(),
-           this.options.fetchedColumns.last());
++      this.range =
++          range.bound(this.options.fetchedColumns.first(), this.options.fetchedColumns.last());
 +    }
 +
 +    this.tableId = TableId.of(table.toString());
 +    this.authorizations = authorizations;
 +    this.readers = new ArrayList<>();
 +
 +    try {
 +      config = new ConfigurationCopy(context.instanceOperations().getSiteConfiguration());
 +      nextTablet();
 +
 +      while (iter != null && !iter.hasTop())
 +        nextTablet();
 +
 +    } catch (Exception e) {
 +      if (e instanceof RuntimeException)
 +        throw (RuntimeException) e;
 +      throw new RuntimeException(e);
 +    }
 +  }
 +
 +  @Override
 +  public boolean hasNext() {
 +    return iter != null && iter.hasTop();
 +  }
 +
 +  @Override
 +  public Entry<Key,Value> next() {
 +    try {
 +      byte[] v = iter.getTopValue().get();
 +      // copy just like tablet server does, do this before calling next
 +      KeyValue ret = new KeyValue(new Key(iter.getTopKey()), Arrays.copyOf(v, v.length));
 +
 +      iter.next();
 +
 +      while (iter != null && !iter.hasTop())
 +        nextTablet();
 +
 +      return ret;
 +    } catch (Exception e) {
 +      throw new RuntimeException(e);
 +    }
 +  }
 +
 +  private void nextTablet() throws TableNotFoundException, AccumuloException, IOException {
 +
 +    Range nextRange = null;
 +
 +    if (currentExtent == null) {
 +      Text startRow;
 +
 +      if (range.getStartKey() != null)
 +        startRow = range.getStartKey().getRow();
 +      else
 +        startRow = new Text();
 +
 +      nextRange = new Range(TabletsSection.getRow(tableId, startRow), true, null, false);
 +    } else {
 +
 +      if (currentExtent.getEndRow() == null) {
 +        iter = null;
 +        return;
 +      }
 +
 +      if (range.afterEndKey(new Key(currentExtent.getEndRow()).followingKey(PartialKey.ROW))) {
 +        iter = null;
 +        return;
 +      }
 +
 +      nextRange = new Range(currentExtent.getMetadataEntry(), false, null, false);
 +    }
 +
 +    TabletMetadata tablet = getTabletFiles(nextRange);
 +
 +    while (tablet.getLocation() != null) {
 +      if (Tables.getTableState(context, tableId) != TableState.OFFLINE) {
 +        Tables.clearCache(context);
 +        if (Tables.getTableState(context, tableId) != TableState.OFFLINE) {
 +          throw new AccumuloException("Table is online " + tableId
 +              + " cannot scan tablet in offline mode " + tablet.getExtent());
 +        }
 +      }
 +
 +      sleepUninterruptibly(250, TimeUnit.MILLISECONDS);
 +
 +      tablet = getTabletFiles(nextRange);
 +    }
 +
 +    if (!tablet.getExtent().getTableId().equals(tableId)) {
 +      throw new AccumuloException(
 +          " did not find tablets for table " + tableId + " " + tablet.getExtent());
 +    }
 +
 +    if (currentExtent != null && !tablet.getExtent().isPreviousExtent(currentExtent))
 +      throw new AccumuloException(
 +          " " + currentExtent + " is not previous extent " + tablet.getExtent());
 +
 +    // Old property is only used to resolve relative paths into absolute paths. For systems upgraded
 +    // with relative paths, it's assumed that correct instance.dfs.{uri,dir} is still correct in the
 +    // configuration
 +    @SuppressWarnings("deprecation")
 +    String tablesDir = config.get(Property.INSTANCE_DFS_DIR) + Constants.HDFS_TABLES_DIR;
 +
 +    List<String> absFiles = new ArrayList<>();
 +    for (String relPath : tablet.getFiles()) {
 +      if (relPath.contains(":")) {
 +        absFiles.add(relPath);
 +      } else {
 +        // handle old-style relative paths
 +        if (relPath.startsWith("..")) {
 +          absFiles.add(tablesDir + relPath.substring(2));
 +        } else {
 +          absFiles.add(tablesDir + "/" + tableId + relPath);
 +        }
 +      }
 +    }
 +
 +    iter = createIterator(tablet.getExtent(), absFiles);
 +    iter.seek(range, LocalityGroupUtil.families(options.fetchedColumns),
 +        options.fetchedColumns.size() != 0);
 +    currentExtent = tablet.getExtent();
 +
 +  }
 +
 +  private TabletMetadata getTabletFiles(Range nextRange) {
 +    try (TabletsMetadata tablets = TabletsMetadata.builder().scanMetadataTable()
 +        .overRange(nextRange).fetchFiles().fetchLocation().fetchPrev().build(context)) {
 +      return tablets.iterator().next();
 +    }
 +  }
 +
 +  private SortedKeyValueIterator<Key,Value> createIterator(KeyExtent extent, List<String> absFiles)
 +      throws TableNotFoundException, AccumuloException, IOException {
 +
 +    // TODO share code w/ tablet - ACCUMULO-1303
 +
 +    // possible race condition here, if table is renamed
 +    String tableName = Tables.getTableName(context, tableId);
-     AccumuloConfiguration acuTableConf = new ConfigurationCopy(
-         context.tableOperations().getProperties(tableName));
++    AccumuloConfiguration acuTableConf =
++        new ConfigurationCopy(context.tableOperations().getProperties(tableName));
 +
 +    Configuration conf = context.getHadoopConf();
 +
 +    for (SortedKeyValueIterator<Key,Value> reader : readers) {
 +      ((FileSKVIterator) reader).close();
 +    }
 +
 +    readers.clear();
 +
 +    SamplerConfiguration scannerSamplerConfig = options.getSamplerConfiguration();
-     SamplerConfigurationImpl scannerSamplerConfigImpl = scannerSamplerConfig == null ? null
-         : new SamplerConfigurationImpl(scannerSamplerConfig);
-     SamplerConfigurationImpl samplerConfImpl = SamplerConfigurationImpl
-         .newSamplerConfig(acuTableConf);
++    SamplerConfigurationImpl scannerSamplerConfigImpl =
++        scannerSamplerConfig == null ? null : new SamplerConfigurationImpl(scannerSamplerConfig);
++    SamplerConfigurationImpl samplerConfImpl =
++        SamplerConfigurationImpl.newSamplerConfig(acuTableConf);
 +
 +    if (scannerSamplerConfigImpl != null
 +        && ((samplerConfImpl != null && !scannerSamplerConfigImpl.equals(samplerConfImpl))
 +            || samplerConfImpl == null)) {
 +      throw new SampleNotPresentException();
 +    }
 +
 +    // TODO need to close files - ACCUMULO-1303
 +    for (String file : absFiles) {
 +      FileSystem fs = VolumeConfiguration.getVolume(file, conf, config).getFileSystem();
 +      FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
 +          .forFile(file, fs, conf, CryptoServiceFactory.newDefaultInstance())
 +          .withTableConfiguration(acuTableConf).build();
 +      if (scannerSamplerConfigImpl != null) {
 +        reader = reader.getSample(scannerSamplerConfigImpl);
 +        if (reader == null)
 +          throw new SampleNotPresentException();
 +      }
 +      readers.add(reader);
 +    }
 +
 +    MultiIterator multiIter = new MultiIterator(readers, extent);
 +
-     OfflineIteratorEnvironment iterEnv = new OfflineIteratorEnvironment(authorizations,
-         acuTableConf, false,
-         samplerConfImpl == null ? null : samplerConfImpl.toSamplerConfiguration());
++    OfflineIteratorEnvironment iterEnv =
++        new OfflineIteratorEnvironment(authorizations, acuTableConf, false,
++            samplerConfImpl == null ? null : samplerConfImpl.toSamplerConfiguration());
 +
 +    byte[] defaultSecurityLabel;
-     ColumnVisibility cv = new ColumnVisibility(
-         acuTableConf.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
++    ColumnVisibility cv =
++        new ColumnVisibility(acuTableConf.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
 +    defaultSecurityLabel = cv.getExpression();
 +
-     SortedKeyValueIterator<Key,Value> visFilter = SystemIteratorUtil.setupSystemScanIterators(
-         multiIter, new HashSet<>(options.fetchedColumns), authorizations, defaultSecurityLabel,
-         acuTableConf);
++    SortedKeyValueIterator<Key,
++        Value> visFilter = SystemIteratorUtil.setupSystemScanIterators(multiIter,
++            new HashSet<>(options.fetchedColumns), authorizations, defaultSecurityLabel,
++            acuTableConf);
 +    IterLoad iterLoad = IterConfigUtil.loadIterConf(IteratorScope.scan,
 +        options.serverSideIteratorList, options.serverSideIteratorOptions, acuTableConf);
 +
 +    return iterEnv.getTopLevelIterator(IterConfigUtil.loadIterators(visFilter,
 +        iterLoad.iterEnv(iterEnv).useAccumuloClassLoader(false)));
 +  }
 +
 +  @Override
 +  public void remove() {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +}
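
OfflineIterator reads tablet files directly and refuses to run while the table is online; a small hedged sketch of the public-API calls that bracket such a read (the table name is a placeholder):

    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.admin.TableOperations;

    public class OfflineScanSketch {
      static void withTableOffline(AccumuloClient client) throws Exception {
        TableOperations tops = client.tableOperations();
        tops.offline("mytable", true);   // block until every tablet is unloaded
        try {
          // ... perform the offline read here ...
        } finally {
          tops.online("mytable", true);  // bring the table back when finished
        }
      }
    }
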
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ReplicationClient.java
index d0209f2,0000000..6b5f4a3
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ReplicationClient.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ReplicationClient.java
@@@ -1,202 -1,0 +1,202 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static java.util.Objects.requireNonNull;
 +
 +import java.util.List;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.replication.thrift.ReplicationCoordinator;
 +import org.apache.accumulo.core.replication.thrift.ReplicationServicer;
 +import org.apache.accumulo.core.rpc.ThriftUtil;
 +import org.apache.accumulo.core.util.HostAndPort;
 +import org.apache.accumulo.fate.zookeeper.ZooReader;
 +import org.apache.thrift.TServiceClient;
 +import org.apache.thrift.transport.TTransportException;
 +import org.apache.zookeeper.KeeperException;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +public class ReplicationClient {
 +  private static final Logger log = LoggerFactory.getLogger(ReplicationClient.class);
 +
 +  /**
 +   * @param context
 +   *          the client session for the peer replicant
 +   * @return Client to the ReplicationCoordinator service
 +   */
-   public static ReplicationCoordinator.Client getCoordinatorConnectionWithRetry(
-       ClientContext context) throws AccumuloException {
++  public static ReplicationCoordinator.Client
++      getCoordinatorConnectionWithRetry(ClientContext context) throws AccumuloException {
 +    requireNonNull(context);
 +
 +    for (int attempts = 1; attempts <= 10; attempts++) {
 +
 +      ReplicationCoordinator.Client result = getCoordinatorConnection(context);
 +      if (result != null)
 +        return result;
 +      log.debug("Could not get ReplicationCoordinator connection to {}, will retry",
 +          context.getInstanceName());
 +      try {
 +        Thread.sleep(attempts * 250L);
 +      } catch (InterruptedException e) {
 +        throw new AccumuloException(e);
 +      }
 +    }
 +
 +    throw new AccumuloException(
 +        "Timed out trying to communicate with master from " + context.getInstanceName());
 +  }
 +
 +  public static ReplicationCoordinator.Client getCoordinatorConnection(ClientContext context) {
 +    List<String> locations = context.getMasterLocations();
 +
 +    if (locations.size() == 0) {
 +      log.debug("No masters for replication to instance {}", context.getInstanceName());
 +      return null;
 +    }
 +
 +    // This is the master thrift service, we just want the hostname, not the port
 +    String masterThriftService = locations.get(0);
 +    if (masterThriftService.endsWith(":0")) {
 +      log.warn("Master found for {} did not have real location {}", context.getInstanceName(),
 +          masterThriftService);
 +      return null;
 +    }
 +
 +    String zkPath = context.getZooKeeperRoot() + Constants.ZMASTER_REPLICATION_COORDINATOR_ADDR;
 +    String replCoordinatorAddr;
 +
 +    log.debug("Using ZooKeeper quorum at {} with path {} to find peer Master information",
 +        context.getZooKeepers(), zkPath);
 +
 +    // Get the coordinator port for the master we're trying to connect to
 +    try {
-       ZooReader reader = new ZooReader(context.getZooKeepers(),
-           context.getZooKeepersSessionTimeOut());
++      ZooReader reader =
++          new ZooReader(context.getZooKeepers(), context.getZooKeepersSessionTimeOut());
 +      replCoordinatorAddr = new String(reader.getData(zkPath, null), UTF_8);
 +    } catch (KeeperException | InterruptedException e) {
 +      log.error("Could not fetch remote coordinator port", e);
 +      return null;
 +    }
 +
 +    // Throw the hostname and port through HostAndPort to get some normalization
 +    HostAndPort coordinatorAddr = HostAndPort.fromString(replCoordinatorAddr);
 +
 +    log.debug("Connecting to master at {}", coordinatorAddr);
 +
 +    try {
 +      // Master requests can take a long time: don't ever time out
 +      return ThriftUtil.getClientNoTimeout(new ReplicationCoordinator.Client.Factory(),
 +          coordinatorAddr, context);
 +    } catch (TTransportException tte) {
 +      log.debug("Failed to connect to master coordinator service ({})", coordinatorAddr, tte);
 +      return null;
 +    }
 +  }
 +
 +  /**
 +   * Attempt a single time to create a ReplicationServicer client to the given host
 +   *
 +   * @param context
 +   *          The client session for the peer replicant
 +   * @param server
 +   *          Server to connect to
 +   * @param timeout
 +   *          RPC timeout in milliseconds
 +   * @return A ReplicationServicer client to the given host in the given instance
 +   */
 +  public static ReplicationServicer.Client getServicerConnection(ClientContext context,
 +      HostAndPort server, long timeout) throws TTransportException {
 +    requireNonNull(context);
 +    requireNonNull(server);
 +
 +    try {
 +      return ThriftUtil.getClient(new ReplicationServicer.Client.Factory(), server, context,
 +          timeout);
 +    } catch (TTransportException tte) {
 +      log.debug("Failed to connect to servicer ({}), will retry...", server, tte);
 +      throw tte;
 +    }
 +  }
 +
 +  private static void close(TServiceClient client) {
 +    if (client != null && client.getInputProtocol() != null
 +        && client.getInputProtocol().getTransport() != null) {
 +      ThriftTransportPool.getInstance().returnTransport(client.getInputProtocol().getTransport());
 +    } else {
 +      log.debug("Attempt to close null connection to the remote system", new Exception());
 +    }
 +  }
 +
 +  public static <T> T executeCoordinatorWithReturn(ClientContext context,
 +      ClientExecReturn<T,ReplicationCoordinator.Client> exec)
 +      throws AccumuloException, AccumuloSecurityException {
 +    ReplicationCoordinator.Client client = null;
 +    for (int i = 0; i < 10; i++) {
 +      try {
 +        client = getCoordinatorConnectionWithRetry(context);
 +        return exec.execute(client);
 +      } catch (TTransportException tte) {
 +        log.debug("ReplicationClient coordinator request failed, retrying ... ", tte);
 +        try {
 +          Thread.sleep(100);
 +        } catch (InterruptedException e) {
 +          throw new AccumuloException(e);
 +        }
 +      } catch (ThriftSecurityException e) {
 +        throw new AccumuloSecurityException(e.user, e.code, e);
 +      } catch (AccumuloException e) {
 +        throw e;
 +      } catch (Exception e) {
 +        throw new AccumuloException(e);
 +      } finally {
 +        if (client != null)
 +          close(client);
 +      }
 +    }
 +
 +    throw new AccumuloException(
 +        "Could not connect to ReplicationCoordinator at " + context.getInstanceName());
 +  }
 +
 +  public static <T> T executeServicerWithReturn(ClientContext context, HostAndPort tserver,
 +      ClientExecReturn<T,ReplicationServicer.Client> exec, long timeout)
 +      throws AccumuloException, AccumuloSecurityException {
 +    ReplicationServicer.Client client = null;
 +    while (true) {
 +      try {
 +        client = getServicerConnection(context, tserver, timeout);
 +        return exec.execute(client);
 +      } catch (ThriftSecurityException e) {
 +        throw new AccumuloSecurityException(e.user, e.code, e);
 +      } catch (AccumuloException e) {
 +        throw e;
 +      } catch (Exception e) {
 +        throw new AccumuloException(e);
 +      } finally {
 +        if (client != null)
 +          close(client);
 +      }
 +    }
 +  }
 +
 +}
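
The executeCoordinatorWithReturn method above retries a coordinator call up to ten times, sleeping
briefly after a transport failure and always returning the borrowed client to the transport pool in
a finally block. A minimal standalone sketch of that bounded-retry shape, using hypothetical names
(RetrySketch, callWithRetry) rather than Accumulo API, looks like this:

    // Minimal sketch of a bounded retry loop (hypothetical names, not Accumulo API).
    import java.util.concurrent.Callable;

    public class RetrySketch {

      // Invoke the call up to maxAttempts times, pausing briefly after each transient failure.
      static <T> T callWithRetry(Callable<T> call, int maxAttempts) throws Exception {
        Exception last = null;
        for (int i = 0; i < maxAttempts; i++) {
          try {
            return call.call();
          } catch (Exception e) {
            last = e;          // assume the failure is transient, e.g. a dropped connection
            Thread.sleep(100); // brief pause before the next attempt
          }
        }
        throw new Exception("Gave up after " + maxAttempts + " attempts", last);
      }

      public static void main(String[] args) throws Exception {
        System.out.println(callWithRetry(() -> "ok", 10));
      }
    }

In the real method the per-attempt cleanup (returning the client to the pool) is what the finally
block handles; the sketch only shows the retry-then-give-up control flow.
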
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerImpl.java
index edb2435,0000000..b160eea
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerImpl.java
@@@ -1,204 -1,0 +1,204 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +
 +import java.util.Iterator;
 +import java.util.LinkedHashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +
 +/**
 + * Provides scanner functionality.
 + *
 + * "Clients can iterate over multiple column families, and there are several mechanisms for limiting
 + * the rows, columns, and timestamps traversed by a scan. For example, we could restrict [a] scan
 + * ... to only produce anchors whose columns match [a] regular expression ..., or to only produce
 + * anchors whose timestamps fall within ten days of the current time."
 + *
 + */
 +public class ScannerImpl extends ScannerOptions implements Scanner {
 +
 +  // keep a list of columns over which to scan
 +  // keep track of the last thing read
 +  // hopefully, we can track all the state in the scanner on the client
 +  // and just query for the next highest row from the tablet server
 +
 +  private final ClientContext context;
 +  private Authorizations authorizations;
 +  private TableId tableId;
 +
 +  private int size;
 +
 +  private Range range;
 +  private boolean isolated = false;
 +  private long readaheadThreshold = Constants.SCANNER_DEFAULT_READAHEAD_THRESHOLD;
 +
 +  boolean closed = false;
 +
 +  private static final int MAX_ENTRIES = 16;
 +
 +  private long iterCount = 0;
 +
 +  // Create an LRU map of iterators that tracks the MAX_ENTRIES most recently used iterators. An
 +  // LRU map is used to support the use case of a long-lived scanner that constantly creates
 +  // iterators and does not read all of the data. For this case we do not want iterator tracking to
 +  // consume too much memory. It would also be best to avoid an RPC storm of close methods for
 +  // thousands of sessions that may have timed out.
-   private Map<ScannerIterator,Long> iters = new LinkedHashMap<ScannerIterator,Long>(MAX_ENTRIES + 1,
-       .75F, true) {
-     private static final long serialVersionUID = 1L;
- 
-     // This method is called just after a new entry has been added
-     @Override
-     public boolean removeEldestEntry(Map.Entry<ScannerIterator,Long> eldest) {
-       return size() > MAX_ENTRIES;
-     }
-   };
++  private Map<ScannerIterator,Long> iters =
++      new LinkedHashMap<ScannerIterator,Long>(MAX_ENTRIES + 1, .75F, true) {
++        private static final long serialVersionUID = 1L;
++
++        // This method is called just after a new entry has been added
++        @Override
++        public boolean removeEldestEntry(Map.Entry<ScannerIterator,Long> eldest) {
++          return size() > MAX_ENTRIES;
++        }
++      };
 +
 +  /**
 +   * This is used for ScannerIterators to report their activity back to the scanner that created
 +   * them.
 +   */
 +  class Reporter {
 +
 +    void readBatch(ScannerIterator iter) {
 +      synchronized (ScannerImpl.this) {
 +        // This iter just had some activity, so access it in the map so it becomes the most
 +        // recently used.
 +        iters.get(iter);
 +      }
 +    }
 +
 +    void finished(ScannerIterator iter) {
 +      synchronized (ScannerImpl.this) {
 +        iters.remove(iter);
 +      }
 +    }
 +  }
 +
 +  private synchronized void ensureOpen() {
 +    if (closed)
 +      throw new IllegalArgumentException("Scanner is closed");
 +  }
 +
 +  public ScannerImpl(ClientContext context, TableId tableId, Authorizations authorizations) {
 +    checkArgument(context != null, "context is null");
 +    checkArgument(tableId != null, "tableId is null");
 +    checkArgument(authorizations != null, "authorizations is null");
 +    this.context = context;
 +    this.tableId = tableId;
 +    this.range = new Range((Key) null, (Key) null);
 +    this.authorizations = authorizations;
 +
 +    this.size = Constants.SCAN_BATCH_SIZE;
 +  }
 +
 +  @Override
 +  public synchronized void setRange(Range range) {
 +    ensureOpen();
 +    checkArgument(range != null, "range is null");
 +    this.range = range;
 +  }
 +
 +  @Override
 +  public synchronized Range getRange() {
 +    ensureOpen();
 +    return range;
 +  }
 +
 +  @Override
 +  public synchronized void setBatchSize(int size) {
 +    ensureOpen();
 +    if (size > 0)
 +      this.size = size;
 +    else
 +      throw new IllegalArgumentException("size must be greater than zero");
 +  }
 +
 +  @Override
 +  public synchronized int getBatchSize() {
 +    ensureOpen();
 +    return size;
 +  }
 +
 +  @Override
 +  public synchronized Iterator<Entry<Key,Value>> iterator() {
 +    ensureOpen();
 +    ScannerIterator iter = new ScannerIterator(context, tableId, authorizations, range, size,
 +        getTimeout(TimeUnit.SECONDS), this, isolated, readaheadThreshold, new Reporter());
 +
 +    iters.put(iter, iterCount++);
 +
 +    return iter;
 +  }
 +
 +  @Override
 +  public Authorizations getAuthorizations() {
 +    ensureOpen();
 +    return authorizations;
 +  }
 +
 +  @Override
 +  public synchronized void enableIsolation() {
 +    ensureOpen();
 +    this.isolated = true;
 +  }
 +
 +  @Override
 +  public synchronized void disableIsolation() {
 +    ensureOpen();
 +    this.isolated = false;
 +  }
 +
 +  @Override
 +  public synchronized void setReadaheadThreshold(long batches) {
 +    ensureOpen();
 +    if (batches < 0) {
 +      throw new IllegalArgumentException(
 +          "Number of batches before read-ahead must be non-negative");
 +    }
 +
 +    readaheadThreshold = batches;
 +  }
 +
 +  @Override
 +  public synchronized long getReadaheadThreshold() {
 +    ensureOpen();
 +    return readaheadThreshold;
 +  }
 +
 +  @Override
 +  public synchronized void close() {
 +    if (!closed) {
 +      iters.forEach((iter, v) -> iter.close());
 +      iters.clear();
 +    }
 +
 +    closed = true;
 +  }
 +}
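
The iters field above gets its LRU behavior from LinkedHashMap's access-order mode combined with
removeEldestEntry. A small self-contained sketch of that pattern, with an arbitrary capacity and
made-up keys (not Accumulo code), is:

    // Self-contained sketch of the access-ordered LinkedHashMap eviction pattern (illustrative only).
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class LruSketch {
      private static final int MAX = 3; // arbitrary capacity for the example

      public static void main(String[] args) {
        // accessOrder=true makes get()/put() move an entry to the most-recently-used position
        Map<String,Integer> lru = new LinkedHashMap<String,Integer>(MAX + 1, .75f, true) {
          private static final long serialVersionUID = 1L;

          @Override
          protected boolean removeEldestEntry(Map.Entry<String,Integer> eldest) {
            return size() > MAX; // evict the least recently used entry once over capacity
          }
        };

        lru.put("a", 1);
        lru.put("b", 2);
        lru.put("c", 3);
        lru.get("a");    // touch "a" so it becomes most recently used
        lru.put("d", 4); // evicts "b", the least recently used entry
        System.out.println(lru.keySet()); // prints [c, a, d]
      }
    }
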
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerIterator.java
index c3133b7,0000000..3c52fdf
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerIterator.java
@@@ -1,215 -1,0 +1,215 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import java.util.Collections;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map.Entry;
 +import java.util.NoSuchElementException;
 +import java.util.concurrent.ExecutionException;
 +import java.util.concurrent.Future;
 +import java.util.concurrent.SynchronousQueue;
 +import java.util.concurrent.ThreadPoolExecutor;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.SampleNotPresentException;
 +import org.apache.accumulo.core.client.TableDeletedException;
 +import org.apache.accumulo.core.client.TableOfflineException;
 +import org.apache.accumulo.core.clientImpl.ThriftScanner.ScanState;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyValue;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.NamingThreadFactory;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.base.Preconditions;
 +
 +public class ScannerIterator implements Iterator<Entry<Key,Value>> {
 +
 +  // scanner options
 +  private long timeOut;
 +
 +  // scanner state
 +  private Iterator<KeyValue> iter;
 +  private final ScanState scanState;
 +
 +  private ScannerOptions options;
 +
 +  private Future<List<KeyValue>> readAheadOperation;
 +
 +  private boolean finished = false;
 +
 +  private long batchCount = 0;
 +  private long readaheadThreshold;
 +
 +  private ScannerImpl.Reporter reporter;
 +
-   private static ThreadPoolExecutor readaheadPool = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 3L,
-       TimeUnit.SECONDS, new SynchronousQueue<>(),
-       new NamingThreadFactory("Accumulo scanner read ahead thread"));
++  private static ThreadPoolExecutor readaheadPool =
++      new ThreadPoolExecutor(0, Integer.MAX_VALUE, 3L, TimeUnit.SECONDS, new SynchronousQueue<>(),
++          new NamingThreadFactory("Accumulo scanner read ahead thread"));
 +
 +  private boolean closed = false;
 +
 +  ScannerIterator(ClientContext context, TableId tableId, Authorizations authorizations,
 +      Range range, int size, long timeOut, ScannerOptions options, boolean isolated,
 +      long readaheadThreshold, ScannerImpl.Reporter reporter) {
 +    this.timeOut = timeOut;
 +    this.readaheadThreshold = readaheadThreshold;
 +
 +    this.options = new ScannerOptions(options);
 +
 +    this.reporter = reporter;
 +
 +    if (this.options.fetchedColumns.size() > 0) {
 +      range = range.bound(this.options.fetchedColumns.first(), this.options.fetchedColumns.last());
 +    }
 +
-     scanState = new ScanState(context, tableId, authorizations, new Range(range),
-         options.fetchedColumns, size, options.serverSideIteratorList,
-         options.serverSideIteratorOptions, isolated, readaheadThreshold,
-         options.getSamplerConfiguration(), options.batchTimeOut, options.classLoaderContext,
-         options.executionHints);
++    scanState =
++        new ScanState(context, tableId, authorizations, new Range(range), options.fetchedColumns,
++            size, options.serverSideIteratorList, options.serverSideIteratorOptions, isolated,
++            readaheadThreshold, options.getSamplerConfiguration(), options.batchTimeOut,
++            options.classLoaderContext, options.executionHints);
 +
 +    // If we want to start readahead immediately, don't wait for hasNext to be called
 +    if (readaheadThreshold == 0L) {
 +      initiateReadAhead();
 +    }
 +    iter = null;
 +  }
 +
 +  @Override
 +  public boolean hasNext() {
 +    if (finished)
 +      return false;
 +
 +    if (iter != null && iter.hasNext()) {
 +      return true;
 +    }
 +
 +    iter = getNextBatch().iterator();
 +    if (!iter.hasNext()) {
 +      finished = true;
 +      reporter.finished(this);
 +      return false;
 +    }
 +
 +    return true;
 +  }
 +
 +  @Override
 +  public Entry<Key,Value> next() {
 +    if (hasNext())
 +      return iter.next();
 +    throw new NoSuchElementException();
 +  }
 +
 +  void close() {
 +    // run the actual close operation in the background so this call does not block.
 +    readaheadPool.execute(() -> {
 +      synchronized (scanState) {
 +        // this is synchronized so it's mutually exclusive with readBatch()
 +        try {
 +          closed = true;
 +          ThriftScanner.close(scanState);
 +        } catch (Exception e) {
 +          LoggerFactory.getLogger(ScannerIterator.class)
 +              .debug("Exception when closing scan session", e);
 +        }
 +      }
 +    });
 +  }
 +
 +  private void initiateReadAhead() {
 +    Preconditions.checkState(readAheadOperation == null);
 +    readAheadOperation = readaheadPool.submit(this::readBatch);
 +  }
 +
 +  private List<KeyValue> readBatch() throws Exception {
 +
 +    List<KeyValue> batch;
 +
 +    do {
 +      synchronized (scanState) {
 +        // this is synchronized so it's mutually exclusive with closing
 +        Preconditions.checkState(!closed, "Scanner was closed");
 +        batch = ThriftScanner.scan(scanState.context, scanState, timeOut);
 +      }
 +    } while (batch != null && batch.size() == 0);
 +
 +    if (batch != null) {
 +      reporter.readBatch(this);
 +    }
 +
 +    return batch == null ? Collections.emptyList() : batch;
 +  }
 +
 +  private List<KeyValue> getNextBatch() {
 +
 +    List<KeyValue> nextBatch;
 +
 +    try {
 +      if (readAheadOperation == null) {
 +        // no read ahead was started, so fetch the next batch right now
 +        nextBatch = readBatch();
 +      } else {
 +        nextBatch = readAheadOperation.get();
 +        readAheadOperation = null;
 +      }
 +    } catch (ExecutionException ee) {
 +      wrapExecutionException(ee);
 +      throw new RuntimeException(ee);
 +    } catch (RuntimeException e) {
 +      throw e;
 +    } catch (Exception e) {
 +      throw new RuntimeException(e);
 +    }
 +
 +    if (!nextBatch.isEmpty()) {
 +      batchCount++;
 +
 +      if (batchCount > readaheadThreshold) {
 +        // start a thread to read the next batch
 +        initiateReadAhead();
 +      }
 +    }
 +
 +    return nextBatch;
 +  }
 +
 +  private void wrapExecutionException(ExecutionException ee) {
 +    // Need to preserve the type of exception that was the cause because some code depends on it.
 +    // However, the cause is an exception that occurred in a background thread, so throwing it
 +    // would lose the stack trace for the user thread calling the scanner. Wrapping the exception
 +    // with the same type preserves the type and both stack traces (foreground and background
 +    // thread traces), which are critical for debugging.
 +    if (ee.getCause() instanceof IsolationException)
 +      throw new IsolationException(ee);
 +    if (ee.getCause() instanceof TableDeletedException) {
 +      TableDeletedException cause = (TableDeletedException) ee.getCause();
 +      throw new TableDeletedException(cause.getTableId(), cause);
 +    }
 +    if (ee.getCause() instanceof TableOfflineException)
 +      throw new TableOfflineException(ee);
 +    if (ee.getCause() instanceof SampleNotPresentException)
 +      throw new SampleNotPresentException(ee.getCause().getMessage(), ee);
 +  }
 +
 +}
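
ScannerIterator above implements a single-slot read-ahead: while the caller processes one batch,
the next batch is already being fetched by a background thread, and getNextBatch simply collects
the Future and submits the next fetch. A standalone sketch of that idea, with an invented
fetchBatch standing in for the tablet server scan, is:

    // Standalone sketch of single-slot read-ahead: fetch the next batch in the background while
    // the current one is being processed (fetchBatch and its data are invented for illustration).
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class ReadAheadSketch {
      private static final ExecutorService pool = Executors.newCachedThreadPool();

      private static int next = 0;

      // Pretend to fetch a batch from a server; an empty list means the scan is finished.
      private static List<Integer> fetchBatch() {
        if (next >= 6)
          return List.of();
        List<Integer> batch = List.of(next, next + 1);
        next += 2;
        return batch;
      }

      public static void main(String[] args) throws Exception {
        Future<List<Integer>> readAhead = pool.submit(ReadAheadSketch::fetchBatch);
        while (true) {
          List<Integer> batch = readAhead.get(); // wait for the batch read in the background
          if (batch.isEmpty())
            break;
          readAhead = pool.submit(ReadAheadSketch::fetchBatch); // start fetching the next batch now
          batch.forEach(System.out::println);                   // process the current batch meanwhile
        }
        pool.shutdown();
      }
    }
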
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/SecurityOperationsImpl.java
index 7a1d53f,0000000..881ad50
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/SecurityOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/SecurityOperationsImpl.java
@@@ -1,304 -1,0 +1,304 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +import static org.apache.accumulo.core.client.security.SecurityErrorCode.NAMESPACE_DOESNT_EXIST;
 +
 +import java.nio.ByteBuffer;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
 +import org.apache.accumulo.core.client.admin.SecurityOperations;
 +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 +import org.apache.accumulo.core.client.security.tokens.DelegationToken;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.clientImpl.thrift.ClientService;
 +import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
 +import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.NamespacePermission;
 +import org.apache.accumulo.core.security.SystemPermission;
 +import org.apache.accumulo.core.security.TablePermission;
 +import org.apache.accumulo.core.securityImpl.thrift.TDelegationToken;
 +import org.apache.accumulo.core.securityImpl.thrift.TDelegationTokenConfig;
 +import org.apache.accumulo.core.trace.TraceUtil;
 +import org.apache.accumulo.core.util.ByteBufferUtil;
 +
 +public class SecurityOperationsImpl implements SecurityOperations {
 +
 +  private final ClientContext context;
 +
 +  private void executeVoid(ClientExec<ClientService.Client> exec)
 +      throws AccumuloException, AccumuloSecurityException {
 +    try {
 +      ServerClient.executeRawVoid(context, exec);
 +    } catch (ThriftTableOperationException ttoe) {
 +      // recast missing table
 +      if (ttoe.getType() == TableOperationExceptionType.NOTFOUND)
 +        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST);
 +      else if (ttoe.getType() == TableOperationExceptionType.NAMESPACE_NOTFOUND)
 +        throw new AccumuloSecurityException(null, SecurityErrorCode.NAMESPACE_DOESNT_EXIST);
 +      else
 +        throw new AccumuloException(ttoe);
 +    } catch (ThriftSecurityException e) {
 +      throw new AccumuloSecurityException(e.user, e.code, e);
 +    } catch (AccumuloException e) {
 +      throw e;
 +    } catch (Exception e) {
 +      throw new AccumuloException(e);
 +    }
 +  }
 +
 +  private <T> T execute(ClientExecReturn<T,ClientService.Client> exec)
 +      throws AccumuloException, AccumuloSecurityException {
 +    try {
 +      return ServerClient.executeRaw(context, exec);
 +    } catch (ThriftTableOperationException ttoe) {
 +      // recast missing table
 +      if (ttoe.getType() == TableOperationExceptionType.NOTFOUND)
 +        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST);
 +      else if (ttoe.getType() == TableOperationExceptionType.NAMESPACE_NOTFOUND)
 +        throw new AccumuloSecurityException(null, SecurityErrorCode.NAMESPACE_DOESNT_EXIST);
 +      else
 +        throw new AccumuloException(ttoe);
 +    } catch (ThriftSecurityException e) {
 +      throw new AccumuloSecurityException(e.user, e.code, e);
 +    } catch (AccumuloException e) {
 +      throw e;
 +    } catch (Exception e) {
 +      throw new AccumuloException(e);
 +    }
 +  }
 +
 +  public SecurityOperationsImpl(ClientContext context) {
 +    checkArgument(context != null, "context is null");
 +    this.context = context;
 +  }
 +
 +  @Override
 +  public void createLocalUser(final String principal, final PasswordToken password)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    if (context.getSaslParams() == null) {
 +      checkArgument(password != null, "password is null");
 +    }
 +    executeVoid(client -> {
 +      if (context.getSaslParams() == null) {
 +        client.createLocalUser(TraceUtil.traceInfo(), context.rpcCreds(), principal,
 +            ByteBuffer.wrap(password.getPassword()));
 +      } else {
 +        client.createLocalUser(TraceUtil.traceInfo(), context.rpcCreds(), principal,
 +            ByteBuffer.wrap(new byte[0]));
 +      }
 +    });
 +  }
 +
 +  @Override
 +  public void dropLocalUser(final String principal)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    executeVoid(
 +        client -> client.dropLocalUser(TraceUtil.traceInfo(), context.rpcCreds(), principal));
 +  }
 +
 +  @Override
 +  public boolean authenticateUser(final String principal, final AuthenticationToken token)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(token != null, "token is null");
 +    final Credentials toAuth = new Credentials(principal, token);
 +    return execute(client -> client.authenticateUser(TraceUtil.traceInfo(), context.rpcCreds(),
 +        toAuth.toThrift(context.getInstanceID())));
 +  }
 +
 +  @Override
 +  public void changeLocalUserPassword(final String principal, final PasswordToken token)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(token != null, "token is null");
 +    final Credentials toChange = new Credentials(principal, token);
 +    executeVoid(client -> client.changeLocalUserPassword(TraceUtil.traceInfo(), context.rpcCreds(),
 +        principal, ByteBuffer.wrap(token.getPassword())));
 +    if (context.getCredentials().getPrincipal().equals(principal)) {
 +      context.setCredentials(toChange);
 +    }
 +  }
 +
 +  @Override
 +  public void changeUserAuthorizations(final String principal, final Authorizations authorizations)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(authorizations != null, "authorizations is null");
 +    executeVoid(client -> client.changeAuthorizations(TraceUtil.traceInfo(), context.rpcCreds(),
 +        principal, ByteBufferUtil.toByteBuffers(authorizations.getAuthorizations())));
 +  }
 +
 +  @Override
 +  public Authorizations getUserAuthorizations(final String principal)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    return execute(client -> new Authorizations(
 +        client.getUserAuthorizations(TraceUtil.traceInfo(), context.rpcCreds(), principal)));
 +  }
 +
 +  @Override
 +  public boolean hasSystemPermission(final String principal, final SystemPermission perm)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(perm != null, "perm is null");
 +    return execute(client -> client.hasSystemPermission(TraceUtil.traceInfo(), context.rpcCreds(),
 +        principal, perm.getId()));
 +  }
 +
 +  @Override
 +  public boolean hasTablePermission(final String principal, final String table,
 +      final TablePermission perm) throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(table != null, "table is null");
 +    checkArgument(perm != null, "perm is null");
 +    try {
 +      return execute(client -> client.hasTablePermission(TraceUtil.traceInfo(), context.rpcCreds(),
 +          principal, table, perm.getId()));
 +    } catch (AccumuloSecurityException e) {
 +      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST)
 +        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
 +      else
 +        throw e;
 +    }
 +  }
 +
 +  @Override
 +  public boolean hasNamespacePermission(final String principal, final String namespace,
 +      final NamespacePermission permission) throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(namespace != null, "namespace is null");
 +    checkArgument(permission != null, "permission is null");
 +    return execute(client -> client.hasNamespacePermission(TraceUtil.traceInfo(),
 +        context.rpcCreds(), principal, namespace, permission.getId()));
 +  }
 +
 +  @Override
 +  public void grantSystemPermission(final String principal, final SystemPermission permission)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(permission != null, "permission is null");
 +    executeVoid(client -> client.grantSystemPermission(TraceUtil.traceInfo(), context.rpcCreds(),
 +        principal, permission.getId()));
 +  }
 +
 +  @Override
 +  public void grantTablePermission(final String principal, final String table,
 +      final TablePermission permission) throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(table != null, "table is null");
 +    checkArgument(permission != null, "permission is null");
 +    try {
 +      executeVoid(client -> client.grantTablePermission(TraceUtil.traceInfo(), context.rpcCreds(),
 +          principal, table, permission.getId()));
 +    } catch (AccumuloSecurityException e) {
 +      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST)
 +        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
 +      else
 +        throw e;
 +    }
 +  }
 +
 +  @Override
 +  public void grantNamespacePermission(final String principal, final String namespace,
 +      final NamespacePermission permission) throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(namespace != null, "namespace is null");
 +    checkArgument(permission != null, "permission is null");
 +    executeVoid(client -> client.grantNamespacePermission(TraceUtil.traceInfo(), context.rpcCreds(),
 +        principal, namespace, permission.getId()));
 +  }
 +
 +  @Override
 +  public void revokeSystemPermission(final String principal, final SystemPermission permission)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(permission != null, "permission is null");
 +    executeVoid(client -> client.revokeSystemPermission(TraceUtil.traceInfo(), context.rpcCreds(),
 +        principal, permission.getId()));
 +  }
 +
 +  @Override
 +  public void revokeTablePermission(final String principal, final String table,
 +      final TablePermission permission) throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(table != null, "table is null");
 +    checkArgument(permission != null, "permission is null");
 +    try {
 +      executeVoid(client -> client.revokeTablePermission(TraceUtil.traceInfo(), context.rpcCreds(),
 +          principal, table, permission.getId()));
 +    } catch (AccumuloSecurityException e) {
 +      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST)
 +        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
 +      else
 +        throw e;
 +    }
 +  }
 +
 +  @Override
 +  public void revokeNamespacePermission(final String principal, final String namespace,
 +      final NamespacePermission permission) throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(principal != null, "principal is null");
 +    checkArgument(namespace != null, "namespace is null");
 +    checkArgument(permission != null, "permission is null");
 +    executeVoid(client -> client.revokeNamespacePermission(TraceUtil.traceInfo(),
 +        context.rpcCreds(), principal, namespace, permission.getId()));
 +  }
 +
 +  @Override
 +  public Set<String> listLocalUsers() throws AccumuloException, AccumuloSecurityException {
 +    return execute(client -> client.listLocalUsers(TraceUtil.traceInfo(), context.rpcCreds()));
 +  }
 +
 +  @Override
 +  public DelegationToken getDelegationToken(DelegationTokenConfig cfg)
 +      throws AccumuloException, AccumuloSecurityException {
 +    final TDelegationTokenConfig tConfig;
 +    if (cfg != null) {
 +      tConfig = DelegationTokenConfigSerializer.serialize(cfg);
 +    } else {
 +      tConfig = new TDelegationTokenConfig();
 +    }
 +
 +    TDelegationToken thriftToken;
 +    try {
 +      thriftToken = MasterClient.execute(context,
 +          client -> client.getDelegationToken(TraceUtil.traceInfo(), context.rpcCreds(), tConfig));
 +    } catch (TableNotFoundException e) {
 +      // should never happen
 +      throw new AssertionError(
 +          "Received TableNotFoundException on method which should not throw that exception", e);
 +    }
 +
-     AuthenticationTokenIdentifier identifier = new AuthenticationTokenIdentifier(
-         thriftToken.getIdentifier());
++    AuthenticationTokenIdentifier identifier =
++        new AuthenticationTokenIdentifier(thriftToken.getIdentifier());
 +
 +    // Get the password out of the thrift delegation token
 +    return new DelegationTokenImpl(thriftToken.getPassword(), identifier);
 +  }
 +
 +}
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/ServerClient.java
index 5149508,0000000..9d79216
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ServerClient.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ServerClient.java
@@@ -1,204 -1,0 +1,204 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
 +
 +import java.util.ArrayList;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.clientImpl.thrift.ClientService;
 +import org.apache.accumulo.core.clientImpl.thrift.ClientService.Client;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.rpc.ThriftUtil;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.ServerServices;
 +import org.apache.accumulo.core.util.ServerServices.Service;
 +import org.apache.accumulo.fate.zookeeper.ZooCache;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil;
 +import org.apache.thrift.TApplicationException;
 +import org.apache.thrift.TServiceClient;
 +import org.apache.thrift.TServiceClientFactory;
 +import org.apache.thrift.transport.TTransport;
 +import org.apache.thrift.transport.TTransportException;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +public class ServerClient {
 +  private static final Logger log = LoggerFactory.getLogger(ServerClient.class);
 +
 +  public static <T> T execute(ClientContext context, ClientExecReturn<T,ClientService.Client> exec)
 +      throws AccumuloException, AccumuloSecurityException {
 +    return execute(context, new ClientService.Client.Factory(), exec);
 +  }
 +
 +  public static <CT extends TServiceClient,RT> RT execute(ClientContext context,
 +      TServiceClientFactory<CT> factory, ClientExecReturn<RT,CT> exec)
 +      throws AccumuloException, AccumuloSecurityException {
 +    try {
 +      return executeRaw(context, factory, exec);
 +    } catch (ThriftSecurityException e) {
 +      throw new AccumuloSecurityException(e.user, e.code, e);
 +    } catch (AccumuloException e) {
 +      throw e;
 +    } catch (Exception e) {
 +      throw new AccumuloException(e);
 +    }
 +  }
 +
 +  public static void executeVoid(ClientContext context, ClientExec<ClientService.Client> exec)
 +      throws AccumuloException, AccumuloSecurityException {
 +    try {
 +      executeRawVoid(context, exec);
 +    } catch (ThriftSecurityException e) {
 +      throw new AccumuloSecurityException(e.user, e.code, e);
 +    } catch (AccumuloException e) {
 +      throw e;
 +    } catch (Exception e) {
 +      throw new AccumuloException(e);
 +    }
 +  }
 +
 +  public static <T> T executeRaw(ClientContext context,
 +      ClientExecReturn<T,ClientService.Client> exec) throws Exception {
 +    return executeRaw(context, new ClientService.Client.Factory(), exec);
 +  }
 +
 +  public static <CT extends TServiceClient,RT> RT executeRaw(ClientContext context,
 +      TServiceClientFactory<CT> factory, ClientExecReturn<RT,CT> exec) throws Exception {
 +    while (true) {
 +      CT client = null;
 +      String server = null;
 +      try {
 +        Pair<String,CT> pair = ServerClient.getConnection(context, factory);
 +        server = pair.getFirst();
 +        client = pair.getSecond();
 +        return exec.execute(client);
 +      } catch (TApplicationException tae) {
 +        throw new AccumuloServerException(server, tae);
 +      } catch (TTransportException tte) {
 +        log.debug("ClientService request failed " + server + ", retrying ... ", tte);
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } finally {
 +        if (client != null)
 +          ServerClient.close(client);
 +      }
 +    }
 +  }
 +
 +  public static void executeRawVoid(ClientContext context, ClientExec<ClientService.Client> exec)
 +      throws Exception {
 +    while (true) {
 +      ClientService.Client client = null;
 +      String server = null;
 +      try {
 +        Pair<String,Client> pair = ServerClient.getConnection(context);
 +        server = pair.getFirst();
 +        client = pair.getSecond();
 +        exec.execute(client);
 +        break;
 +      } catch (TApplicationException tae) {
 +        throw new AccumuloServerException(server, tae);
 +      } catch (TTransportException tte) {
 +        log.debug("ClientService request failed " + server + ", retrying ... ", tte);
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } finally {
 +        if (client != null)
 +          ServerClient.close(client);
 +      }
 +    }
 +  }
 +
 +  static volatile boolean warnedAboutTServersBeingDown = false;
 +
 +  public static Pair<String,ClientService.Client> getConnection(ClientContext context)
 +      throws TTransportException {
 +    return getConnection(context, true);
 +  }
 +
 +  public static <CT extends TServiceClient> Pair<String,CT> getConnection(ClientContext context,
 +      TServiceClientFactory<CT> factory) throws TTransportException {
 +    return getConnection(context, factory, true, context.getClientTimeoutInMillis());
 +  }
 +
 +  public static Pair<String,ClientService.Client> getConnection(ClientContext context,
 +      boolean preferCachedConnections) throws TTransportException {
 +    return getConnection(context, preferCachedConnections, context.getClientTimeoutInMillis());
 +  }
 +
 +  public static Pair<String,ClientService.Client> getConnection(ClientContext context,
 +      boolean preferCachedConnections, long rpcTimeout) throws TTransportException {
 +    return getConnection(context, new ClientService.Client.Factory(), preferCachedConnections,
 +        rpcTimeout);
 +  }
 +
 +  public static <CT extends TServiceClient> Pair<String,CT> getConnection(ClientContext context,
 +      TServiceClientFactory<CT> factory, boolean preferCachedConnections, long rpcTimeout)
 +      throws TTransportException {
 +    checkArgument(context != null, "context is null");
 +    // create list of servers
 +    ArrayList<ThriftTransportKey> servers = new ArrayList<>();
 +
 +    // add tservers
 +    ZooCache zc = context.getZooCache();
 +    for (String tserver : zc.getChildren(context.getZooKeeperRoot() + Constants.ZTSERVERS)) {
 +      String path = context.getZooKeeperRoot() + Constants.ZTSERVERS + "/" + tserver;
 +      byte[] data = ZooUtil.getLockData(zc, path);
 +      if (data != null) {
 +        String strData = new String(data, UTF_8);
 +        if (!strData.equals("master"))
 +          servers.add(new ThriftTransportKey(
 +              new ServerServices(strData).getAddress(Service.TSERV_CLIENT), rpcTimeout, context));
 +      }
 +    }
 +
 +    boolean opened = false;
 +    try {
-       Pair<String,TTransport> pair = ThriftTransportPool.getInstance().getAnyTransport(servers,
-           preferCachedConnections);
++      Pair<String,TTransport> pair =
++          ThriftTransportPool.getInstance().getAnyTransport(servers, preferCachedConnections);
 +      CT client = ThriftUtil.createClient(factory, pair.getSecond());
 +      opened = true;
 +      warnedAboutTServersBeingDown = false;
 +      return new Pair<>(pair.getFirst(), client);
 +    } finally {
 +      if (!opened) {
 +        if (!warnedAboutTServersBeingDown) {
 +          if (servers.isEmpty()) {
 +            log.warn("There are no tablet servers: check that zookeeper and accumulo are running.");
 +          } else {
 +            log.warn("Failed to find an available server in the list of servers: {}", servers);
 +          }
 +          warnedAboutTServersBeingDown = true;
 +        }
 +      }
 +    }
 +  }
 +
 +  public static void close(TServiceClient client) {
 +    if (client != null && client.getInputProtocol() != null
 +        && client.getInputProtocol().getTransport() != null) {
 +      ThriftTransportPool.getInstance().returnTransport(client.getInputProtocol().getTransport());
 +    } else {
 +      log.debug("Attempt to close null connection to a server", new Exception());
 +    }
 +  }
 +}
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsHelper.java
index 35bb85b,0000000..805c701
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsHelper.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsHelper.java
@@@ -1,233 -1,0 +1,233 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +
 +import java.util.EnumSet;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.TreeMap;
 +import java.util.TreeSet;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.admin.TableOperations;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 +
 +public abstract class TableOperationsHelper implements TableOperations {
 +
 +  @Override
 +  public void attachIterator(String tableName, IteratorSetting setting)
 +      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
 +    attachIterator(tableName, setting, EnumSet.allOf(IteratorScope.class));
 +  }
 +
 +  @Override
 +  public void attachIterator(String tableName, IteratorSetting setting,
 +      EnumSet<IteratorScope> scopes)
 +      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(setting != null, "setting is null");
 +    checkArgument(scopes != null, "scopes is null");
 +    checkIteratorConflicts(tableName, setting, scopes);
 +    for (IteratorScope scope : scopes) {
 +      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
 +          scope.name().toLowerCase(), setting.getName());
 +      for (Entry<String,String> prop : setting.getOptions().entrySet()) {
 +        this.setProperty(tableName, root + ".opt." + prop.getKey(), prop.getValue());
 +      }
 +      this.setProperty(tableName, root, setting.getPriority() + "," + setting.getIteratorClass());
 +    }
 +  }
 +
 +  @Override
 +  public void removeIterator(String tableName, String name, EnumSet<IteratorScope> scopes)
 +      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
 +    Map<String,String> copy = new TreeMap<>();
 +    for (Entry<String,String> property : this.getProperties(tableName)) {
 +      copy.put(property.getKey(), property.getValue());
 +    }
 +    for (IteratorScope scope : scopes) {
 +      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
 +          scope.name().toLowerCase(), name);
 +      for (Entry<String,String> property : copy.entrySet()) {
 +        if (property.getKey().equals(root) || property.getKey().startsWith(root + ".opt."))
 +          this.removeProperty(tableName, property.getKey());
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public IteratorSetting getIteratorSetting(String tableName, String name, IteratorScope scope)
 +      throws AccumuloException, TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(name != null, "name is null");
 +    checkArgument(scope != null, "scope is null");
 +    int priority = -1;
 +    String classname = null;
 +    Map<String,String> settings = new HashMap<>();
 +
-     String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
-         scope.name().toLowerCase(), name);
++    String root =
++        String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase(), name);
 +    String opt = root + ".opt.";
 +    for (Entry<String,String> property : this.getProperties(tableName)) {
 +      if (property.getKey().equals(root)) {
 +        String[] parts = property.getValue().split(",");
 +        if (parts.length != 2) {
 +          throw new AccumuloException("Bad value for iterator setting: " + property.getValue());
 +        }
 +        priority = Integer.parseInt(parts[0]);
 +        classname = parts[1];
 +      } else if (property.getKey().startsWith(opt)) {
 +        settings.put(property.getKey().substring(opt.length()), property.getValue());
 +      }
 +    }
 +    if (priority <= 0 || classname == null) {
 +      return null;
 +    }
 +    return new IteratorSetting(priority, name, classname, settings);
 +  }
 +
 +  @Override
 +  public Map<String,EnumSet<IteratorScope>> listIterators(String tableName)
 +      throws AccumuloException, TableNotFoundException {
 +    Map<String,EnumSet<IteratorScope>> result = new TreeMap<>();
 +    for (Entry<String,String> property : this.getProperties(tableName)) {
 +      String name = property.getKey();
 +      String[] parts = name.split("\\.");
 +      if (parts.length == 4) {
 +        if (parts[0].equals("table") && parts[1].equals("iterator")) {
 +          IteratorScope scope = IteratorScope.valueOf(parts[2]);
 +          if (!result.containsKey(parts[3]))
 +            result.put(parts[3], EnumSet.noneOf(IteratorScope.class));
 +          result.get(parts[3]).add(scope);
 +        }
 +      }
 +    }
 +    return result;
 +  }
 +
 +  public static void checkIteratorConflicts(Map<String,String> props, IteratorSetting setting,
 +      EnumSet<IteratorScope> scopes) throws AccumuloException {
 +    checkArgument(setting != null, "setting is null");
 +    checkArgument(scopes != null, "scopes is null");
 +    for (IteratorScope scope : scopes) {
-       String scopeStr = String.format("%s%s", Property.TABLE_ITERATOR_PREFIX,
-           scope.name().toLowerCase());
++      String scopeStr =
++          String.format("%s%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase());
 +      String nameStr = String.format("%s.%s", scopeStr, setting.getName());
 +      String optStr = String.format("%s.opt.", nameStr);
 +      Map<String,String> optionConflicts = new TreeMap<>();
 +      for (Entry<String,String> property : props.entrySet()) {
 +        if (property.getKey().startsWith(scopeStr)) {
 +          if (property.getKey().equals(nameStr))
 +            throw new AccumuloException(new IllegalArgumentException("iterator name conflict for "
 +                + setting.getName() + ": " + property.getKey() + "=" + property.getValue()));
 +          if (property.getKey().startsWith(optStr))
 +            optionConflicts.put(property.getKey(), property.getValue());
 +          if (property.getKey().contains(".opt."))
 +            continue;
 +          String[] parts = property.getValue().split(",");
 +          if (parts.length != 2)
 +            throw new AccumuloException("Bad value for existing iterator setting: "
 +                + property.getKey() + "=" + property.getValue());
 +          try {
 +            if (Integer.parseInt(parts[0]) == setting.getPriority())
 +              throw new AccumuloException(new IllegalArgumentException(
 +                  "iterator priority conflict: " + property.getKey() + "=" + property.getValue()));
 +          } catch (NumberFormatException e) {
 +            throw new AccumuloException("Bad value for existing iterator setting: "
 +                + property.getKey() + "=" + property.getValue());
 +          }
 +        }
 +      }
 +      if (optionConflicts.size() > 0)
 +        throw new AccumuloException(new IllegalArgumentException(
 +            "iterator options conflict for " + setting.getName() + ": " + optionConflicts));
 +    }
 +  }
 +
 +  @Override
 +  public void checkIteratorConflicts(String tableName, IteratorSetting setting,
 +      EnumSet<IteratorScope> scopes) throws AccumuloException, TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    Map<String,String> iteratorProps = new HashMap<>();
 +    for (Entry<String,String> entry : this.getProperties(tableName))
 +      iteratorProps.put(entry.getKey(), entry.getValue());
 +    checkIteratorConflicts(iteratorProps, setting, scopes);
 +  }
 +
 +  @Override
 +  public int addConstraint(String tableName, String constraintClassName)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    TreeSet<Integer> constraintNumbers = new TreeSet<>();
 +    TreeMap<String,Integer> constraintClasses = new TreeMap<>();
 +    int i;
 +    for (Entry<String,String> property : this.getProperties(tableName)) {
 +      if (property.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.toString())) {
 +        try {
 +          i = Integer.parseInt(
 +              property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length()));
 +        } catch (NumberFormatException e) {
 +          throw new AccumuloException("Bad key for existing constraint: " + property);
 +        }
 +        constraintNumbers.add(i);
 +        constraintClasses.put(property.getValue(), i);
 +      }
 +    }
 +    i = 1;
 +    while (constraintNumbers.contains(i))
 +      i++;
 +    if (constraintClasses.containsKey(constraintClassName))
 +      throw new AccumuloException("Constraint " + constraintClassName + " already exists for table "
 +          + tableName + " with number " + constraintClasses.get(constraintClassName));
 +    this.setProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX.toString() + i,
 +        constraintClassName);
 +    return i;
 +  }
 +
 +  @Override
 +  public void removeConstraint(String tableName, int number)
 +      throws AccumuloException, AccumuloSecurityException {
 +    this.removeProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX.toString() + number);
 +  }
 +
 +  @Override
 +  public Map<String,Integer> listConstraints(String tableName)
 +      throws AccumuloException, TableNotFoundException {
 +    Map<String,Integer> constraints = new TreeMap<>();
 +    for (Entry<String,String> property : this.getProperties(tableName)) {
 +      if (property.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.toString())) {
 +        if (constraints.containsKey(property.getValue()))
 +          throw new AccumuloException("Same constraint configured twice: " + property.getKey() + "="
 +              + Property.TABLE_CONSTRAINT_PREFIX + constraints.get(property.getValue()) + "="
 +              + property.getKey());
 +        try {
 +          constraints.put(property.getValue(), Integer.parseInt(
 +              property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length())));
 +        } catch (NumberFormatException e) {
 +          throw new AccumuloException("Bad key for existing constraint: " + property);
 +        }
 +      }
 +    }
 +    return constraints;
 +  }
 +}
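
TableOperationsHelper above stores each iterator as flat table properties: the priority and class
under table.iterator.<scope>.<name> (built from Property.TABLE_ITERATOR_PREFIX) and each option
under a .opt. suffix. A standalone sketch of that key layout follows; the iterator class, priority,
and option values are chosen purely for illustration.

    // Standalone sketch of the iterator property-key layout used above (values are made up).
    import java.util.Map;
    import java.util.TreeMap;

    public class IteratorPropsSketch {
      public static void main(String[] args) {
        String prefix = "table.iterator."; // mirrors Property.TABLE_ITERATOR_PREFIX
        String scope = "scan";             // an IteratorScope name, lowercased
        String name = "ageoff";            // the IteratorSetting name

        Map<String,String> props = new TreeMap<>();
        String root = String.format("%s%s.%s", prefix, scope, name);

        // the priority and class are stored together under the root key as "priority,class" ...
        props.put(root, "10,org.apache.accumulo.core.iterators.user.AgeOffFilter");
        // ... and each iterator option is stored under root + ".opt.<optionName>"
        props.put(root + ".opt.ttl", "3600000");

        props.forEach((k, v) -> System.out.println(k + "=" + v));
      }
    }

These are the same keys that getIteratorSetting and listIterators parse back into IteratorSetting
objects and scope sets.
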
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
index be70b51,0000000..15ce5fa
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
@@@ -1,1924 -1,0 +1,1924 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static java.util.Objects.requireNonNull;
 +import static java.util.concurrent.TimeUnit.MILLISECONDS;
 +import static java.util.concurrent.TimeUnit.SECONDS;
 +import static java.util.stream.Collectors.toSet;
 +import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
 +
 +import java.io.BufferedReader;
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.io.InputStreamReader;
 +import java.nio.ByteBuffer;
 +import java.security.SecureRandom;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.EnumSet;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.LinkedList;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Objects;
 +import java.util.Random;
 +import java.util.Set;
 +import java.util.SortedSet;
 +import java.util.TreeMap;
 +import java.util.TreeSet;
 +import java.util.concurrent.CountDownLatch;
 +import java.util.concurrent.ExecutorService;
 +import java.util.concurrent.Executors;
 +import java.util.concurrent.TimeUnit;
 +import java.util.concurrent.atomic.AtomicReference;
 +import java.util.function.Predicate;
 +import java.util.regex.Pattern;
 +import java.util.stream.Collectors;
 +import java.util.zip.ZipEntry;
 +import java.util.zip.ZipInputStream;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.NamespaceExistsException;
 +import org.apache.accumulo.core.client.NamespaceNotFoundException;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableDeletedException;
 +import org.apache.accumulo.core.client.TableExistsException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.TableOfflineException;
 +import org.apache.accumulo.core.client.admin.CompactionConfig;
 +import org.apache.accumulo.core.client.admin.DiskUsage;
 +import org.apache.accumulo.core.client.admin.FindMax;
 +import org.apache.accumulo.core.client.admin.Locations;
 +import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 +import org.apache.accumulo.core.client.admin.SummaryRetriever;
 +import org.apache.accumulo.core.client.admin.TableOperations;
 +import org.apache.accumulo.core.client.sample.SamplerConfiguration;
 +import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
 +import org.apache.accumulo.core.client.summary.Summary;
 +import org.apache.accumulo.core.clientImpl.TabletLocator.TabletLocation;
 +import org.apache.accumulo.core.clientImpl.bulk.BulkImport;
 +import org.apache.accumulo.core.clientImpl.thrift.ClientService.Client;
 +import org.apache.accumulo.core.clientImpl.thrift.TDiskUsage;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftNotActiveServiceException;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
 +import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.ConfigurationCopy;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.constraints.Constraint;
 +import org.apache.accumulo.core.data.ByteSequence;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.data.TabletId;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.dataImpl.TabletIdImpl;
 +import org.apache.accumulo.core.dataImpl.thrift.TRowRange;
 +import org.apache.accumulo.core.dataImpl.thrift.TSummaries;
 +import org.apache.accumulo.core.dataImpl.thrift.TSummarizerConfiguration;
 +import org.apache.accumulo.core.dataImpl.thrift.TSummaryRequest;
 +import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.master.thrift.FateOperation;
 +import org.apache.accumulo.core.master.thrift.MasterClientService;
 +import org.apache.accumulo.core.metadata.MetadataServicer;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata.Location;
 +import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
 +import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
 +import org.apache.accumulo.core.rpc.ThriftUtil;
 +import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.summary.SummarizerConfigurationUtil;
 +import org.apache.accumulo.core.summary.SummaryCollection;
 +import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
 +import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 +import org.apache.accumulo.core.trace.TraceUtil;
 +import org.apache.accumulo.core.util.HostAndPort;
 +import org.apache.accumulo.core.util.LocalityGroupUtil;
 +import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
 +import org.apache.accumulo.core.util.MapCounter;
 +import org.apache.accumulo.core.util.NamingThreadFactory;
 +import org.apache.accumulo.core.util.OpTimer;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.SystemIteratorUtil;
 +import org.apache.accumulo.core.util.TextUtil;
 +import org.apache.accumulo.core.volume.VolumeConfiguration;
 +import org.apache.accumulo.fate.util.Retry;
 +import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.apache.thrift.TApplicationException;
 +import org.apache.thrift.TException;
 +import org.apache.thrift.transport.TTransportException;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.base.Joiner;
 +import com.google.common.base.Preconditions;
 +
 +public class TableOperationsImpl extends TableOperationsHelper {
 +
 +  public static final String CLONE_EXCLUDE_PREFIX = "!";
 +  private static final Logger log = LoggerFactory.getLogger(TableOperations.class);
 +  private final ClientContext context;
 +
 +  public TableOperationsImpl(ClientContext context) {
 +    checkArgument(context != null, "context is null");
 +    this.context = context;
 +  }
 +
 +  @Override
 +  public SortedSet<String> list() {
 +
 +    OpTimer timer = null;
 +
 +    if (log.isTraceEnabled()) {
 +      log.trace("tid={} Fetching list of tables...", Thread.currentThread().getId());
 +      timer = new OpTimer().start();
 +    }
 +
 +    TreeSet<String> tableNames = new TreeSet<>(Tables.getNameToIdMap(context).keySet());
 +
 +    if (timer != null) {
 +      timer.stop();
 +      log.trace("tid={} Fetched {} table names in {}", Thread.currentThread().getId(),
 +          tableNames.size(), String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
 +    }
 +
 +    return tableNames;
 +  }
 +
 +  @Override
 +  public boolean exists(String tableName) {
 +    checkArgument(tableName != null, "tableName is null");
 +    if (tableName.equals(MetadataTable.NAME) || tableName.equals(RootTable.NAME))
 +      return true;
 +
 +    OpTimer timer = null;
 +
 +    if (log.isTraceEnabled()) {
 +      log.trace("tid={} Checking if table {} exists...", Thread.currentThread().getId(), tableName);
 +      timer = new OpTimer().start();
 +    }
 +
 +    boolean exists = Tables.getNameToIdMap(context).containsKey(tableName);
 +
 +    if (timer != null) {
 +      timer.stop();
 +      log.trace("tid={} Checked existance of {} in {}", Thread.currentThread().getId(), exists,
 +          String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
 +    }
 +
 +    return exists;
 +  }
 +
 +  @Override
 +  public void create(String tableName)
 +      throws AccumuloException, AccumuloSecurityException, TableExistsException {
 +    create(tableName, new NewTableConfiguration());
 +  }
 +
 +  @Override
 +  public void create(String tableName, NewTableConfiguration ntc)
 +      throws AccumuloException, AccumuloSecurityException, TableExistsException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(ntc != null, "ntc is null");
 +
 +    List<ByteBuffer> args = new ArrayList<>();
 +    args.add(ByteBuffer.wrap(tableName.getBytes(UTF_8)));
 +    args.add(ByteBuffer.wrap(ntc.getTimeType().name().getBytes(UTF_8)));
 +    // Send info relating to initial table creation, i.e., create online or offline
 +    args.add(ByteBuffer.wrap(ntc.getInitialTableState().name().getBytes(UTF_8)));
 +    // Check for possible initial splits to be added at table creation
 +    // Always send number of initial splits to be created, even if zero. If greater than zero,
 +    // add the splits to the argument List which will be used by the FATE operations.
 +    int numSplits = ntc.getSplits().size();
 +    args.add(ByteBuffer.wrap(String.valueOf(numSplits).getBytes(UTF_8)));
 +    if (numSplits > 0) {
 +      for (Text t : ntc.getSplits()) {
 +        args.add(TextUtil.getByteBuffer(t));
 +      }
 +    }
 +
 +    Map<String,String> opts = ntc.getProperties();
 +
 +    try {
 +      doTableFateOperation(tableName, AccumuloException.class, FateOperation.TABLE_CREATE, args,
 +          opts);
 +    } catch (TableNotFoundException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +  }
 +
 +  private long beginFateOperation() throws ThriftSecurityException, TException {
 +    while (true) {
 +      MasterClientService.Iface client = null;
 +      try {
 +        client = MasterClient.getConnectionWithRetry(context);
 +        return client.beginFateOperation(TraceUtil.traceInfo(), context.rpcCreds());
 +      } catch (TTransportException tte) {
 +        log.debug("Failed to call beginFateOperation(), retrying ... ", tte);
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } catch (ThriftNotActiveServiceException e) {
 +        // Let it loop, fetching a new location
 +        log.debug("Contacted a Master which is no longer active, retrying");
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } finally {
 +        MasterClient.close(client);
 +      }
 +    }
 +  }
 +
 +  // This method is for retrying in the case of network failures; anything else it passes to the
 +  // caller to deal with
 +  private void executeFateOperation(long opid, FateOperation op, List<ByteBuffer> args,
 +      Map<String,String> opts, boolean autoCleanUp)
 +      throws ThriftSecurityException, TException, ThriftTableOperationException {
 +    while (true) {
 +      MasterClientService.Iface client = null;
 +      try {
 +        client = MasterClient.getConnectionWithRetry(context);
 +        client.executeFateOperation(TraceUtil.traceInfo(), context.rpcCreds(), opid, op, args, opts,
 +            autoCleanUp);
 +        return;
 +      } catch (TTransportException tte) {
 +        log.debug("Failed to call executeFateOperation(), retrying ... ", tte);
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } catch (ThriftNotActiveServiceException e) {
 +        // Let it loop, fetching a new location
 +        log.debug("Contacted a Master which is no longer active, retrying");
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } finally {
 +        MasterClient.close(client);
 +      }
 +    }
 +  }
 +
 +  private String waitForFateOperation(long opid)
 +      throws ThriftSecurityException, TException, ThriftTableOperationException {
 +    while (true) {
 +      MasterClientService.Iface client = null;
 +      try {
 +        client = MasterClient.getConnectionWithRetry(context);
 +        return client.waitForFateOperation(TraceUtil.traceInfo(), context.rpcCreds(), opid);
 +      } catch (TTransportException tte) {
 +        log.debug("Failed to call waitForFateOperation(), retrying ... ", tte);
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } catch (ThriftNotActiveServiceException e) {
 +        // Let it loop, fetching a new location
 +        log.debug("Contacted a Master which is no longer active, retrying");
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } finally {
 +        MasterClient.close(client);
 +      }
 +    }
 +  }
 +
 +  private void finishFateOperation(long opid) throws ThriftSecurityException, TException {
 +    while (true) {
 +      MasterClientService.Iface client = null;
 +      try {
 +        client = MasterClient.getConnectionWithRetry(context);
 +        client.finishFateOperation(TraceUtil.traceInfo(), context.rpcCreds(), opid);
 +        break;
 +      } catch (TTransportException tte) {
 +        log.debug("Failed to call finishFateOperation(), retrying ... ", tte);
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } catch (ThriftNotActiveServiceException e) {
 +        // Let it loop, fetching a new location
 +        log.debug("Contacted a Master which is no longer active, retrying");
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } finally {
 +        MasterClient.close(client);
 +      }
 +    }
 +  }
 +
 +  public String doBulkFateOperation(List<ByteBuffer> args, String tableName)
 +      throws AccumuloSecurityException, AccumuloException {
 +    try {
 +      return doFateOperation(FateOperation.TABLE_BULK_IMPORT2, args, Collections.emptyMap(),
 +          tableName);
 +    } catch (TableExistsException | TableNotFoundException | NamespaceNotFoundException
 +        | NamespaceExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +  }
 +
 +  String doFateOperation(FateOperation op, List<ByteBuffer> args, Map<String,String> opts,
 +      String tableOrNamespaceName)
 +      throws AccumuloSecurityException, TableExistsException, TableNotFoundException,
 +      AccumuloException, NamespaceExistsException, NamespaceNotFoundException {
 +    return doFateOperation(op, args, opts, tableOrNamespaceName, true);
 +  }
 +
 +  String doFateOperation(FateOperation op, List<ByteBuffer> args, Map<String,String> opts,
 +      String tableOrNamespaceName, boolean wait)
 +      throws AccumuloSecurityException, TableExistsException, TableNotFoundException,
 +      AccumuloException, NamespaceExistsException, NamespaceNotFoundException {
 +    Long opid = null;
 +
 +    try {
 +      opid = beginFateOperation();
 +      executeFateOperation(opid, op, args, opts, !wait);
 +      if (!wait) {
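 +        // the op was started with autoCleanUp (= !wait), so clear opid to skip
 +        // finishFateOperation in the finally block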
 +        opid = null;
 +        return null;
 +      }
 +      return waitForFateOperation(opid);
 +    } catch (ThriftSecurityException e) {
 +      switch (e.getCode()) {
 +        case TABLE_DOESNT_EXIST:
 +          throw new TableNotFoundException(null, tableOrNamespaceName,
 +              "Target table does not exist");
 +        case NAMESPACE_DOESNT_EXIST:
 +          throw new NamespaceNotFoundException(null, tableOrNamespaceName,
 +              "Target namespace does not exist");
 +        default:
 +          String tableInfo = Tables.getPrintableTableInfoFromName(context, tableOrNamespaceName);
 +          throw new AccumuloSecurityException(e.user, e.code, tableInfo, e);
 +      }
 +    } catch (ThriftTableOperationException e) {
 +      switch (e.getType()) {
 +        case EXISTS:
 +          throw new TableExistsException(e);
 +        case NOTFOUND:
 +          throw new TableNotFoundException(e);
 +        case NAMESPACE_EXISTS:
 +          throw new NamespaceExistsException(e);
 +        case NAMESPACE_NOTFOUND:
 +          throw new NamespaceNotFoundException(e);
 +        case OFFLINE:
 +          throw new TableOfflineException(
 +              Tables.getTableOfflineMsg(context, Tables.getTableId(context, tableOrNamespaceName)));
 +        default:
 +          throw new AccumuloException(e.description, e);
 +      }
 +    } catch (Exception e) {
 +      throw new AccumuloException(e.getMessage(), e);
 +    } finally {
 +      Tables.clearCache(context);
 +      // always finish table op, even when exception
 +      if (opid != null)
 +        try {
 +          finishFateOperation(opid);
 +        } catch (Exception e) {
 +          log.warn("Exception thrown while finishing fate table operation", e);
 +        }
 +    }
 +  }
 +
 +  private static class SplitEnv {
 +    private String tableName;
 +    private TableId tableId;
 +    private ExecutorService executor;
 +    private CountDownLatch latch;
 +    private AtomicReference<Throwable> exception;
 +
 +    SplitEnv(String tableName, TableId tableId, ExecutorService executor, CountDownLatch latch,
 +        AtomicReference<Throwable> exception) {
 +      this.tableName = tableName;
 +      this.tableId = tableId;
 +      this.executor = executor;
 +      this.latch = latch;
 +      this.exception = exception;
 +    }
 +  }
 +
 +  private class SplitTask implements Runnable {
 +
 +    private List<Text> splits;
 +    private SplitEnv env;
 +
 +    SplitTask(SplitEnv env, List<Text> splits) {
 +      this.env = env;
 +      this.splits = splits;
 +    }
 +
 +    @Override
 +    public void run() {
 +      try {
 +        if (env.exception.get() != null)
 +          return;
 +
 +        if (splits.size() <= 2) {
 +          addSplits(env.tableName, new TreeSet<>(splits), env.tableId);
 +          for (int i = 0; i < splits.size(); i++)
 +            env.latch.countDown();
 +          return;
 +        }
 +
 +        int mid = splits.size() / 2;
 +
 +        // split at the middle split point to ensure that child tasks split different tablets
 +        // and can therefore run in parallel
 +        addSplits(env.tableName, new TreeSet<>(splits.subList(mid, mid + 1)), env.tableId);
 +        env.latch.countDown();
 +
 +        env.executor.execute(new SplitTask(env, splits.subList(0, mid)));
 +        env.executor.execute(new SplitTask(env, splits.subList(mid + 1, splits.size())));
 +
 +      } catch (Throwable t) {
 +        env.exception.compareAndSet(null, t);
 +      }
 +    }
 +
 +  }
 +
 +  @Override
 +  public void addSplits(String tableName, SortedSet<Text> partitionKeys)
 +      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 +    TableId tableId = Tables.getTableId(context, tableName);
 +
 +    List<Text> splits = new ArrayList<>(partitionKeys);
 +    // should be sorted because we copied from a sorted set, but that makes assumptions about
 +    // how the copy was done, so re-sort to be sure.
 +    Collections.sort(splits);
 +
 +    CountDownLatch latch = new CountDownLatch(splits.size());
 +    AtomicReference<Throwable> exception = new AtomicReference<>(null);
 +
-     ExecutorService executor = Executors.newFixedThreadPool(16,
-         new NamingThreadFactory("addSplits"));
++    ExecutorService executor =
++        Executors.newFixedThreadPool(16, new NamingThreadFactory("addSplits"));
 +    try {
 +      executor.execute(
 +          new SplitTask(new SplitEnv(tableName, tableId, executor, latch, exception), splits));
 +
 +      while (!latch.await(100, TimeUnit.MILLISECONDS)) {
 +        if (exception.get() != null) {
 +          executor.shutdownNow();
 +          Throwable excep = exception.get();
 +          // Below, all exceptions are wrapped and rethrown so that the user knows which code path
 +          // got them here. Without the wrapping, the user would only have the stack trace for the
 +          // background thread.
 +          if (excep instanceof TableNotFoundException) {
 +            TableNotFoundException tnfe = (TableNotFoundException) excep;
 +            throw new TableNotFoundException(tableId.canonical(), tableName,
 +                "Table not found by background thread", tnfe);
 +          } else if (excep instanceof TableOfflineException) {
 +            log.debug("TableOfflineException occurred in background thread. Throwing new exception",
 +                excep);
 +            throw new TableOfflineException(Tables.getTableOfflineMsg(context, tableId));
 +          } else if (excep instanceof AccumuloSecurityException) {
 +            // base == background accumulo security exception
 +            AccumuloSecurityException base = (AccumuloSecurityException) excep;
 +            throw new AccumuloSecurityException(base.getUser(), base.asThriftException().getCode(),
 +                base.getTableInfo(), excep);
 +          } else if (excep instanceof AccumuloServerException) {
 +            throw new AccumuloServerException((AccumuloServerException) excep);
 +          } else if (excep instanceof Error) {
 +            throw new Error(excep);
 +          } else {
 +            throw new AccumuloException(excep);
 +          }
 +        }
 +      }
 +    } catch (InterruptedException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      executor.shutdown();
 +    }
 +  }
 +
 +  private void addSplits(String tableName, SortedSet<Text> partitionKeys, TableId tableId)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException,
 +      AccumuloServerException {
 +    TabletLocator tabLocator = TabletLocator.getLocator(context, tableId);
 +
 +    for (Text split : partitionKeys) {
 +      boolean successful = false;
 +      int attempt = 0;
 +      long locationFailures = 0;
 +
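 +      // retry until the split succeeds, re-resolving the tablet location after each failure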
 +      while (!successful) {
 +
 +        if (attempt > 0)
 +          sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +
 +        attempt++;
 +
 +        TabletLocation tl = tabLocator.locateTablet(context, split, false, false);
 +
 +        if (tl == null) {
 +          if (!Tables.exists(context, tableId))
 +            throw new TableNotFoundException(tableId.canonical(), tableName, null);
 +          else if (Tables.getTableState(context, tableId) == TableState.OFFLINE)
 +            throw new TableOfflineException(Tables.getTableOfflineMsg(context, tableId));
 +          continue;
 +        }
 +
 +        HostAndPort address = HostAndPort.fromString(tl.tablet_location);
 +
 +        try {
 +          TabletClientService.Client client = ThriftUtil.getTServerClient(address, context);
 +          try {
 +
 +            OpTimer timer = null;
 +
 +            if (log.isTraceEnabled()) {
 +              log.trace("tid={} Splitting tablet {} on {} at {}", Thread.currentThread().getId(),
 +                  tl.tablet_extent, address, split);
 +              timer = new OpTimer().start();
 +            }
 +
 +            client.splitTablet(TraceUtil.traceInfo(), context.rpcCreds(),
 +                tl.tablet_extent.toThrift(), TextUtil.getByteBuffer(split));
 +
 +            // we just split it, so we might as well invalidate it in the cache
 +            tabLocator.invalidateCache(tl.tablet_extent);
 +
 +            if (timer != null) {
 +              timer.stop();
 +              log.trace("Split tablet in {}",
 +                  String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
 +            }
 +
 +          } finally {
 +            ThriftUtil.returnClient(client);
 +          }
 +
 +        } catch (TApplicationException tae) {
 +          throw new AccumuloServerException(address.toString(), tae);
 +        } catch (TTransportException e) {
 +          tabLocator.invalidateCache(context, tl.tablet_location);
 +          continue;
 +        } catch (ThriftSecurityException e) {
 +          Tables.clearCache(context);
 +          if (!Tables.exists(context, tableId))
 +            throw new TableNotFoundException(tableId.canonical(), tableName, null);
 +          throw new AccumuloSecurityException(e.user, e.code, e);
 +        } catch (NotServingTabletException e) {
 +          // Do not silently spin when we repeatedly fail to get the location for a tablet
 +          locationFailures++;
 +          if (locationFailures == 5 || locationFailures % 50 == 0) {
 +            log.warn("Having difficulty locating hosting tabletserver for split {} on table {}."
 +                + " Seen {} failures.", split, tableName, locationFailures);
 +          }
 +
 +          tabLocator.invalidateCache(tl.tablet_extent);
 +          continue;
 +        } catch (TException e) {
 +          tabLocator.invalidateCache(context, tl.tablet_location);
 +          continue;
 +        }
 +
 +        successful = true;
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public void merge(String tableName, Text start, Text end)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +
 +    checkArgument(tableName != null, "tableName is null");
 +    ByteBuffer EMPTY = ByteBuffer.allocate(0);
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
 +        start == null ? EMPTY : TextUtil.getByteBuffer(start),
 +        end == null ? EMPTY : TextUtil.getByteBuffer(end));
 +    Map<String,String> opts = new HashMap<>();
 +    try {
 +      doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_MERGE, args,
 +          opts);
 +    } catch (TableExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +  }
 +
 +  @Override
 +  public void deleteRows(String tableName, Text start, Text end)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +
 +    checkArgument(tableName != null, "tableName is null");
 +    ByteBuffer EMPTY = ByteBuffer.allocate(0);
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
 +        start == null ? EMPTY : TextUtil.getByteBuffer(start),
 +        end == null ? EMPTY : TextUtil.getByteBuffer(end));
 +    Map<String,String> opts = new HashMap<>();
 +    try {
 +      doTableFateOperation(tableName, TableNotFoundException.class,
 +          FateOperation.TABLE_DELETE_RANGE, args, opts);
 +    } catch (TableExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +  }
 +
 +  @Override
 +  public Collection<Text> listSplits(String tableName)
 +      throws TableNotFoundException, AccumuloSecurityException {
 +    return _listSplits(tableName);
 +  }
 +
 +  private List<Text> _listSplits(String tableName)
 +      throws TableNotFoundException, AccumuloSecurityException {
 +    checkArgument(tableName != null, "tableName is null");
 +
 +    TableId tableId = Tables.getTableId(context, tableName);
 +
 +    TreeMap<KeyExtent,String> tabletLocations = new TreeMap<>();
 +
 +    while (true) {
 +      try {
 +        tabletLocations.clear();
 +        // the following method throws AccumuloException for some conditions that should be retried
 +        MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
 +        break;
 +      } catch (AccumuloSecurityException ase) {
 +        throw ase;
 +      } catch (Exception e) {
 +        if (!Tables.exists(context, tableId)) {
 +          throw new TableNotFoundException(tableId.canonical(), tableName, null);
 +        }
 +
 +        if (e instanceof RuntimeException && e.getCause() instanceof AccumuloSecurityException) {
 +          throw (AccumuloSecurityException) e.getCause();
 +        }
 +
 +        log.info("{} ... retrying ...", e.getMessage());
 +        sleepUninterruptibly(3, TimeUnit.SECONDS);
 +      }
 +    }
 +
 +    ArrayList<Text> endRows = new ArrayList<>(tabletLocations.size());
 +
 +    for (KeyExtent ke : tabletLocations.keySet())
 +      if (ke.getEndRow() != null)
 +        endRows.add(ke.getEndRow());
 +
 +    return endRows;
 +
 +  }
 +
 +  @Override
 +  public Collection<Text> listSplits(String tableName, int maxSplits)
 +      throws TableNotFoundException, AccumuloSecurityException {
 +    List<Text> endRows = _listSplits(tableName);
 +
 +    if (endRows.size() <= maxSplits)
 +      return endRows;
 +
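 +    // take an evenly spaced subset of the end rows so that at most maxSplits are returned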
 +    double r = (maxSplits + 1) / (double) (endRows.size());
 +    double pos = 0;
 +
 +    ArrayList<Text> subset = new ArrayList<>(maxSplits);
 +
 +    int j = 0;
 +    for (int i = 0; i < endRows.size() && j < maxSplits; i++) {
 +      pos += r;
 +      while (pos > 1) {
 +        subset.add(endRows.get(i));
 +        j++;
 +        pos -= 1;
 +      }
 +    }
 +
 +    return subset;
 +  }
 +
 +  @Override
 +  public void delete(String tableName)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)));
 +    Map<String,String> opts = new HashMap<>();
 +
 +    try {
 +      doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_DELETE,
 +          args, opts);
 +    } catch (TableExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +
 +  }
 +
 +  @Override
 +  public void clone(String srcTableName, String newTableName, boolean flush,
 +      Map<String,String> propertiesToSet, Set<String> propertiesToExclude)
 +      throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
 +      TableExistsException {
 +
 +    checkArgument(srcTableName != null, "srcTableName is null");
 +    checkArgument(newTableName != null, "newTableName is null");
 +
 +    TableId srcTableId = Tables.getTableId(context, srcTableName);
 +
 +    if (flush)
 +      _flush(srcTableId, null, null, true);
 +
 +    if (propertiesToExclude == null)
 +      propertiesToExclude = Collections.emptySet();
 +
 +    if (propertiesToSet == null)
 +      propertiesToSet = Collections.emptyMap();
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(srcTableId.canonical().getBytes(UTF_8)),
 +        ByteBuffer.wrap(newTableName.getBytes(UTF_8)));
 +    Map<String,String> opts = new HashMap<>();
 +    for (Entry<String,String> entry : propertiesToSet.entrySet()) {
 +      if (entry.getKey().startsWith(CLONE_EXCLUDE_PREFIX))
 +        throw new IllegalArgumentException("Property can not start with " + CLONE_EXCLUDE_PREFIX);
 +      opts.put(entry.getKey(), entry.getValue());
 +    }
 +
 +    for (String prop : propertiesToExclude) {
 +      opts.put(CLONE_EXCLUDE_PREFIX + prop, "");
 +    }
 +
 +    doTableFateOperation(newTableName, AccumuloException.class, FateOperation.TABLE_CLONE, args,
 +        opts);
 +  }
 +
 +  @Override
 +  public void rename(String oldTableName, String newTableName) throws AccumuloSecurityException,
 +      TableNotFoundException, AccumuloException, TableExistsException {
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(oldTableName.getBytes(UTF_8)),
 +        ByteBuffer.wrap(newTableName.getBytes(UTF_8)));
 +    Map<String,String> opts = new HashMap<>();
 +    doTableFateOperation(oldTableName, TableNotFoundException.class, FateOperation.TABLE_RENAME,
 +        args, opts);
 +  }
 +
 +  @Override
 +  public void flush(String tableName) throws AccumuloException, AccumuloSecurityException {
 +    try {
 +      flush(tableName, null, null, false);
 +    } catch (TableNotFoundException e) {
 +      throw new AccumuloException(e.getMessage(), e);
 +    }
 +  }
 +
 +  @Override
 +  public void flush(String tableName, Text start, Text end, boolean wait)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +
 +    TableId tableId = Tables.getTableId(context, tableName);
 +    _flush(tableId, start, end, wait);
 +  }
 +
 +  @Override
 +  public void compact(String tableName, Text start, Text end, boolean flush, boolean wait)
 +      throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
 +    compact(tableName, start, end, new ArrayList<>(), flush, wait);
 +  }
 +
 +  @Override
 +  public void compact(String tableName, Text start, Text end, List<IteratorSetting> iterators,
 +      boolean flush, boolean wait)
 +      throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
 +    compact(tableName, new CompactionConfig().setStartRow(start).setEndRow(end)
 +        .setIterators(iterators).setFlush(flush).setWait(wait));
 +  }
 +
 +  @Override
 +  public void compact(String tableName, CompactionConfig config)
 +      throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
 +    checkArgument(tableName != null, "tableName is null");
 +    ByteBuffer EMPTY = ByteBuffer.allocate(0);
 +
 +    // Ensure compaction iterators exist on a tabletserver
 +    final String skviName = SortedKeyValueIterator.class.getName();
 +    for (IteratorSetting setting : config.getIterators()) {
 +      String iteratorClass = setting.getIteratorClass();
 +      if (!testClassLoad(tableName, iteratorClass, skviName)) {
 +        throw new AccumuloException("TabletServer could not load iterator class " + iteratorClass);
 +      }
 +    }
 +
 +    // Make sure the specified compaction strategy exists on a tabletserver
 +    final String compactionStrategyName = config.getCompactionStrategy().getClassName();
 +    if (!CompactionStrategyConfigUtil.DEFAULT_STRATEGY.getClassName()
 +        .equals(compactionStrategyName)) {
 +      if (!testClassLoad(tableName, compactionStrategyName,
 +          "org.apache.accumulo.tserver.compaction.CompactionStrategy")) {
 +        throw new AccumuloException(
 +            "TabletServer could not load CompactionStrategy class " + compactionStrategyName);
 +      }
 +    }
 +
 +    TableId tableId = Tables.getTableId(context, tableName);
 +
 +    Text start = config.getStartRow();
 +    Text end = config.getEndRow();
 +
 +    if (config.getFlush())
 +      _flush(tableId, start, end, true);
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)),
 +        start == null ? EMPTY : TextUtil.getByteBuffer(start),
 +        end == null ? EMPTY : TextUtil.getByteBuffer(end),
 +        ByteBuffer.wrap(SystemIteratorUtil.encodeIteratorSettings(config.getIterators())),
 +        ByteBuffer.wrap(CompactionStrategyConfigUtil.encode(config.getCompactionStrategy())));
 +
 +    Map<String,String> opts = new HashMap<>();
 +    try {
 +      doFateOperation(FateOperation.TABLE_COMPACT, args, opts, tableName, config.getWait());
 +    } catch (TableExistsException | NamespaceExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    } catch (NamespaceNotFoundException e) {
 +      throw new TableNotFoundException(null, tableName, "Namespace not found", e);
 +    }
 +  }
 +
 +  @Override
 +  public void cancelCompaction(String tableName)
 +      throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
 +    TableId tableId = Tables.getTableId(context, tableName);
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)));
 +
 +    Map<String,String> opts = new HashMap<>();
 +    try {
 +      doTableFateOperation(tableName, TableNotFoundException.class,
 +          FateOperation.TABLE_CANCEL_COMPACT, args, opts);
 +    } catch (TableExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +
 +  }
 +
 +  private void _flush(TableId tableId, Text start, Text end, boolean wait)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +
 +    try {
 +      long flushID;
 +
 +      // this used to pass the table name, but the table id associated with a table name could
 +      // change between calls, so pass the table id to both calls
 +
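 +      // ask the master to initiate the flush and record the returned flush id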
 +      while (true) {
 +        MasterClientService.Iface client = null;
 +        try {
 +          client = MasterClient.getConnectionWithRetry(context);
-           flushID = client.initiateFlush(TraceUtil.traceInfo(), context.rpcCreds(),
-               tableId.canonical());
++          flushID =
++              client.initiateFlush(TraceUtil.traceInfo(), context.rpcCreds(), tableId.canonical());
 +          break;
 +        } catch (TTransportException tte) {
 +          log.debug("Failed to call initiateFlush, retrying ... ", tte);
 +          sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +        } catch (ThriftNotActiveServiceException e) {
 +          // Let it loop, fetching a new location
 +          log.debug("Contacted a Master which is no longer active, retrying");
 +          sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +        } finally {
 +          MasterClient.close(client);
 +        }
 +      }
 +
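 +      // now wait for the flush to complete; when wait is false only a minimal wait is requested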
 +      while (true) {
 +        MasterClientService.Iface client = null;
 +        try {
 +          client = MasterClient.getConnectionWithRetry(context);
 +          client.waitForFlush(TraceUtil.traceInfo(), context.rpcCreds(), tableId.canonical(),
 +              TextUtil.getByteBuffer(start), TextUtil.getByteBuffer(end), flushID,
 +              wait ? Long.MAX_VALUE : 1);
 +          break;
 +        } catch (TTransportException tte) {
 +          log.debug("Failed to call initiateFlush, retrying ... ", tte);
 +          sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +        } catch (ThriftNotActiveServiceException e) {
 +          // Let it loop, fetching a new location
 +          log.debug("Contacted a Master which is no longer active, retrying");
 +          sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +        } finally {
 +          MasterClient.close(client);
 +        }
 +      }
 +    } catch (ThriftSecurityException e) {
 +      switch (e.getCode()) {
 +        case TABLE_DOESNT_EXIST:
 +          throw new TableNotFoundException(tableId.canonical(), null, e.getMessage(), e);
 +        default:
 +          log.debug("flush security exception on table id {}", tableId);
 +          throw new AccumuloSecurityException(e.user, e.code, e);
 +      }
 +    } catch (ThriftTableOperationException e) {
 +      switch (e.getType()) {
 +        case NOTFOUND:
 +          throw new TableNotFoundException(e);
 +        default:
 +          throw new AccumuloException(e.description, e);
 +      }
 +    } catch (Exception e) {
 +      throw new AccumuloException(e);
 +    }
 +  }
 +
 +  @Override
 +  public void setProperty(final String tableName, final String property, final String value)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(property != null, "property is null");
 +    checkArgument(value != null, "value is null");
 +    try {
 +      setPropertyNoChecks(tableName, property, value);
 +
 +      checkLocalityGroups(tableName, property);
 +    } catch (TableNotFoundException e) {
 +      throw new AccumuloException(e);
 +    }
 +  }
 +
 +  private void setPropertyNoChecks(final String tableName, final String property,
 +      final String value)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    MasterClient.executeTable(context, client -> client.setTableProperty(TraceUtil.traceInfo(),
 +        context.rpcCreds(), tableName, property, value));
 +  }
 +
 +  @Override
 +  public void removeProperty(final String tableName, final String property)
 +      throws AccumuloException, AccumuloSecurityException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(property != null, "property is null");
 +    try {
 +      removePropertyNoChecks(tableName, property);
 +
 +      checkLocalityGroups(tableName, property);
 +    } catch (TableNotFoundException e) {
 +      throw new AccumuloException(e);
 +    }
 +  }
 +
 +  private void removePropertyNoChecks(final String tableName, final String property)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    MasterClient.executeTable(context, client -> client.removeTableProperty(TraceUtil.traceInfo(),
 +        context.rpcCreds(), tableName, property));
 +  }
 +
 +  void checkLocalityGroups(String tableName, String propChanged)
 +      throws AccumuloException, TableNotFoundException {
 +    if (LocalityGroupUtil.isLocalityGroupProperty(propChanged)) {
 +      Iterable<Entry<String,String>> allProps = getProperties(tableName);
 +      try {
 +        LocalityGroupUtil.checkLocalityGroups(allProps);
 +      } catch (LocalityGroupConfigurationError | RuntimeException e) {
 +        LoggerFactory.getLogger(this.getClass()).warn("Changing '" + propChanged + "' for table '"
 +            + tableName
 +            + "' resulted in bad locality group config.  This may be a transient situation since "
 +            + "the config spreads over multiple properties.  Setting properties in a different "
 +            + "order may help.  Even though this warning was displayed, the property was updated. "
 +            + "Please check your config to ensure consistency.", e);
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public Iterable<Entry<String,String>> getProperties(final String tableName)
 +      throws AccumuloException, TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    try {
 +      return ServerClient.executeRaw(context, client -> client
 +          .getTableConfiguration(TraceUtil.traceInfo(), context.rpcCreds(), tableName)).entrySet();
 +    } catch (ThriftTableOperationException e) {
 +      switch (e.getType()) {
 +        case NOTFOUND:
 +          throw new TableNotFoundException(e);
 +        case NAMESPACE_NOTFOUND:
 +          throw new TableNotFoundException(tableName, new NamespaceNotFoundException(e));
 +        default:
 +          throw new AccumuloException(e.description, e);
 +      }
 +    } catch (AccumuloException e) {
 +      throw e;
 +    } catch (Exception e) {
 +      throw new AccumuloException(e);
 +    }
 +
 +  }
 +
 +  @Override
 +  public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    // ensure locality groups do not overlap
 +    LocalityGroupUtil.ensureNonOverlappingGroups(groups);
 +
 +    for (Entry<String,Set<Text>> entry : groups.entrySet()) {
 +      Set<Text> colFams = entry.getValue();
 +      String value = LocalityGroupUtil.encodeColumnFamilies(colFams);
 +      setPropertyNoChecks(tableName, Property.TABLE_LOCALITY_GROUP_PREFIX + entry.getKey(), value);
 +    }
 +
 +    try {
 +      setPropertyNoChecks(tableName, Property.TABLE_LOCALITY_GROUPS.getKey(),
 +          Joiner.on(",").join(groups.keySet()));
 +    } catch (AccumuloException e) {
 +      if (e.getCause() instanceof TableNotFoundException)
 +        throw (TableNotFoundException) e.getCause();
 +      throw e;
 +    }
 +
 +    // remove anything extraneous
 +    String prefix = Property.TABLE_LOCALITY_GROUP_PREFIX.getKey();
 +    for (Entry<String,String> entry : getProperties(tableName)) {
 +      String property = entry.getKey();
 +      if (property.startsWith(prefix)) {
 +        // this property configures a locality group, find out which one:
 +        String[] parts = property.split("\\.");
 +        String group = parts[parts.length - 1];
 +
 +        if (!groups.containsKey(group)) {
 +          removePropertyNoChecks(tableName, property);
 +        }
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public Map<String,Set<Text>> getLocalityGroups(String tableName)
 +      throws AccumuloException, TableNotFoundException {
 +    AccumuloConfiguration conf = new ConfigurationCopy(this.getProperties(tableName));
 +    Map<String,Set<ByteSequence>> groups = LocalityGroupUtil.getLocalityGroups(conf);
 +
 +    Map<String,Set<Text>> groups2 = new HashMap<>();
 +    for (Entry<String,Set<ByteSequence>> entry : groups.entrySet()) {
 +
 +      HashSet<Text> colFams = new HashSet<>();
 +
 +      for (ByteSequence bs : entry.getValue()) {
 +        colFams.add(new Text(bs.toArray()));
 +      }
 +
 +      groups2.put(entry.getKey(), colFams);
 +    }
 +
 +    return groups2;
 +  }
 +
 +  @Override
 +  public Set<Range> splitRangeByTablets(String tableName, Range range, int maxSplits)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(range != null, "range is null");
 +    if (maxSplits < 1)
 +      throw new IllegalArgumentException("maximum splits must be >= 1");
 +    if (maxSplits == 1)
 +      return Collections.singleton(range);
 +
 +    Random random = new SecureRandom();
 +    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
 +    TableId tableId = Tables.getTableId(context, tableName);
 +    TabletLocator tl = TabletLocator.getLocator(context, tableId);
 +    // it's possible that the cache could contain complete but stale information about a table's
 +    // tablets, so clear it
 +    tl.invalidateCache();
 +    while (!tl.binRanges(context, Collections.singletonList(range), binnedRanges).isEmpty()) {
 +      if (!Tables.exists(context, tableId))
 +        throw new TableDeletedException(tableId.canonical());
 +      if (Tables.getTableState(context, tableId) == TableState.OFFLINE)
 +        throw new TableOfflineException(Tables.getTableOfflineMsg(context, tableId));
 +
 +      log.warn("Unable to locate bins for specified range. Retrying.");
 +      // sleep randomly between 100 and 200ms
 +      sleepUninterruptibly(100 + random.nextInt(100), TimeUnit.MILLISECONDS);
 +      binnedRanges.clear();
 +      tl.invalidateCache();
 +    }
 +
 +    // group key extents to get <= maxSplits
 +    LinkedList<KeyExtent> unmergedExtents = new LinkedList<>();
 +    List<KeyExtent> mergedExtents = new ArrayList<>();
 +
 +    for (Map<KeyExtent,List<Range>> map : binnedRanges.values())
 +      unmergedExtents.addAll(map.keySet());
 +
 +    // the sort method is efficient for linked lists
 +    Collections.sort(unmergedExtents);
 +
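 +    // merge pairs of adjacent extents until at most maxSplits remain; if a single pass is not
 +    // enough, feed the merged extents back in for another pass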
 +    while (unmergedExtents.size() + mergedExtents.size() > maxSplits) {
 +      if (unmergedExtents.size() >= 2) {
 +        KeyExtent first = unmergedExtents.removeFirst();
 +        KeyExtent second = unmergedExtents.removeFirst();
 +        first.setEndRow(second.getEndRow());
 +        mergedExtents.add(first);
 +      } else {
 +        mergedExtents.addAll(unmergedExtents);
 +        unmergedExtents.clear();
 +        unmergedExtents.addAll(mergedExtents);
 +        mergedExtents.clear();
 +      }
 +
 +    }
 +
 +    mergedExtents.addAll(unmergedExtents);
 +
 +    Set<Range> ranges = new HashSet<>();
 +    for (KeyExtent k : mergedExtents)
 +      ranges.add(k.toDataRange().clip(range));
 +
 +    return ranges;
 +  }
 +
 +  private Path checkPath(String dir, String kind, String type)
 +      throws IOException, AccumuloException, AccumuloSecurityException {
 +    Path ret;
 +    Map<String,String> props = context.instanceOperations().getSystemConfiguration();
 +    AccumuloConfiguration conf = new ConfigurationCopy(props);
 +
-     FileSystem fs = VolumeConfiguration.getVolume(dir, context.getHadoopConf(), conf)
-         .getFileSystem();
++    FileSystem fs =
++        VolumeConfiguration.getVolume(dir, context.getHadoopConf(), conf).getFileSystem();
 +
 +    if (dir.contains(":")) {
 +      ret = new Path(dir);
 +    } else {
 +      ret = fs.makeQualified(new Path(dir));
 +    }
 +
 +    try {
 +      if (!fs.getFileStatus(ret).isDirectory()) {
 +        throw new AccumuloException(
 +            kind + " import " + type + " directory " + dir + " is not a directory!");
 +      }
 +    } catch (FileNotFoundException fnf) {
 +      throw new AccumuloException(
 +          kind + " import " + type + " directory " + dir + " does not exist!");
 +    }
 +
 +    if (type.equals("failure")) {
 +      FileStatus[] listStatus = fs.listStatus(ret);
 +      if (listStatus != null && listStatus.length != 0) {
 +        throw new AccumuloException("Bulk import failure directory " + ret + " is not empty");
 +      }
 +    }
 +
 +    return ret;
 +  }
 +
 +  @Override
 +  @Deprecated
 +  public void importDirectory(String tableName, String dir, String failureDir, boolean setTime)
 +      throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(dir != null, "dir is null");
 +    checkArgument(failureDir != null, "failureDir is null");
 +    // check for table existence
 +    Tables.getTableId(context, tableName);
 +
 +    Path dirPath = checkPath(dir, "Bulk", "");
 +    Path failPath = checkPath(failureDir, "Bulk", "failure");
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
 +        ByteBuffer.wrap(dirPath.toString().getBytes(UTF_8)),
 +        ByteBuffer.wrap(failPath.toString().getBytes(UTF_8)),
 +        ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));
 +    Map<String,String> opts = new HashMap<>();
 +
 +    try {
 +      doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_BULK_IMPORT,
 +          args, opts);
 +    } catch (TableExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +  }
 +
 +  private void waitForTableStateTransition(TableId tableId, TableState expectedState)
 +      throws AccumuloException, TableNotFoundException {
 +
 +    Text startRow = null;
 +    Text lastRow = null;
 +
 +    while (true) {
 +
 +      if (Tables.getTableState(context, tableId) != expectedState) {
 +        Tables.clearCache(context);
 +        TableState currentState = Tables.getTableState(context, tableId);
 +        if (currentState != expectedState) {
 +          if (!Tables.exists(context, tableId))
 +            throw new TableDeletedException(tableId.canonical());
 +          if (currentState == TableState.DELETING)
 +            throw new TableNotFoundException(tableId.canonical(), "", "Table is being deleted.");
 +          throw new AccumuloException("Unexpected table state " + tableId + " "
 +              + Tables.getTableState(context, tableId) + " != " + expectedState);
 +        }
 +      }
 +
 +      Range range;
 +      if (startRow == null || lastRow == null)
 +        range = new KeyExtent(tableId, null, null).toMetadataRange();
 +      else
 +        range = new Range(startRow, lastRow);
 +
 +      TabletsMetadata tablets = TabletsMetadata.builder().scanMetadataTable().overRange(range)
 +          .fetchLocation().fetchPrev().build(context);
 +
 +      KeyExtent lastExtent = null;
 +
 +      int total = 0;
 +      int waitFor = 0;
 +      int holes = 0;
 +      Text continueRow = null;
 +      MapCounter<String> serverCounts = new MapCounter<>();
 +
 +      for (TabletMetadata tablet : tablets) {
 +        total++;
 +
 +        Location loc = tablet.getLocation();
 +
 +        if ((expectedState == TableState.ONLINE
 +            && (loc == null || loc.getType() == LocationType.FUTURE))
 +            || (expectedState == TableState.OFFLINE && loc != null)) {
 +          if (continueRow == null)
 +            continueRow = tablet.getExtent().getMetadataEntry();
 +          waitFor++;
 +          lastRow = tablet.getExtent().getMetadataEntry();
 +
 +          if (loc != null) {
 +            serverCounts.increment(loc.getId(), 1);
 +          }
 +        }
 +
 +        if (!tablet.getExtent().getTableId().equals(tableId)) {
 +          throw new AccumuloException(
 +              "Saw unexpected table Id " + tableId + " " + tablet.getExtent());
 +        }
 +
 +        if (lastExtent != null && !tablet.getExtent().isPreviousExtent(lastExtent)) {
 +          holes++;
 +        }
 +
 +        lastExtent = tablet.getExtent();
 +      }
 +
 +      if (continueRow != null) {
 +        startRow = continueRow;
 +      }
 +
 +      if (holes > 0 || total == 0) {
 +        startRow = null;
 +        lastRow = null;
 +      }
 +
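 +      // sleep in proportion to the tablet count on the busiest server (or the total wait count),
 +      // bounded between 100ms and 5s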
 +      if (waitFor > 0 || holes > 0 || total == 0) {
 +        long waitTime;
 +        long maxPerServer = 0;
 +        if (serverCounts.size() > 0) {
 +          maxPerServer = serverCounts.max();
 +          waitTime = maxPerServer * 10;
 +        } else
 +          waitTime = waitFor * 10L;
 +        waitTime = Math.max(100, waitTime);
 +        waitTime = Math.min(5000, waitTime);
 +        log.trace("Waiting for {}({}) tablets, startRow = {} lastRow = {}, holes={} sleeping:{}ms",
 +            waitFor, maxPerServer, startRow, lastRow, holes, waitTime);
 +        sleepUninterruptibly(waitTime, TimeUnit.MILLISECONDS);
 +      } else {
 +        break;
 +      }
 +
 +    }
 +  }
 +
 +  @Override
 +  public void offline(String tableName)
 +      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
 +    offline(tableName, false);
 +  }
 +
 +  @Override
 +  public void offline(String tableName, boolean wait)
 +      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
 +
 +    checkArgument(tableName != null, "tableName is null");
 +    TableId tableId = Tables.getTableId(context, tableName);
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)));
 +    Map<String,String> opts = new HashMap<>();
 +
 +    try {
 +      doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_OFFLINE,
 +          args, opts);
 +    } catch (TableExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +
 +    if (wait)
 +      waitForTableStateTransition(tableId, TableState.OFFLINE);
 +  }
 +
 +  @Override
 +  public void online(String tableName)
 +      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
 +    online(tableName, false);
 +  }
 +
 +  @Override
 +  public void online(String tableName, boolean wait)
 +      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +
 +    TableId tableId = Tables.getTableId(context, tableName);
 +
 +    /**
 +     * ACCUMULO-4574: if the table is already online, return without executing the fate operation.
 +     */
 +
 +    TableState expectedState = Tables.getTableState(context, tableId, true);
 +    if (expectedState == TableState.ONLINE) {
 +      if (wait)
 +        waitForTableStateTransition(tableId, TableState.ONLINE);
 +      return;
 +    }
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)));
 +    Map<String,String> opts = new HashMap<>();
 +
 +    try {
 +      doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_ONLINE,
 +          args, opts);
 +    } catch (TableExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +
 +    if (wait)
 +      waitForTableStateTransition(tableId, TableState.ONLINE);
 +  }
 +
 +  @Override
 +  public void clearLocatorCache(String tableName) throws TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
-     TabletLocator tabLocator = TabletLocator.getLocator(context,
-         Tables.getTableId(context, tableName));
++    TabletLocator tabLocator =
++        TabletLocator.getLocator(context, Tables.getTableId(context, tableName));
 +    tabLocator.invalidateCache();
 +  }
 +
 +  @Override
 +  public Map<String,String> tableIdMap() {
 +    return Tables.getNameToIdMap(context).entrySet().stream()
 +        .collect(Collectors.toMap(Entry::getKey, e -> e.getValue().canonical(), (v1, v2) -> {
 +          throw new RuntimeException(String.format("Duplicate key for values %s and %s", v1, v2));
 +        }, TreeMap::new));
 +  }
 +
 +  @Override
 +  public Text getMaxRow(String tableName, Authorizations auths, Text startRow,
 +      boolean startInclusive, Text endRow, boolean endInclusive) throws TableNotFoundException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(auths != null, "auths is null");
 +    Scanner scanner = context.createScanner(tableName, auths);
 +    return FindMax.findMax(scanner, startRow, startInclusive, endRow, endInclusive);
 +  }
 +
 +  @Override
 +  public List<DiskUsage> getDiskUsage(Set<String> tableNames)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +
 +    List<TDiskUsage> diskUsages = null;
 +    while (diskUsages == null) {
 +      Pair<String,Client> pair = null;
 +      try {
 +        // this operation may use a lot of memory... it's likely that connections to tabletservers
 +        // hosting metadata tablets will be cached, so do not use cached connections
 +        pair = ServerClient.getConnection(context, false);
 +        diskUsages = pair.getSecond().getDiskUsage(tableNames, context.rpcCreds());
 +      } catch (ThriftTableOperationException e) {
 +        switch (e.getType()) {
 +          case NOTFOUND:
 +            throw new TableNotFoundException(e);
 +          case NAMESPACE_NOTFOUND:
 +            throw new TableNotFoundException(e.getTableName(), new NamespaceNotFoundException(e));
 +          default:
 +            throw new AccumuloException(e.description, e);
 +        }
 +      } catch (ThriftSecurityException e) {
 +        throw new AccumuloSecurityException(e.getUser(), e.getCode());
 +      } catch (TTransportException e) {
 +        // some sort of communication error occurred, retry
 +        if (pair == null) {
 +          log.debug("Disk usage request failed.  Pair is null.  Retrying request...", e);
 +        } else {
 +          log.debug("Disk usage request failed {}, retrying ... ", pair.getFirst(), e);
 +        }
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +      } catch (TException e) {
 +        // may be a TApplicationException which indicates error on the server side
 +        throw new AccumuloException(e);
 +      } finally {
 +        // must always return thrift connection
 +        if (pair != null)
 +          ServerClient.close(pair.getSecond());
 +      }
 +    }
 +
 +    List<DiskUsage> finalUsages = new ArrayList<>();
 +    for (TDiskUsage diskUsage : diskUsages) {
 +      finalUsages.add(new DiskUsage(new TreeSet<>(diskUsage.getTables()), diskUsage.getUsage()));
 +    }
 +
 +    return finalUsages;
 +  }
 +
 +  public static Map<String,String> getExportedProps(FileSystem fs, Path path) throws IOException {
 +    HashMap<String,String> props = new HashMap<>();
 +
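 +    // find the exported table config entry in the zip and parse its key=value lines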
 +    try (ZipInputStream zis = new ZipInputStream(fs.open(path))) {
 +      ZipEntry zipEntry;
 +      while ((zipEntry = zis.getNextEntry()) != null) {
 +        if (zipEntry.getName().equals(Constants.EXPORT_TABLE_CONFIG_FILE)) {
 +          try (BufferedReader in = new BufferedReader(new InputStreamReader(zis, UTF_8))) {
 +            String line;
 +            while ((line = in.readLine()) != null) {
 +              String[] sa = line.split("=", 2);
 +              props.put(sa[0], sa[1]);
 +            }
 +          }
 +
 +          break;
 +        }
 +      }
 +    }
 +    return props;
 +  }
 +
 +  @Override
 +  public void importTable(String tableName, String importDir)
 +      throws TableExistsException, AccumuloException, AccumuloSecurityException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(importDir != null, "importDir is null");
 +
 +    try {
 +      importDir = checkPath(importDir, "Table", "").toString();
 +    } catch (IOException e) {
 +      throw new AccumuloException(e);
 +    }
 +
 +    try {
 +      FileSystem fs = new Path(importDir).getFileSystem(context.getHadoopConf());
 +      Map<String,String> props = getExportedProps(fs, new Path(importDir, Constants.EXPORT_FILE));
 +
 +      for (Entry<String,String> entry : props.entrySet()) {
 +        if (Property.isClassProperty(entry.getKey())
 +            && !entry.getValue().contains(Constants.CORE_PACKAGE_NAME)) {
 +          LoggerFactory.getLogger(this.getClass()).info(
 +              "Imported table sets '{}' to '{}'.  Ensure this class is on Accumulo classpath.",
 +              sanitize(entry.getKey()), sanitize(entry.getValue()));
 +        }
 +      }
 +
 +    } catch (IOException ioe) {
 +      LoggerFactory.getLogger(this.getClass()).warn(
 +          "Failed to check if imported table references external java classes : {}",
 +          ioe.getMessage());
 +    }
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
 +        ByteBuffer.wrap(importDir.getBytes(UTF_8)));
 +
 +    Map<String,String> opts = Collections.emptyMap();
 +
 +    try {
 +      doTableFateOperation(tableName, AccumuloException.class, FateOperation.TABLE_IMPORT, args,
 +          opts);
 +    } catch (TableNotFoundException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +
 +  }
 +
 +  /**
 +   * Prevents potential CRLF injection into logs from user data that is read in. See
 +   * https://find-sec-bugs.github.io/bugs.htm#CRLF_INJECTION_LOGS
 +   */
 +  private String sanitize(String msg) {
 +    return msg.replaceAll("[\r\n]", "");
 +  }
 +
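The sanitize helper above simply strips carriage returns and line feeds before user-derived values reach the logger; a tiny standalone illustration (the input string is made up):

class SanitizeSketch {
  public static void main(String[] args) {
    String userValue = "com.example.Foo\r\nFAKE LOG LINE";
    // same regex as the sanitize method: drop CR and LF so log lines cannot be forged
    String clean = userValue.replaceAll("[\r\n]", "");
    System.out.println(clean); // prints: com.example.FooFAKE LOG LINE
  }
}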
 +  @Override
 +  public void exportTable(String tableName, String exportDir)
 +      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(exportDir != null, "exportDir is null");
 +
 +    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
 +        ByteBuffer.wrap(exportDir.getBytes(UTF_8)));
 +
 +    Map<String,String> opts = Collections.emptyMap();
 +
 +    try {
 +      doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_EXPORT,
 +          args, opts);
 +    } catch (TableExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    }
 +  }
 +
 +  @Override
 +  public boolean testClassLoad(final String tableName, final String className,
 +      final String asTypeName)
 +      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 +    checkArgument(tableName != null, "tableName is null");
 +    checkArgument(className != null, "className is null");
 +    checkArgument(asTypeName != null, "asTypeName is null");
 +
 +    try {
 +      return ServerClient.executeRaw(context,
 +          client -> client.checkTableClass(TraceUtil.traceInfo(), context.rpcCreds(), tableName,
 +              className, asTypeName));
 +    } catch (ThriftTableOperationException e) {
 +      switch (e.getType()) {
 +        case NOTFOUND:
 +          throw new TableNotFoundException(e);
 +        case NAMESPACE_NOTFOUND:
 +          throw new TableNotFoundException(tableName, new NamespaceNotFoundException(e));
 +        default:
 +          throw new AccumuloException(e.description, e);
 +      }
 +    } catch (ThriftSecurityException e) {
 +      throw new AccumuloSecurityException(e.user, e.code, e);
 +    } catch (AccumuloException e) {
 +      throw e;
 +    } catch (Exception e) {
 +      throw new AccumuloException(e);
 +    }
 +  }
 +
 +  @Override
 +  public void attachIterator(String tableName, IteratorSetting setting,
 +      EnumSet<IteratorScope> scopes)
 +      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
 +    testClassLoad(tableName, setting.getIteratorClass(), SortedKeyValueIterator.class.getName());
 +    super.attachIterator(tableName, setting, scopes);
 +  }
 +
 +  @Override
 +  public int addConstraint(String tableName, String constraintClassName)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    testClassLoad(tableName, constraintClassName, Constraint.class.getName());
 +    return super.addConstraint(tableName, constraintClassName);
 +  }
 +
 +  private void doTableFateOperation(String tableOrNamespaceName,
 +      Class<? extends Exception> namespaceNotFoundExceptionClass, FateOperation op,
 +      List<ByteBuffer> args, Map<String,String> opts) throws AccumuloSecurityException,
 +      AccumuloException, TableExistsException, TableNotFoundException {
 +    try {
 +      doFateOperation(op, args, opts, tableOrNamespaceName);
 +    } catch (NamespaceExistsException e) {
 +      // should not happen
 +      throw new AssertionError(e);
 +    } catch (NamespaceNotFoundException e) {
 +      if (namespaceNotFoundExceptionClass == null) {
 +        // should not happen
 +        throw new AssertionError(e);
 +      } else if (AccumuloException.class.isAssignableFrom(namespaceNotFoundExceptionClass)) {
 +        throw new AccumuloException("Cannot create table in non-existent namespace", e);
 +      } else if (TableNotFoundException.class.isAssignableFrom(namespaceNotFoundExceptionClass)) {
 +        throw new TableNotFoundException(null, tableOrNamespaceName, "Namespace not found", e);
 +      } else {
 +        // should not happen
 +        throw new AssertionError(e);
 +      }
 +    }
 +  }
 +
 +  private void clearSamplerOptions(String tableName)
 +      throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
 +    String prefix = Property.TABLE_SAMPLER_OPTS.getKey();
 +    for (Entry<String,String> entry : getProperties(tableName)) {
 +      String property = entry.getKey();
 +      if (property.startsWith(prefix)) {
 +        removeProperty(tableName, property);
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public void setSamplerConfiguration(String tableName, SamplerConfiguration samplerConfiguration)
 +      throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
 +    clearSamplerOptions(tableName);
 +
-     List<Pair<String,String>> props = new SamplerConfigurationImpl(samplerConfiguration)
-         .toTableProperties();
++    List<Pair<String,String>> props =
++        new SamplerConfigurationImpl(samplerConfiguration).toTableProperties();
 +    for (Pair<String,String> pair : props) {
 +      setProperty(tableName, pair.getFirst(), pair.getSecond());
 +    }
 +  }
 +
 +  @Override
 +  public void clearSamplerConfiguration(String tableName)
 +      throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
 +    removeProperty(tableName, Property.TABLE_SAMPLER.getKey());
 +    clearSamplerOptions(tableName);
 +  }
 +
 +  @Override
 +  public SamplerConfiguration getSamplerConfiguration(String tableName)
 +      throws TableNotFoundException, AccumuloException {
 +    AccumuloConfiguration conf = new ConfigurationCopy(this.getProperties(tableName));
 +    SamplerConfigurationImpl sci = SamplerConfigurationImpl.newSamplerConfig(conf);
 +    if (sci == null) {
 +      return null;
 +    }
 +    return sci.toSamplerConfiguration();
 +  }
 +
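For context, a sketch of driving the sampler methods above through the public TableOperations API; the RowSampler options shown ("hasher", "modulus") follow the documented sampling example, and the client handle and table name are assumptions.

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.sample.RowSampler;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;

class SamplerConfigSketch {
  static void configureSampling(AccumuloClient client, String tableName) throws Exception {
    // sample roughly 1 in 1009 rows using the murmur3_32 hasher (assumed option values)
    SamplerConfiguration sc = new SamplerConfiguration(RowSampler.class.getName());
    sc.addOption("hasher", "murmur3_32");
    sc.addOption("modulus", "1009");
    client.tableOperations().setSamplerConfiguration(tableName, sc); // writes table.sampler* props
    System.out.println(client.tableOperations().getSamplerConfiguration(tableName));
    client.tableOperations().clearSamplerConfiguration(tableName);   // removes them again
  }
}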
 +  private static class LoctionsImpl implements Locations {
 +
 +    private Map<Range,List<TabletId>> groupedByRanges;
 +    private Map<TabletId,List<Range>> groupedByTablets;
 +    private Map<TabletId,String> tabletLocations;
 +
 +    public LoctionsImpl(Map<String,Map<KeyExtent,List<Range>>> binnedRanges) {
 +      groupedByTablets = new HashMap<>();
 +      groupedByRanges = null;
 +      tabletLocations = new HashMap<>();
 +
 +      for (Entry<String,Map<KeyExtent,List<Range>>> entry : binnedRanges.entrySet()) {
 +        String location = entry.getKey();
 +
 +        for (Entry<KeyExtent,List<Range>> entry2 : entry.getValue().entrySet()) {
 +          TabletIdImpl tabletId = new TabletIdImpl(entry2.getKey());
 +          tabletLocations.put(tabletId, location);
-           List<Range> prev = groupedByTablets.put(tabletId,
-               Collections.unmodifiableList(entry2.getValue()));
++          List<Range> prev =
++              groupedByTablets.put(tabletId, Collections.unmodifiableList(entry2.getValue()));
 +          if (prev != null) {
 +            throw new RuntimeException(
 +                "Unexpected : tablet at multiple locations : " + location + " " + tabletId);
 +          }
 +        }
 +      }
 +
 +      groupedByTablets = Collections.unmodifiableMap(groupedByTablets);
 +    }
 +
 +    @Override
 +    public String getTabletLocation(TabletId tabletId) {
 +      return tabletLocations.get(tabletId);
 +    }
 +
 +    @Override
 +    public Map<Range,List<TabletId>> groupByRange() {
 +      if (groupedByRanges == null) {
 +        Map<Range,List<TabletId>> tmp = new HashMap<>();
 +
 +        for (Entry<TabletId,List<Range>> entry : groupedByTablets.entrySet()) {
 +          for (Range range : entry.getValue()) {
 +            List<TabletId> tablets = tmp.get(range);
 +            if (tablets == null) {
 +              tablets = new ArrayList<>();
 +              tmp.put(range, tablets);
 +            }
 +
 +            tablets.add(entry.getKey());
 +          }
 +        }
 +
 +        Map<Range,List<TabletId>> tmp2 = new HashMap<>();
 +        for (Entry<Range,List<TabletId>> entry : tmp.entrySet()) {
 +          tmp2.put(entry.getKey(), Collections.unmodifiableList(entry.getValue()));
 +        }
 +
 +        groupedByRanges = Collections.unmodifiableMap(tmp2);
 +      }
 +
 +      return groupedByRanges;
 +    }
 +
 +    @Override
 +    public Map<TabletId,List<Range>> groupByTablet() {
 +      return groupedByTablets;
 +    }
 +  }
 +
 +  @Override
 +  public Locations locate(String tableName, Collection<Range> ranges)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    requireNonNull(tableName, "tableName must be non null");
 +    requireNonNull(ranges, "ranges must be non null");
 +
 +    TableId tableId = Tables.getTableId(context, tableName);
 +    TabletLocator locator = TabletLocator.getLocator(context, tableId);
 +
 +    List<Range> rangeList = null;
 +    if (ranges instanceof List) {
 +      rangeList = (List<Range>) ranges;
 +    } else {
 +      rangeList = new ArrayList<>(ranges);
 +    }
 +
 +    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
 +
 +    locator.invalidateCache();
 +
 +    Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
 +        .incrementBy(100, MILLISECONDS).maxWait(2, SECONDS).logInterval(3, TimeUnit.MINUTES)
 +        .createRetry();
 +
 +    while (!locator.binRanges(context, rangeList, binnedRanges).isEmpty()) {
 +
 +      if (!Tables.exists(context, tableId))
 +        throw new TableNotFoundException(tableId.canonical(), tableName, null);
 +      if (Tables.getTableState(context, tableId) == TableState.OFFLINE)
 +        throw new TableOfflineException(Tables.getTableOfflineMsg(context, tableId));
 +
 +      binnedRanges.clear();
 +
 +      try {
 +        retry.waitForNextAttempt();
 +      } catch (InterruptedException e) {
 +        throw new RuntimeException(e);
 +      }
 +
 +      locator.invalidateCache();
 +    }
 +
 +    return new LoctionsImpl(binnedRanges);
 +  }
 +
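A usage sketch for locate(); it relies only on the Locations methods defined just above (groupByTablet and getTabletLocation). The client handle, table name, and range are illustrative assumptions.

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.admin.Locations;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TabletId;

class LocateSketch {
  static void showLocations(AccumuloClient client, String tableName) throws Exception {
    Locations locations =
        client.tableOperations().locate(tableName, Collections.singleton(new Range("a", "z")));
    for (Map.Entry<TabletId,List<Range>> entry : locations.groupByTablet().entrySet()) {
      TabletId tablet = entry.getKey();
      System.out.println(tablet + " served by " + locations.getTabletLocation(tablet) + " covers "
          + entry.getValue().size() + " range(s)");
    }
  }
}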
 +  @Override
 +  public SummaryRetriever summaries(String tableName) {
 +
 +    return new SummaryRetriever() {
 +
 +      private Text startRow = null;
 +      private Text endRow = null;
 +      private List<TSummarizerConfiguration> summariesToFetch = Collections.emptyList();
 +      private String summarizerClassRegex;
 +      private boolean flush = false;
 +
 +      @Override
 +      public SummaryRetriever startRow(Text startRow) {
 +        Objects.requireNonNull(startRow);
 +        if (endRow != null) {
 +          Preconditions.checkArgument(startRow.compareTo(endRow) < 0,
 +              "Start row must be less than end row : %s >= %s", startRow, endRow);
 +        }
 +        this.startRow = startRow;
 +        return this;
 +      }
 +
 +      @Override
 +      public SummaryRetriever startRow(CharSequence startRow) {
 +        return startRow(new Text(startRow.toString()));
 +      }
 +
 +      @Override
 +      public List<Summary> retrieve()
 +          throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +        TableId tableId = Tables.getTableId(context, tableName);
 +        if (Tables.getTableState(context, tableId) == TableState.OFFLINE)
 +          throw new TableOfflineException(Tables.getTableOfflineMsg(context, tableId));
 +
-         TRowRange range = new TRowRange(TextUtil.getByteBuffer(startRow),
-             TextUtil.getByteBuffer(endRow));
-         TSummaryRequest request = new TSummaryRequest(tableId.canonical(), range, summariesToFetch,
-             summarizerClassRegex);
++        TRowRange range =
++            new TRowRange(TextUtil.getByteBuffer(startRow), TextUtil.getByteBuffer(endRow));
++        TSummaryRequest request =
++            new TSummaryRequest(tableId.canonical(), range, summariesToFetch, summarizerClassRegex);
 +        if (flush) {
 +          _flush(tableId, startRow, endRow, true);
 +        }
 +
-         TSummaries ret = ServerClient.execute(context, new TabletClientService.Client.Factory(),
-             client -> {
-               TSummaries tsr = client.startGetSummaries(TraceUtil.traceInfo(), context.rpcCreds(),
-                   request);
++        TSummaries ret =
++            ServerClient.execute(context, new TabletClientService.Client.Factory(), client -> {
++              TSummaries tsr =
++                  client.startGetSummaries(TraceUtil.traceInfo(), context.rpcCreds(), request);
 +              while (!tsr.finished) {
 +                tsr = client.contiuneGetSummaries(TraceUtil.traceInfo(), tsr.sessionId);
 +              }
 +              return tsr;
 +            });
 +        return new SummaryCollection(ret).getSummaries();
 +      }
 +
 +      @Override
 +      public SummaryRetriever endRow(Text endRow) {
 +        Objects.requireNonNull(endRow);
 +        if (startRow != null) {
 +          Preconditions.checkArgument(startRow.compareTo(endRow) < 0,
 +              "Start row must be less than end row : %s >= %s", startRow, endRow);
 +        }
 +        this.endRow = endRow;
 +        return this;
 +      }
 +
 +      @Override
 +      public SummaryRetriever endRow(CharSequence endRow) {
 +        return endRow(new Text(endRow.toString()));
 +      }
 +
 +      @Override
 +      public SummaryRetriever withConfiguration(Collection<SummarizerConfiguration> configs) {
 +        Objects.requireNonNull(configs);
 +        summariesToFetch = configs.stream().map(SummarizerConfigurationUtil::toThrift)
 +            .collect(Collectors.toList());
 +        return this;
 +      }
 +
 +      @Override
 +      public SummaryRetriever withConfiguration(SummarizerConfiguration... config) {
 +        Objects.requireNonNull(config);
 +        return withConfiguration(Arrays.asList(config));
 +      }
 +
 +      @Override
 +      public SummaryRetriever withMatchingConfiguration(String regex) {
 +        Objects.requireNonNull(regex);
 +        // Do a sanity check here to make sure that regex compiles, instead of having it fail on a
 +        // tserver.
 +        Pattern.compile(regex);
 +        this.summarizerClassRegex = regex;
 +        return this;
 +      }
 +
 +      @Override
 +      public SummaryRetriever flush(boolean b) {
 +        this.flush = b;
 +        return this;
 +      }
 +    };
 +  }
 +
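A sketch of how the SummaryRetriever built above is typically used; the client handle, table name, row bounds, and regex are assumptions, and getStatistics()/getSummarizerConfiguration() are the standard accessors on Summary.

import java.util.List;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.summary.Summary;
import org.apache.hadoop.io.Text;

class SummariesSketch {
  static void printSummaries(AccumuloClient client, String tableName) throws Exception {
    List<Summary> summaries = client.tableOperations().summaries(tableName)
        .startRow(new Text("a")).endRow(new Text("z")) // optional bounds, start must be < end
        .withMatchingConfiguration(".*VisibilitySummarizer.*") // regex is compiled client side first
        .flush(true) // flush in-memory data before summarizing
        .retrieve();
    for (Summary summary : summaries) {
      System.out.println(summary.getSummarizerConfiguration() + " " + summary.getStatistics());
    }
  }
}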
 +  @Override
 +  public void addSummarizers(String tableName, SummarizerConfiguration... newConfigs)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-     HashSet<SummarizerConfiguration> currentConfigs = new HashSet<>(
-         SummarizerConfiguration.fromTableProperties(getProperties(tableName)));
++    HashSet<SummarizerConfiguration> currentConfigs =
++        new HashSet<>(SummarizerConfiguration.fromTableProperties(getProperties(tableName)));
 +    HashSet<SummarizerConfiguration> newConfigSet = new HashSet<>(Arrays.asList(newConfigs));
 +
 +    newConfigSet.removeIf(currentConfigs::contains);
 +
-     Set<String> newIds = newConfigSet.stream().map(SummarizerConfiguration::getPropertyId)
-         .collect(toSet());
++    Set<String> newIds =
++        newConfigSet.stream().map(SummarizerConfiguration::getPropertyId).collect(toSet());
 +
 +    for (SummarizerConfiguration csc : currentConfigs) {
 +      if (newIds.contains(csc.getPropertyId())) {
 +        throw new IllegalArgumentException("Summarizer property id is in use by " + csc);
 +      }
 +    }
 +
-     Set<Entry<String,String>> es = SummarizerConfiguration.toTableProperties(newConfigSet)
-         .entrySet();
++    Set<Entry<String,String>> es =
++        SummarizerConfiguration.toTableProperties(newConfigSet).entrySet();
 +    for (Entry<String,String> entry : es) {
 +      setProperty(tableName, entry.getKey(), entry.getValue());
 +    }
 +  }
 +
 +  @Override
 +  public void removeSummarizers(String tableName, Predicate<SummarizerConfiguration> predicate)
 +      throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
-     Collection<SummarizerConfiguration> summarizerConfigs = SummarizerConfiguration
-         .fromTableProperties(getProperties(tableName));
++    Collection<SummarizerConfiguration> summarizerConfigs =
++        SummarizerConfiguration.fromTableProperties(getProperties(tableName));
 +    for (SummarizerConfiguration sc : summarizerConfigs) {
 +      if (predicate.test(sc)) {
 +        Set<String> ks = sc.toTableProperties().keySet();
 +        for (String key : ks) {
 +          removeProperty(tableName, key);
 +        }
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public List<SummarizerConfiguration> listSummarizers(String tableName)
 +      throws AccumuloException, TableNotFoundException {
 +    return new ArrayList<>(SummarizerConfiguration.fromTableProperties(getProperties(tableName)));
 +  }
 +
 +  @Override
 +  public ImportDestinationArguments importDirectory(String directory) {
 +    return new BulkImport(directory, context);
 +  }
 +}
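To round out this file, a sketch of exercising the summarizer management methods above (addSummarizers, listSummarizers, removeSummarizers) through the public API; the VisibilitySummarizer choice, client handle, and table name are assumptions for illustration.

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
import org.apache.accumulo.core.client.summary.summarizers.VisibilitySummarizer;

class SummarizerAdminSketch {
  static void manageSummarizers(AccumuloClient client, String tableName) throws Exception {
    SummarizerConfiguration config =
        SummarizerConfiguration.builder(VisibilitySummarizer.class.getName()).build();
    client.tableOperations().addSummarizers(tableName, config); // sets table.summarizer.* props
    System.out.println(client.tableOperations().listSummarizers(tableName));
    client.tableOperations().removeSummarizers(tableName,
        sc -> sc.getClassName().equals(VisibilitySummarizer.class.getName()));
  }
}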
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/Tables.java
index 23d6c31,0000000..793dec0
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/Tables.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/Tables.java
@@@ -1,323 -1,0 +1,323 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static com.google.common.base.Preconditions.checkArgument;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.security.SecurityPermission;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.ExecutionException;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.NamespaceNotFoundException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.data.NamespaceId;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.master.state.tables.TableState;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.singletons.SingletonManager;
 +import org.apache.accumulo.core.singletons.SingletonService;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.fate.zookeeper.ZooCache;
 +import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
 +
 +import com.google.common.cache.Cache;
 +import com.google.common.cache.CacheBuilder;
 +
 +public class Tables {
 +
 +  public static final String VALID_NAME_REGEX = "^(\\w+\\.)?(\\w+)$";
 +
-   private static final SecurityPermission TABLES_PERMISSION = new SecurityPermission(
-       "tablesPermission");
++  private static final SecurityPermission TABLES_PERMISSION =
++      new SecurityPermission("tablesPermission");
 +  // Per instance cache will expire after 10 minutes in case we encounter an instance not used
 +  // frequently
-   private static Cache<String,TableMap> instanceToMapCache = CacheBuilder.newBuilder()
-       .expireAfterAccess(10, TimeUnit.MINUTES).build();
++  private static Cache<String,TableMap> instanceToMapCache =
++      CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES).build();
 +
 +  static {
 +    SingletonManager.register(new SingletonService() {
 +
 +      boolean enabled = false;
 +
 +      @Override
 +      public synchronized boolean isEnabled() {
 +        return enabled;
 +      }
 +
 +      @Override
 +      public synchronized void enable() {
 +        enabled = true;
 +      }
 +
 +      @Override
 +      public synchronized void disable() {
 +        try {
 +          instanceToMapCache.invalidateAll();
 +        } finally {
 +          enabled = false;
 +        }
 +      }
 +    });
 +  }
 +
 +  /**
 +   * Looks up the table ID in ZK, throwing TableNotFoundException if it is not found. Also wraps
 +   * NamespaceNotFoundException in TableNotFoundException if the namespace is not found.
 +   */
 +
 +  public static TableId getTableId(ClientContext context, String tableName)
 +      throws TableNotFoundException {
 +    try {
 +      return _getTableId(context, tableName);
 +    } catch (NamespaceNotFoundException e) {
 +      throw new TableNotFoundException(tableName, e);
 +    }
 +  }
 +
 +  /**
 +   * Return the cached ZooCache for the provided context. The ZooCache is initially created with a
 +   * watcher that will clear the TableMap cache for that instance when a WatchedEvent occurs.
 +   */
 +  private static ZooCache getZooCache(final ClientContext context) {
 +    SecurityManager sm = System.getSecurityManager();
 +    if (sm != null) {
 +      sm.checkPermission(TABLES_PERMISSION);
 +    }
 +
 +    return new ZooCacheFactory().getZooCache(context.getZooKeepers(),
 +        context.getZooKeepersSessionTimeOut());
 +  }
 +
 +  /**
 +   * Lookup table ID in ZK. If not found, clears cache and tries again.
 +   */
 +  public static TableId _getTableId(ClientContext context, String tableName)
 +      throws NamespaceNotFoundException, TableNotFoundException {
 +    TableId tableId = getNameToIdMap(context).get(tableName);
 +    if (tableId == null) {
 +      // maybe the table exists, but the cache was not updated yet... so try to clear the cache and
 +      // check again
 +      clearCache(context);
 +      tableId = getNameToIdMap(context).get(tableName);
 +      if (tableId == null) {
 +        String namespace = qualify(tableName).getFirst();
 +        if (Namespaces.getNameToIdMap(context).containsKey(namespace))
 +          throw new TableNotFoundException(null, tableName, null);
 +        else
 +          throw new NamespaceNotFoundException(null, namespace, null);
 +      }
 +    }
 +    return tableId;
 +  }
 +
 +  public static String getTableName(ClientContext context, TableId tableId)
 +      throws TableNotFoundException {
 +    String tableName = getIdToNameMap(context).get(tableId);
 +    if (tableName == null)
 +      throw new TableNotFoundException(tableId.canonical(), null, null);
 +    return tableName;
 +  }
 +
 +  public static String getTableOfflineMsg(ClientContext context, TableId tableId) {
 +    if (tableId == null)
 +      return "Table <unknown table> is offline";
 +    try {
 +      String tableName = Tables.getTableName(context, tableId);
 +      return "Table " + tableName + " (" + tableId.canonical() + ") is offline";
 +    } catch (TableNotFoundException e) {
 +      return "Table <unknown table> (" + tableId.canonical() + ") is offline";
 +    }
 +  }
 +
 +  public static Map<String,TableId> getNameToIdMap(ClientContext context) {
 +    return getTableMap(context).getNameToIdMap();
 +  }
 +
 +  public static Map<TableId,String> getIdToNameMap(ClientContext context) {
 +    return getTableMap(context).getIdtoNameMap();
 +  }
 +
 +  /**
 +   * Get the TableMap from the cache. A new one will be populated when needed. Cache is cleared
 +   * manually by calling {@link #clearCache(ClientContext)}
 +   */
 +  private static TableMap getTableMap(final ClientContext context) {
 +    TableMap map;
 +
 +    final ZooCache zc = getZooCache(context);
 +
 +    map = getTableMap(context, zc);
 +
 +    if (!map.isCurrent(zc)) {
 +      instanceToMapCache.invalidate(context.getInstanceID());
 +      map = getTableMap(context, zc);
 +    }
 +
 +    return map;
 +  }
 +
 +  private static TableMap getTableMap(final ClientContext context, final ZooCache zc) {
 +    try {
 +      return instanceToMapCache.get(context.getInstanceID(), () -> new TableMap(context, zc));
 +    } catch (ExecutionException e) {
 +      throw new RuntimeException(e);
 +    }
 +  }
 +
 +  public static boolean exists(ClientContext context, TableId tableId) {
 +    ZooCache zc = getZooCache(context);
 +    List<String> tableIds = zc.getChildren(context.getZooKeeperRoot() + Constants.ZTABLES);
 +    return tableIds.contains(tableId.canonical());
 +  }
 +
 +  public static void clearCache(ClientContext context) {
 +    getZooCache(context).clear(context.getZooKeeperRoot() + Constants.ZTABLES);
 +    getZooCache(context).clear(context.getZooKeeperRoot() + Constants.ZNAMESPACES);
 +    instanceToMapCache.invalidate(context.getInstanceID());
 +  }
 +
 +  /**
 +   * Clears the zoo cache from instance/root/{PATH}
 +   *
 +   * @param context
 +   *          The Accumulo client context
 +   * @param zooPath
 +   *          A zookeeper path
 +   */
 +  public static void clearCacheByPath(ClientContext context, final String zooPath) {
 +    String thePath = zooPath.startsWith("/") ? zooPath : "/" + zooPath;
 +    getZooCache(context).clear(context.getZooKeeperRoot() + thePath);
 +    instanceToMapCache.invalidate(context.getInstanceID());
 +  }
 +
 +  public static String getPrintableTableInfoFromId(ClientContext context, TableId tableId) {
 +    String tableName = null;
 +    try {
 +      tableName = getTableName(context, tableId);
 +    } catch (TableNotFoundException e) {
 +      // handled in the string formatting
 +    }
 +    return tableName == null ? String.format("?(ID:%s)", tableId.canonical())
 +        : String.format("%s(ID:%s)", tableName, tableId.canonical());
 +  }
 +
 +  public static String getPrintableTableInfoFromName(ClientContext context, String tableName) {
 +    TableId tableId = null;
 +    try {
 +      tableId = getTableId(context, tableName);
 +    } catch (TableNotFoundException e) {
 +      // handled in the string formatting
 +    }
 +    return tableId == null ? String.format("%s(?)", tableName)
 +        : String.format("%s(ID:%s)", tableName, tableId.canonical());
 +  }
 +
 +  public static TableState getTableState(ClientContext context, TableId tableId) {
 +    return getTableState(context, tableId, false);
 +  }
 +
 +  /**
 +   * Get the current state of the table using the tableId. If clearCachedState is true, the cached
 +   * table state is cleared before fetching the state. Added with ACCUMULO-4574.
 +   *
 +   * @param context
 +   *          the Accumulo client context
 +   * @param tableId
 +   *          the table id
 +   * @param clearCachedState
 +   *          if true, clear the cached table state before checking the status
 +   * @return the table state.
 +   */
 +  public static TableState getTableState(ClientContext context, TableId tableId,
 +      boolean clearCachedState) {
 +
 +    String statePath = context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId.canonical()
 +        + Constants.ZTABLE_STATE;
 +
 +    if (clearCachedState) {
 +      Tables.clearCacheByPath(context, statePath);
 +    }
 +
 +    ZooCache zc = getZooCache(context);
 +    byte[] state = zc.get(statePath);
 +    if (state == null)
 +      return TableState.UNKNOWN;
 +
 +    return TableState.valueOf(new String(state, UTF_8));
 +
 +  }
 +
 +  public static String qualified(String tableName) {
 +    return qualified(tableName, Namespace.DEFAULT.name());
 +  }
 +
 +  public static String qualified(String tableName, String defaultNamespace) {
 +    Pair<String,String> qualifiedTableName = qualify(tableName, defaultNamespace);
 +    if (Namespace.DEFAULT.name().equals(qualifiedTableName.getFirst()))
 +      return qualifiedTableName.getSecond();
 +    else
 +      return qualifiedTableName.toString("", ".", "");
 +  }
 +
 +  public static Pair<String,String> qualify(String tableName) {
 +    return qualify(tableName, Namespace.DEFAULT.name());
 +  }
 +
 +  public static Pair<String,String> qualify(String tableName, String defaultNamespace) {
 +    if (!tableName.matches(VALID_NAME_REGEX))
 +      throw new IllegalArgumentException("Invalid table name '" + tableName + "'");
 +    if (MetadataTable.OLD_NAME.equals(tableName))
 +      tableName = MetadataTable.NAME;
 +    if (tableName.contains(".")) {
 +      String[] s = tableName.split("\\.", 2);
 +      return new Pair<>(s[0], s[1]);
 +    }
 +    return new Pair<>(defaultNamespace, tableName);
 +  }
 +
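A small self-contained illustration of the qualification rule implemented above: a name without a dot falls into the supplied default namespace (the default namespace name is the empty string), otherwise everything before the first dot is the namespace. This re-implements just that rule rather than calling the internal Tables class.

import java.util.Arrays;

class QualifySketch {
  // mirrors Tables.qualify: split on the first '.' or fall back to the default namespace
  static String[] qualify(String tableName, String defaultNamespace) {
    if (tableName.contains(".")) {
      return tableName.split("\\.", 2);
    }
    return new String[] {defaultNamespace, tableName};
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(qualify("trades", "")));     // [, trades]
    System.out.println(Arrays.toString(qualify("ns1.trades", ""))); // [ns1, trades]
  }
}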
 +  /**
 +   * Returns the namespace id for a given table ID.
 +   *
 +   * @param context
 +   *          The Accumulo client context
 +   * @param tableId
 +   *          The tableId
 +   * @return The namespace id which this table resides in.
 +   * @throws IllegalArgumentException
 +   *           if the table doesn't exist in ZooKeeper
 +   */
 +  public static NamespaceId getNamespaceId(ClientContext context, TableId tableId)
 +      throws TableNotFoundException {
 +    checkArgument(context != null, "instance is null");
 +    checkArgument(tableId != null, "tableId is null");
 +
 +    ZooCache zc = getZooCache(context);
 +    byte[] n = zc.get(context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId
 +        + Constants.ZTABLE_NAMESPACE);
 +
 +    // We might get null out of ZooCache if this tableID doesn't exist
 +    if (n == null) {
 +      throw new TableNotFoundException(tableId.canonical(), null, null);
 +    }
 +
 +    return NamespaceId.of(new String(n, UTF_8));
 +  }
 +}
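The per-instance TableMap cache above relies on Guava's CacheBuilder with a 10 minute expire-after-access policy; below is a minimal standalone sketch of that pattern (the key and loader values are made up).

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

class ExpiringCacheSketch {
  // entries silently drop out 10 minutes after their last access, mirroring instanceToMapCache
  private static final Cache<String,String> CACHE =
      CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES).build();

  public static void main(String[] args) throws Exception {
    // get() loads the value on a miss, like instanceToMapCache.get(instanceId, () -> new TableMap(...))
    String value = CACHE.get("instance-123", () -> "table map for instance-123");
    System.out.println(value);
  }
}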
diff --cc core/src/main/java/org/apache/accumulo/core/clientImpl/TabletLocatorImpl.java
index 8dbe2c3,0000000..341e99d
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/TabletLocatorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/TabletLocatorImpl.java
@@@ -1,746 -1,0 +1,746 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.clientImpl;
 +
 +import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
 +
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.Comparator;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.SortedMap;
 +import java.util.TreeMap;
 +import java.util.TreeSet;
 +import java.util.concurrent.TimeUnit;
 +import java.util.concurrent.locks.Lock;
 +import java.util.concurrent.locks.ReentrantReadWriteLock;
 +
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.PartialKey;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.TableId;
 +import org.apache.accumulo.core.dataImpl.KeyExtent;
 +import org.apache.accumulo.core.util.OpTimer;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.TextUtil;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.io.WritableComparator;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 +
 +public class TabletLocatorImpl extends TabletLocator {
 +
 +  private static final Logger log = LoggerFactory.getLogger(TabletLocatorImpl.class);
 +
 +  // MAX_TEXT represents a Text object that is greater than all others. Attempted to use null for
 +  // this purpose, but there seems to be a bug in TreeMap.tailMap with null. Therefore instead of
 +  // using null, created MAX_TEXT.
 +  static final Text MAX_TEXT = new Text();
 +
 +  static final Comparator<Text> END_ROW_COMPARATOR = (o1, o2) -> {
 +    if (o1 == o2)
 +      return 0;
 +    if (o1 == MAX_TEXT)
 +      return 1;
 +    if (o2 == MAX_TEXT)
 +      return -1;
 +    return o1.compareTo(o2);
 +  };
 +
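A standalone sketch of the sentinel-comparator trick used above: a dedicated MAX object compares greater than every real key by identity, so it can stand in for "no end row" without relying on TreeMap's handling of null. The names and String keys here are illustrative, not the actual cache.

import java.util.Comparator;
import java.util.TreeMap;

class SentinelKeySketch {
  // sentinel that sorts after every real key, compared by identity just like MAX_TEXT
  static final String MAX = new String("MAX");

  static final Comparator<String> END_ROW_ORDER = (o1, o2) -> {
    if (o1 == o2)
      return 0;
    if (o1 == MAX)
      return 1;
    if (o2 == MAX)
      return -1;
    return o1.compareTo(o2);
  };

  public static void main(String[] args) {
    TreeMap<String,String> cache = new TreeMap<>(END_ROW_ORDER);
    cache.put("m", "tablet ending at m");
    cache.put(MAX, "last tablet (no end row)");
    // tailMap finds the first tablet whose end row is >= the lookup row
    System.out.println(cache.tailMap("x").firstEntry().getValue()); // last tablet (no end row)
    System.out.println(cache.tailMap("b").firstEntry().getValue()); // tablet ending at m
  }
}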
 +  protected TableId tableId;
 +  protected TabletLocator parent;
 +  protected TreeMap<Text,TabletLocation> metaCache = new TreeMap<>(END_ROW_COMPARATOR);
 +  protected TabletLocationObtainer locationObtainer;
 +  private TabletServerLockChecker lockChecker;
 +  protected Text lastTabletRow;
 +
 +  private TreeSet<KeyExtent> badExtents = new TreeSet<>();
 +  private ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
 +  private final Lock rLock = rwLock.readLock();
 +  private final Lock wLock = rwLock.writeLock();
 +
 +  public interface TabletLocationObtainer {
 +    /**
 +     * @return null when unable to read information successfully
 +     */
 +    TabletLocations lookupTablet(ClientContext context, TabletLocation src, Text row, Text stopRow,
 +        TabletLocator parent) throws AccumuloSecurityException, AccumuloException;
 +
 +    List<TabletLocation> lookupTablets(ClientContext context, String tserver,
 +        Map<KeyExtent,List<Range>> map, TabletLocator parent)
 +        throws AccumuloSecurityException, AccumuloException;
 +  }
 +
 +  public interface TabletServerLockChecker {
 +    boolean isLockHeld(String tserver, String session);
 +
 +    void invalidateCache(String server);
 +  }
 +
 +  private class LockCheckerSession {
 +
 +    private HashSet<Pair<String,String>> okLocks = new HashSet<>();
 +    private HashSet<Pair<String,String>> invalidLocks = new HashSet<>();
 +
 +    private TabletLocation checkLock(TabletLocation tl) {
 +      // the goal of this class is to minimize calls out to lockChecker under the assumption that
 +      // it's a resource synchronized among many threads... want to
 +      // avoid fine grained synchronization when binning lots of mutations or ranges... remember
 +      // decisions from the lockChecker in thread-local unsynchronized
 +      // memory
 +
 +      if (tl == null)
 +        return null;
 +
 +      Pair<String,String> lock = new Pair<>(tl.tablet_location, tl.tablet_session);
 +
 +      if (okLocks.contains(lock))
 +        return tl;
 +
 +      if (invalidLocks.contains(lock))
 +        return null;
 +
 +      if (lockChecker.isLockHeld(tl.tablet_location, tl.tablet_session)) {
 +        okLocks.add(lock);
 +        return tl;
 +      }
 +
 +      if (log.isTraceEnabled())
 +        log.trace("Tablet server {} {} no longer holds its lock", tl.tablet_location,
 +            tl.tablet_session);
 +
 +      invalidLocks.add(lock);
 +
 +      return null;
 +    }
 +  }
 +
 +  public TabletLocatorImpl(TableId tableId, TabletLocator parent, TabletLocationObtainer tlo,
 +      TabletServerLockChecker tslc) {
 +    this.tableId = tableId;
 +    this.parent = parent;
 +    this.locationObtainer = tlo;
 +    this.lockChecker = tslc;
 +
 +    this.lastTabletRow = new Text(tableId.canonical());
 +    lastTabletRow.append(new byte[] {'<'}, 0, 1);
 +  }
 +
 +  @Override
 +  public <T extends Mutation> void binMutations(ClientContext context, List<T> mutations,
 +      Map<String,TabletServerMutations<T>> binnedMutations, List<T> failures)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +
 +    OpTimer timer = null;
 +
 +    if (log.isTraceEnabled()) {
 +      log.trace("tid={} Binning {} mutations for table {}", Thread.currentThread().getId(),
 +          mutations.size(), tableId);
 +      timer = new OpTimer().start();
 +    }
 +
 +    ArrayList<T> notInCache = new ArrayList<>();
 +    Text row = new Text();
 +
 +    LockCheckerSession lcSession = new LockCheckerSession();
 +
 +    rLock.lock();
 +    try {
 +      processInvalidated(context, lcSession);
 +
 +      // for this to be efficient, rows need to be in sorted order, but always sorting is slow...
 +      // therefore only sort the mutations not found in the cache... it is most efficient to pass
 +      // rows to _locateTablet in sorted order
 +
 +      // For this to be efficient, need to avoid fine grained synchronization and fine grained
 +      // logging.
 +      // Therefore methods called by this are not synchronized and should not log.
 +
 +      for (T mutation : mutations) {
 +        row.set(mutation.getRow());
 +        TabletLocation tl = locateTabletInCache(row);
 +        if (tl == null || !addMutation(binnedMutations, mutation, tl, lcSession))
 +          notInCache.add(mutation);
 +      }
 +    } finally {
 +      rLock.unlock();
 +    }
 +
 +    if (notInCache.size() > 0) {
 +      Collections.sort(notInCache, (o1, o2) -> WritableComparator.compareBytes(o1.getRow(), 0,
 +          o1.getRow().length, o2.getRow(), 0, o2.getRow().length));
 +
 +      wLock.lock();
 +      try {
 +        boolean failed = false;
 +        for (T mutation : notInCache) {
 +          if (failed) {
 +            // when one table does not return a location, something is probably
 +            // screwy, go ahead and fail everything.
 +            failures.add(mutation);
 +            continue;
 +          }
 +
 +          row.set(mutation.getRow());
 +
 +          TabletLocation tl = _locateTablet(context, row, false, false, false, lcSession);
 +
 +          if (tl == null || !addMutation(binnedMutations, mutation, tl, lcSession)) {
 +            failures.add(mutation);
 +            failed = true;
 +          }
 +        }
 +      } finally {
 +        wLock.unlock();
 +      }
 +    }
 +
 +    if (timer != null) {
 +      timer.stop();
 +      log.trace("tid={} Binned {} mutations for table {} to {} tservers in {}",
 +          Thread.currentThread().getId(), mutations.size(), tableId, binnedMutations.size(),
 +          String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
 +    }
 +
 +  }
 +
 +  private <T extends Mutation> boolean addMutation(
 +      Map<String,TabletServerMutations<T>> binnedMutations, T mutation, TabletLocation tl,
 +      LockCheckerSession lcSession) {
 +    TabletServerMutations<T> tsm = binnedMutations.get(tl.tablet_location);
 +
 +    if (tsm == null) {
 +      // do lock check once per tserver here to make binning faster
 +      boolean lockHeld = lcSession.checkLock(tl) != null;
 +      if (lockHeld) {
 +        tsm = new TabletServerMutations<>(tl.tablet_session);
 +        binnedMutations.put(tl.tablet_location, tsm);
 +      } else {
 +        return false;
 +      }
 +    }
 +
 +    // it's possible the same tserver could be listed with different sessions
 +    if (tsm.getSession().equals(tl.tablet_session)) {
 +      tsm.addMutation(tl.tablet_extent, mutation);
 +      return true;
 +    }
 +
 +    return false;
 +  }
 +
 +  private List<Range> binRanges(ClientContext context, List<Range> ranges,
 +      Map<String,Map<KeyExtent,List<Range>>> binnedRanges, boolean useCache,
 +      LockCheckerSession lcSession)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    List<Range> failures = new ArrayList<>();
 +    List<TabletLocation> tabletLocations = new ArrayList<>();
 +
 +    boolean lookupFailed = false;
 +
 +    l1: for (Range range : ranges) {
 +
 +      tabletLocations.clear();
 +
 +      Text startRow;
 +
 +      if (range.getStartKey() != null) {
 +        startRow = range.getStartKey().getRow();
 +      } else
 +        startRow = new Text();
 +
 +      TabletLocation tl = null;
 +
 +      if (useCache)
 +        tl = lcSession.checkLock(locateTabletInCache(startRow));
 +      else if (!lookupFailed)
 +        tl = _locateTablet(context, startRow, false, false, false, lcSession);
 +
 +      if (tl == null) {
 +        failures.add(range);
 +        if (!useCache)
 +          lookupFailed = true;
 +        continue;
 +      }
 +
 +      tabletLocations.add(tl);
 +
 +      while (tl.tablet_extent.getEndRow() != null && !range
 +          .afterEndKey(new Key(tl.tablet_extent.getEndRow()).followingKey(PartialKey.ROW))) {
 +        if (useCache) {
 +          Text row = new Text(tl.tablet_extent.getEndRow());
 +          row.append(new byte[] {0}, 0, 1);
 +          tl = lcSession.checkLock(locateTabletInCache(row));
 +        } else {
 +          tl = _locateTablet(context, tl.tablet_extent.getEndRow(), true, false, false, lcSession);
 +        }
 +
 +        if (tl == null) {
 +          failures.add(range);
 +          if (!useCache)
 +            lookupFailed = true;
 +          continue l1;
 +        }
 +        tabletLocations.add(tl);
 +      }
 +
 +      for (TabletLocation tl2 : tabletLocations) {
 +        TabletLocatorImpl.addRange(binnedRanges, tl2.tablet_location, tl2.tablet_extent, range);
 +      }
 +
 +    }
 +
 +    return failures;
 +  }
 +
 +  @Override
 +  public List<Range> binRanges(ClientContext context, List<Range> ranges,
 +      Map<String,Map<KeyExtent,List<Range>>> binnedRanges)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +
 +    /*
 +     * For this to be efficient, need to avoid fine grained synchronization and fine grained
 +     * logging. Therefore methods called by this are not synchronized and should not log.
 +     */
 +
 +    OpTimer timer = null;
 +
 +    if (log.isTraceEnabled()) {
 +      log.trace("tid={} Binning {} ranges for table {}", Thread.currentThread().getId(),
 +          ranges.size(), tableId);
 +      timer = new OpTimer().start();
 +    }
 +
 +    LockCheckerSession lcSession = new LockCheckerSession();
 +
 +    List<Range> failures;
 +    rLock.lock();
 +    try {
 +      processInvalidated(context, lcSession);
 +
 +      // for this to be optimal, need to look ranges up in sorted order when
 +      // ranges are not present in cache... however do not want to always
 +      // sort ranges... therefore try binning ranges using only the cache
 +      // and sort whatever fails and retry
 +
 +      failures = binRanges(context, ranges, binnedRanges, true, lcSession);
 +    } finally {
 +      rLock.unlock();
 +    }
 +
 +    if (failures.size() > 0) {
 +      // sort failures by range start key
 +      Collections.sort(failures);
 +
 +      // try lookups again
 +      wLock.lock();
 +      try {
 +        failures = binRanges(context, failures, binnedRanges, false, lcSession);
 +      } finally {
 +        wLock.unlock();
 +      }
 +    }
 +
 +    if (timer != null) {
 +      timer.stop();
 +      log.trace("tid={} Binned {} ranges for table {} to {} tservers in {}",
 +          Thread.currentThread().getId(), ranges.size(), tableId, binnedRanges.size(),
 +          String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
 +    }
 +
 +    return failures;
 +  }
 +
 +  @Override
 +  public void invalidateCache(KeyExtent failedExtent) {
 +    wLock.lock();
 +    try {
 +      badExtents.add(failedExtent);
 +    } finally {
 +      wLock.unlock();
 +    }
 +    if (log.isTraceEnabled())
 +      log.trace("Invalidated extent={}", failedExtent);
 +  }
 +
 +  @Override
 +  public void invalidateCache(Collection<KeyExtent> keySet) {
 +    wLock.lock();
 +    try {
 +      badExtents.addAll(keySet);
 +    } finally {
 +      wLock.unlock();
 +    }
 +    if (log.isTraceEnabled())
 +      log.trace("Invalidated {} cache entries for table {}", keySet.size(), tableId);
 +  }
 +
 +  @Override
 +  public void invalidateCache(ClientContext context, String server) {
 +    int invalidatedCount = 0;
 +
 +    wLock.lock();
 +    try {
 +      for (TabletLocation cacheEntry : metaCache.values())
 +        if (cacheEntry.tablet_location.equals(server)) {
 +          badExtents.add(cacheEntry.tablet_extent);
 +          invalidatedCount++;
 +        }
 +    } finally {
 +      wLock.unlock();
 +    }
 +
 +    lockChecker.invalidateCache(server);
 +
 +    if (log.isTraceEnabled())
 +      log.trace("invalidated {} cache entries  table={} server={}", invalidatedCount, tableId,
 +          server);
 +
 +  }
 +
 +  @Override
 +  public void invalidateCache() {
 +    int invalidatedCount;
 +    wLock.lock();
 +    try {
 +      invalidatedCount = metaCache.size();
 +      metaCache.clear();
 +    } finally {
 +      wLock.unlock();
 +    }
 +    if (log.isTraceEnabled())
 +      log.trace("invalidated all {} cache entries for table={}", invalidatedCount, tableId);
 +  }
 +
 +  @Override
 +  public TabletLocation locateTablet(ClientContext context, Text row, boolean skipRow,
 +      boolean retry) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +
 +    OpTimer timer = null;
 +
 +    if (log.isTraceEnabled()) {
 +      log.trace("tid={} Locating tablet  table={} row={} skipRow={} retry={}",
 +          Thread.currentThread().getId(), tableId, TextUtil.truncate(row), skipRow, retry);
 +      timer = new OpTimer().start();
 +    }
 +
 +    while (true) {
 +
 +      LockCheckerSession lcSession = new LockCheckerSession();
 +      TabletLocation tl = _locateTablet(context, row, skipRow, retry, true, lcSession);
 +
 +      if (retry && tl == null) {
 +        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +        if (log.isTraceEnabled())
 +          log.trace("Failed to locate tablet containing row {} in table {}, will retry...",
 +              TextUtil.truncate(row), tableId);
 +        continue;
 +      }
 +
 +      if (timer != null) {
 +        timer.stop();
 +        log.trace("tid={} Located tablet {} at {} in {}", Thread.currentThread().getId(),
 +            (tl == null ? "null" : tl.tablet_extent), (tl == null ? "null" : tl.tablet_location),
 +            String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
 +      }
 +
 +      return tl;
 +    }
 +  }
 +
 +  private void lookupTabletLocation(ClientContext context, Text row, boolean retry,
 +      LockCheckerSession lcSession)
 +      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    Text metadataRow = new Text(tableId.canonical());
 +    metadataRow.append(new byte[] {';'}, 0, 1);
 +    metadataRow.append(row.getBytes(), 0, row.getLength());
 +    TabletLocation ptl = parent.locateTablet(context, metadataRow, false, retry);
 +
 +    if (ptl != null) {
-       TabletLocations locations = locationObtainer.lookupTablet(context, ptl, metadataRow,
-           lastTabletRow, parent);
++      TabletLocations locations =
++          locationObtainer.lookupTablet(context, ptl, metadataRow, lastTabletRow, parent);
 +      while (locations != null && locations.getLocations().isEmpty()
 +          && locations.getLocationless().isEmpty()) {
 +        // try the next metadata tablet; the current one has no tablet entries that overlap the row
 +        Text er = ptl.tablet_extent.getEndRow();
 +        if (er != null && er.compareTo(lastTabletRow) < 0) {
 +          // System.out.println("er "+er+" ltr "+lastTabletRow);
 +          ptl = parent.locateTablet(context, er, true, retry);
 +          if (ptl != null)
-             locations = locationObtainer.lookupTablet(context, ptl, metadataRow, lastTabletRow,
-                 parent);
++            locations =
++                locationObtainer.lookupTablet(context, ptl, metadataRow, lastTabletRow, parent);
 +          else
 +            break;
 +        } else {
 +          break;
 +        }
 +      }
 +
 +      if (locations == null)
 +        return;
 +
 +      // cannot assume the list contains contiguous key extents... so it is probably
 +      // best to deal with each extent individually
 +
 +      Text lastEndRow = null;
 +      for (TabletLocation tabletLocation : locations.getLocations()) {
 +
 +        KeyExtent ke = tabletLocation.tablet_extent;
 +        TabletLocation locToCache;
 +
 +        // create new location if current prevEndRow == endRow
 +        if ((lastEndRow != null) && (ke.getPrevEndRow() != null)
 +            && ke.getPrevEndRow().equals(lastEndRow)) {
-           locToCache = new TabletLocation(
-               new KeyExtent(ke.getTableId(), ke.getEndRow(), lastEndRow),
-               tabletLocation.tablet_location, tabletLocation.tablet_session);
++          locToCache =
++              new TabletLocation(new KeyExtent(ke.getTableId(), ke.getEndRow(), lastEndRow),
++                  tabletLocation.tablet_location, tabletLocation.tablet_session);
 +        } else {
 +          locToCache = tabletLocation;
 +        }
 +
 +        // save endRow for next iteration
 +        lastEndRow = locToCache.tablet_extent.getEndRow();
 +
 +        updateCache(locToCache, lcSession);
 +      }
 +    }
 +
 +  }
 +
 +  private void updateCache(TabletLocation tabletLocation, LockCheckerSession lcSession) {
 +    if (!tabletLocation.tablet_extent.getTableId().equals(tableId)) {
 +      // sanity check
 +      throw new IllegalStateException(
 +          "Unexpected extent returned " + tableId + "  " + tabletLocation.tablet_extent);
 +    }
 +
 +    if (tabletLocation.tablet_location == null) {
 +      // sanity check
 +      throw new IllegalStateException(
... 62881 lines suppressed ...