Posted to commits@accumulo.apache.org by ct...@apache.org on 2018/04/06 09:53:09 UTC

[accumulo] 03/04: Merge branch '1.8'

This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit ddfe3b933669f7b0dd4d63b43c4603a6df157357
Merge: e113e2f b8c19f8
Author: Christopher Tubbs <ct...@apache.org>
AuthorDate: Fri Apr 6 02:14:36 2018 -0400

    Merge branch '1.8'

 .../accumulo/core/bloomfilter/BloomFilter.java     |   3 +-
 .../core/bloomfilter/DynamicBloomFilter.java       |   3 +-
 .../org/apache/accumulo/core/cli/ClientOpts.java   |   8 +-
 .../core/cli/MapReduceClientOnRequiredTable.java   |   2 +-
 .../accumulo/core/cli/MapReduceClientOpts.java     |   6 +-
 .../core/client/AccumuloSecurityException.java     |   3 +-
 .../accumulo/core/client/ClientConfiguration.java  |   3 +-
 .../core/client/ClientSideIteratorScanner.java     |  18 +-
 .../apache/accumulo/core/client/ScannerBase.java   |   3 +-
 .../accumulo/core/client/ZooKeeperInstance.java    |   5 +-
 .../core/client/admin/CompactionConfig.java        |   4 +-
 .../accumulo/core/client/impl/ClientContext.java   |  21 +-
 .../accumulo/core/client/impl/ConnectorImpl.java   |   3 +-
 .../core/client/impl/SecurityOperationsImpl.java   |  10 +-
 .../core/client/impl/TableOperationsImpl.java      |   5 +-
 .../core/client/impl/TabletServerBatchWriter.java  |  15 +-
 .../core/client/lexicoder/PairLexicoder.java       |  21 +-
 .../core/client/mapred/AbstractInputFormat.java    |   9 +-
 .../core/client/mapred/AccumuloInputFormat.java    |   5 +-
 .../mapred/AccumuloMultiTableInputFormat.java      |   3 +-
 .../core/client/mapred/AccumuloOutputFormat.java   |   5 +-
 .../core/client/mapred/AccumuloRowInputFormat.java |  54 ++--
 .../core/client/mapreduce/AbstractInputFormat.java |   9 +-
 .../core/client/mapreduce/AccumuloInputFormat.java |   5 +-
 .../client/mapreduce/AccumuloOutputFormat.java     |   5 +-
 .../mapreduce/lib/impl/InputConfigurator.java      |   4 +-
 .../accumulo/core/client/rfile/RFileWriter.java    |  16 +-
 .../core/client/sample/RowColumnSampler.java       |   8 +-
 .../accumulo/core/client/sample/RowSampler.java    |   8 +-
 .../security/tokens/AuthenticationToken.java       |   6 +-
 .../core/client/security/tokens/KerberosToken.java |   4 +-
 .../accumulo/core/conf/AccumuloConfiguration.java  |   4 +-
 .../core/conf/CredentialProviderFactoryShim.java   |  22 +-
 .../org/apache/accumulo/core/conf/Property.java    | 331 +++++++++++++--------
 .../apache/accumulo/core/conf/PropertyType.java    |  25 +-
 .../accumulo/core/conf/SiteConfiguration.java      |   5 +-
 .../core/data/ConstraintViolationSummary.java      |   3 +-
 .../apache/accumulo/core/file/FileOperations.java  |  10 +-
 .../file/blockfile/cache/lru/LruBlockCache.java    |   3 +-
 .../accumulo/core/file/rfile/CreateEmpty.java      |   8 +-
 .../accumulo/core/file/rfile/KeyShortener.java     |   5 +-
 .../org/apache/accumulo/core/file/rfile/RFile.java |   5 +-
 .../accumulo/core/file/rfile/bcfile/BCFile.java    |   9 +-
 .../core/iterators/AggregatingIterator.java        |   5 +-
 .../apache/accumulo/core/iterators/Combiner.java   |  11 +-
 .../org/apache/accumulo/core/iterators/Filter.java |   4 +-
 .../core/iterators/IteratorEnvironment.java        |   3 +-
 .../accumulo/core/iterators/IteratorUtil.java      |  65 ++--
 .../accumulo/core/iterators/LongCombiner.java      |   4 +-
 .../accumulo/core/iterators/OptionDescriber.java   |   5 +-
 .../core/iterators/TypedValueCombiner.java         |   4 +-
 .../accumulo/core/iterators/user/AgeOffFilter.java |   4 +-
 .../core/iterators/user/CfCqSliceOpts.java         |  40 +--
 .../core/iterators/user/ColumnAgeOffFilter.java    |   4 +-
 .../core/iterators/user/ColumnSliceFilter.java     |   4 +-
 .../core/iterators/user/LargeRowFilter.java        |   3 +-
 .../accumulo/core/iterators/user/MaxCombiner.java  |   5 +-
 .../accumulo/core/iterators/user/MinCombiner.java  |   5 +-
 .../accumulo/core/iterators/user/RegExFilter.java  |   4 +-
 .../core/iterators/user/RowEncodingIterator.java   |   6 +-
 .../core/iterators/user/SummingArrayCombiner.java  |   6 +-
 .../core/iterators/user/SummingCombiner.java       |   5 +-
 .../core/iterators/user/TransformingIterator.java  |  12 +-
 .../core/iterators/user/VisibilityFilter.java      |  10 +-
 .../core/metadata/schema/MetadataSchema.java       |   8 +-
 .../core/replication/ReplicationSchema.java        |  27 +-
 .../apache/accumulo/core/rpc/FilterTransport.java  |   2 +-
 .../org/apache/accumulo/core/rpc/ThriftUtil.java   |  16 +-
 .../accumulo/core/rpc/UGIAssumingTransport.java    |   2 +-
 .../accumulo/core/security/ColumnVisibility.java   |   5 +-
 .../core/security/crypto/BlockedOutputStream.java  |   7 +-
 .../core/security/crypto/CryptoModuleFactory.java  |   2 -
 .../core/security/crypto/DefaultCryptoModule.java  |  18 +-
 .../security/crypto/DefaultCryptoModuleUtils.java  |  18 +-
 .../NonCachingSecretKeyEncryptionStrategy.java     |   6 +-
 .../org/apache/accumulo/core/util/CreateToken.java |   5 +-
 .../java/org/apache/accumulo/core/util/Merge.java  |   3 +-
 .../accumulo/core/volume/NonConfiguredVolume.java  |   4 +-
 .../client/security/SecurityErrorCodeTest.java     |   5 +-
 .../accumulo/core/conf/PropertyTypeTest.java       |   3 +-
 .../core/data/ConstraintViolationSummaryTest.java  |   9 +-
 .../apache/accumulo/core/data/MutationTest.java    |  14 +-
 .../conf/AggregatorConfigurationTest.java          |  19 +-
 .../core/iterators/user/VisibilityFilterTest.java  |   3 +-
 .../accumulo/core/security/crypto/CryptoTest.java  |   5 +-
 .../org/apache/accumulo/fate/util/AddressUtil.java |  19 +-
 .../apache/accumulo/fate/util/AddressUtilTest.java |  16 +-
 .../testcases/MultipleHasTopCalls.java             |   6 +-
 .../accumulo/minicluster/MiniAccumuloRunner.java   |   4 +-
 .../minicluster/impl/MiniAccumuloConfigImpl.java   |   4 +-
 .../main/java/org/apache/accumulo/proxy/Proxy.java |   4 +-
 .../org/apache/accumulo/proxy/ProxyServer.java     |  38 ++-
 .../java/org/apache/accumulo/server/Accumulo.java  |   7 +-
 .../accumulo/server/GarbageCollectionLogger.java   |   4 +-
 .../apache/accumulo/server/init/Initialize.java    |   8 +-
 .../accumulo/server/log/WalStateManager.java       |  38 ++-
 .../server/master/balancer/GroupBalancer.java      |   5 +-
 .../balancer/HostRegexTableLoadBalancer.java       |  34 +--
 .../server/metrics/MetricsConfiguration.java       |   3 +-
 .../rpc/TCredentialsUpdatingInvocationHandler.java |  13 +-
 .../apache/accumulo/server/rpc/TServerUtils.java   |   4 +-
 .../server/security/AuditedSecurityOperation.java  |  95 ++++--
 .../server/security/SecurityOperation.java         |   3 +-
 .../server/security/UserImpersonation.java         |  14 +-
 .../server/security/handler/ZKAuthenticator.java   |   5 +-
 .../accumulo/server/util/SendLogToChainsaw.java    |   6 +-
 .../server/util/FileSystemMonitorTest.java         |   6 +-
 .../apache/accumulo/gc/SimpleGarbageCollector.java |   4 +-
 .../java/org/apache/accumulo/master/Master.java    |  36 +--
 .../DistributedWorkQueueWorkAssigner.java          |   5 +-
 .../master/replication/FinishedWorkUpdater.java    |  10 +-
 .../RemoveCompleteReplicationRecords.java          |   9 +-
 .../org/apache/accumulo/master/util/FateAdmin.java |   5 +-
 .../org/apache/accumulo/tracer/TraceServer.java    |  15 +-
 .../org/apache/accumulo/tserver/InMemoryMap.java   |   6 +-
 .../org/apache/accumulo/tserver/TabletServer.java  |  33 +-
 .../org/apache/accumulo/tserver/log/DfsLogger.java |  18 +-
 .../accumulo/tserver/log/SortedLogRecovery.java    |   8 +-
 .../tserver/metrics/TabletServerUpdateMetrics.java |   5 +-
 .../tserver/replication/AccumuloReplicaSystem.java |  22 +-
 .../apache/accumulo/tserver/tablet/Compactor.java  |   3 +-
 .../accumulo/tserver/tablet/DatafileManager.java   |   5 +-
 .../org/apache/accumulo/tserver/tablet/Tablet.java |   6 +-
 .../accumulo/tserver/tablet/TabletCommitter.java   |   5 +-
 .../main/java/org/apache/accumulo/shell/Shell.java |  12 +-
 .../org/apache/accumulo/shell/ShellOptionsJC.java  |  14 +-
 .../shell/commands/ActiveScanIterator.java         |   3 +-
 .../accumulo/shell/commands/CloneTableCommand.java |   4 +-
 .../accumulo/shell/commands/CompactCommand.java    |  42 ++-
 .../shell/commands/CreateTableCommand.java         |   3 +-
 .../apache/accumulo/shell/commands/DUCommand.java  |   5 +-
 .../accumulo/shell/commands/DeleteCommand.java     |   3 +-
 .../accumulo/shell/commands/DeleteRowsCommand.java |   3 +-
 .../shell/commands/DeleteScanIterCommand.java      |   3 +-
 .../accumulo/shell/commands/EGrepCommand.java      |   8 +-
 .../accumulo/shell/commands/FateCommand.java       |   3 +-
 .../accumulo/shell/commands/GrepCommand.java       |   3 +-
 .../shell/commands/ImportDirectoryCommand.java     |   3 +-
 .../accumulo/shell/commands/InsertCommand.java     |   3 +-
 .../shell/commands/ListCompactionsCommand.java     |   4 +-
 .../accumulo/shell/commands/ListScansCommand.java  |   3 +-
 .../accumulo/shell/commands/MergeCommand.java      |   3 +-
 .../shell/commands/QuotedStringTokenizer.java      |   5 +-
 .../accumulo/shell/commands/ScanCommand.java       |   3 +-
 .../accumulo/shell/commands/SetIterCommand.java    |  20 +-
 .../shell/commands/SetShellIterCommand.java        |   4 -
 .../java/org/apache/accumulo/shell/ShellTest.java  |   3 +-
 .../shell/commands/SetIterCommandTest.java         |   3 +-
 .../classloader/vfs/AccumuloVFSClassLoader.java    |  19 +-
 .../accumulo/harness/MiniClusterHarness.java       |   9 +-
 .../apache/accumulo/test/ConditionalWriterIT.java  |  10 +-
 .../org/apache/accumulo/test/InMemoryMapIT.java    |   6 +-
 .../java/org/apache/accumulo/test/MetaSplitIT.java |   4 +-
 .../org/apache/accumulo/test/NamespacesIT.java     |   5 +-
 .../accumulo/test/NativeMapPerformanceTest.java    |   3 +-
 .../org/apache/accumulo/test/ShellServerIT.java    |  36 +--
 .../org/apache/accumulo/test/TestBinaryRows.java   |   3 +-
 .../java/org/apache/accumulo/test/TestIngest.java  |   7 +-
 .../org/apache/accumulo/test/VerifyIngest.java     |   3 +-
 .../test/functional/GarbageCollectorIT.java        |   4 +-
 .../accumulo/test/functional/PermissionsIT.java    |   6 +-
 .../test/functional/RecoveryWithEmptyRFileIT.java  |   4 +-
 .../test/functional/WatchTheWatchCountIT.java      |   5 +-
 .../test/mapreduce/AccumuloInputFormatIT.java      |  10 +-
 .../accumulo/test/proxy/SimpleProxyBase.java       |   4 +-
 .../accumulo/test/proxy/TestProxyReadWrite.java    |  18 +-
 .../test/replication/FinishedWorkUpdaterIT.java    |  34 ++-
 ...GarbageCollectorCommunicatesWithTServersIT.java |   4 +-
 .../org/apache/accumulo/test/util/CertUtils.java   |  12 +-
 169 files changed, 1165 insertions(+), 826 deletions(-)

diff --cc core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
index a105c70,6604bf0..871efa8
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
@@@ -20,7 -20,7 +20,8 @@@ import java.io.IOException
  import java.util.Map;
  
  import org.apache.accumulo.core.client.ClientConfiguration;
 +import org.apache.accumulo.core.client.ConnectionInfo;
+ import org.apache.accumulo.core.client.mapred.InputFormatBase.RecordReaderBase;
  import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
  import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
  import org.apache.accumulo.core.data.Key;
diff --cc core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 6fc25fc,eea039f..8298abd
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@@ -45,39 -46,34 +45,42 @@@ public enum Property 
    // Crypto-related properties
    @Experimental
    CRYPTO_PREFIX("crypto.", null, PropertyType.PREFIX,
-       "Properties in this category related to the configuration of both default and custom crypto modules."),
+       "Properties in this category related to the configuration of both default and custom crypto"
+           + " modules."),
    @Experimental
    CRYPTO_MODULE_CLASS("crypto.module.class", "NullCryptoModule", PropertyType.STRING,
-       "Fully qualified class name of the class that implements the CryptoModule interface, to be used in setting up encryption at rest for the WAL and "
-           + "(future) other parts of the code."),
+       "Fully qualified class name of the class that implements the CryptoModule"
+           + " interface, to be used in setting up encryption at rest for the WAL and"
+           + " (future) other parts of the code."),
    @Experimental
    CRYPTO_CIPHER_SUITE("crypto.cipher.suite", "NullCipher", PropertyType.STRING,
 -      "Describes the cipher suite to use for the write-ahead log"),
 +      "Describes the cipher suite to use for rfile encryption. The value must be either NullCipher or in the form of algorithm/mode/padding, "
 +          + "e.g. AES/CBC/NoPadding"),
    @Experimental
 -  CRYPTO_CIPHER_ALGORITHM_NAME("crypto.cipher.algorithm.name", "NullCipher", PropertyType.STRING,
 -      "States the name of the algorithm used in the corresponding cipher suite. "
 -          + "Do not make these different, unless you enjoy mysterious exceptions and bugs."),
 +  CRYPTO_WAL_CIPHER_SUITE("crypto.wal.cipher.suite", "", PropertyType.STRING,
 +      "Describes the cipher suite to use for the write-ahead log. Defaults to 'cyrpto.cipher.suite' "
 +          + "and will use that value for WAL encryption unless otherwise specified. Valid suite values include: an empty string, NullCipher, or a string the "
 +          + "form of algorithm/mode/padding, e.g. AES/CBC/NOPadding"),
    @Experimental
 -  CRYPTO_BLOCK_STREAM_SIZE("crypto.block.stream.size", "1K", PropertyType.MEMORY,
 -      "The size of the buffer above the cipher stream."
 -          + " Used for reading files and padding walog entries."),
 +  CRYPTO_CIPHER_KEY_ALGORITHM_NAME("crypto.cipher.key.algorithm.name", "NullCipher",
 +      PropertyType.STRING,
 +      "States the name of the algorithm used for the key for the corresponding cipher suite. The key type must be compatible with the cipher suite."),
 +  @Experimental
 +  CRYPTO_BLOCK_STREAM_SIZE("crypto.block.stream.size", "1K", PropertyType.BYTES,
 +      "The size of the buffer above the cipher stream. Used for reading files and padding walog entries."),
    @Experimental
    CRYPTO_CIPHER_KEY_LENGTH("crypto.cipher.key.length", "128", PropertyType.STRING,
-       "Specifies the key length *in bits* to use for the symmetric key, should probably be 128 or 256 unless you really know what you're doing"),
+       "Specifies the key length *in bits* to use for the symmetric key, "
+           + "should probably be 128 or 256 unless you really know what you're doing"),
    @Experimental
 +  CRYPTO_SECURITY_PROVIDER("crypto.security.provider", "", PropertyType.STRING,
 +      "States the security provider to use, and defaults to the system configured provider"),
 +  @Experimental
    CRYPTO_SECURE_RNG("crypto.secure.rng", "SHA1PRNG", PropertyType.STRING,
-       "States the secure random number generator to use, and defaults to the built-in Sun SHA1PRNG"),
+       "States the secure random number generator to use, and defaults to the built-in SHA1PRNG"),
    @Experimental
    CRYPTO_SECURE_RNG_PROVIDER("crypto.secure.rng.provider", "SUN", PropertyType.STRING,
-       "States the secure random number generator provider to use, and defaults to the built-in SUN provider"),
+       "States the secure random number generator provider to use."),
    @Experimental
    CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS("crypto.secret.key.encryption.strategy.class",
        "NullSecretKeyEncryptionStrategy", PropertyType.STRING,
@@@ -105,16 -108,18 +115,17 @@@
    // SSL properties local to each node (see also instance.ssl.enabled which must be consistent
    // across all nodes in an instance)
    RPC_PREFIX("rpc.", null, PropertyType.PREFIX,
 -      "Properties in this category related to the configuration of SSL keys for RPC."
 -          + " See also instance.ssl.enabled"),
 -  RPC_SSL_KEYSTORE_PATH("rpc.javax.net.ssl.keyStore", "$ACCUMULO_CONF_DIR/ssl/keystore.jks",
 -      PropertyType.PATH, "Path of the keystore file for the servers' private SSL key"),
 +      "Properties in this category related to the configuration of SSL keys for RPC. See also instance.ssl.enabled"),
 +  RPC_SSL_KEYSTORE_PATH("rpc.javax.net.ssl.keyStore", "", PropertyType.PATH,
 +      "Path of the keystore file for the server's private SSL key"),
    @Sensitive
    RPC_SSL_KEYSTORE_PASSWORD("rpc.javax.net.ssl.keyStorePassword", "", PropertyType.STRING,
-       "Password used to encrypt the SSL private keystore. Leave blank to use the Accumulo instance secret"),
+       "Password used to encrypt the SSL private keystore. "
+           + "Leave blank to use the Accumulo instance secret"),
    RPC_SSL_KEYSTORE_TYPE("rpc.javax.net.ssl.keyStoreType", "jks", PropertyType.STRING,
        "Type of SSL keystore"),
 -  RPC_SSL_TRUSTSTORE_PATH("rpc.javax.net.ssl.trustStore", "$ACCUMULO_CONF_DIR/ssl/truststore.jks",
 -      PropertyType.PATH, "Path of the truststore file for the root cert"),
 +  RPC_SSL_TRUSTSTORE_PATH("rpc.javax.net.ssl.trustStore", "", PropertyType.PATH,
 +      "Path of the truststore file for the root cert"),
    @Sensitive
    RPC_SSL_TRUSTSTORE_PASSWORD("rpc.javax.net.ssl.trustStorePassword", "", PropertyType.STRING,
        "Password used to encrypt the SSL truststore. Leave blank to use no password"),
@@@ -147,44 -156,64 +162,61 @@@
            + Integer.MAX_VALUE),
    @Deprecated
    INSTANCE_DFS_URI("instance.dfs.uri", "", PropertyType.URI,
-       "A url accumulo should use to connect to DFS. If this is empty, accumulo will obtain this information from the hadoop configuration. This property "
-           + "will only be used when creating new files if instance.volumes is empty. After an upgrade to 1.6.0 Accumulo will start using absolute paths to "
-           + "reference files. Files created before a 1.6.0 upgrade are referenced via relative paths. Relative paths will always be resolved using this "
-           + "config (if empty using the hadoop config)."),
+       "A url accumulo should use to connect to DFS. If this is empty, accumulo"
+           + " will obtain this information from the hadoop configuration. This property"
+           + " will only be used when creating new files if instance.volumes is empty."
+           + " After an upgrade to 1.6.0 Accumulo will start using absolute paths to"
+           + " reference files. Files created before a 1.6.0 upgrade are referenced via"
+           + " relative paths. Relative paths will always be resolved using this config"
+           + " (if empty using the hadoop config)."),
    @Deprecated
    INSTANCE_DFS_DIR("instance.dfs.dir", "/accumulo", PropertyType.ABSOLUTEPATH,
-       "HDFS directory in which accumulo instance will run. Do not change after accumulo is initialized."),
+       "HDFS directory in which accumulo instance will run. "
+           + "Do not change after accumulo is initialized."),
    @Sensitive
    INSTANCE_SECRET("instance.secret", "DEFAULT", PropertyType.STRING,
 -      "A secret unique to a given instance that all servers must know in order"
 -          + " to communicate with one another. It should be changed prior to the"
 -          + " initialization of Accumulo. To change it after Accumulo has been"
 -          + " initialized, use the ChangeSecret tool and then update"
 -          + " conf/accumulo-site.xml everywhere. Before using the ChangeSecret tool,"
 -          + " make sure Accumulo is not running and you are logged in as the user that"
 -          + " controls Accumulo files in HDFS. To use the ChangeSecret tool, run the"
 -          + " command: ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret"),
 +      "A secret unique to a given instance that all servers must know in order to communicate with one another."
 +          + "It should be changed prior to the initialization of Accumulo. To change it after Accumulo has been initialized, use the ChangeSecret tool "
 +          + "and then update accumulo-site.xml everywhere. Before using the ChangeSecret tool, make sure Accumulo is not running and you are logged "
 +          + "in as the user that controls Accumulo files in HDFS.  To use the ChangeSecret tool, run the command: "
 +          + "./bin/accumulo org.apache.accumulo.server.util.ChangeSecret"),
    INSTANCE_VOLUMES("instance.volumes", "", PropertyType.STRING,
-       "A comma seperated list of dfs uris to use. Files will be stored across these filesystems. If this is empty, then instance.dfs.uri will be used. "
-           + "After adding uris to this list, run 'accumulo init --add-volume' and then restart tservers. If entries are removed from this list then tservers "
-           + "will need to be restarted. After a uri is removed from the list Accumulo will not create new files in that location, however Accumulo can still "
-           + "reference files created at that location before the config change. To use a comma or other reserved characters in a URI use standard URI hex "
-           + "encoding. For example replace commas with %2C."),
+       "A comma seperated list of dfs uris to use. Files will be stored across"
+           + " these filesystems. If this is empty, then instance.dfs.uri will be used."
+           + " After adding uris to this list, run 'accumulo init --add-volume' and then"
+           + " restart tservers. If entries are removed from this list then tservers"
+           + " will need to be restarted. After a uri is removed from the list Accumulo"
+           + " will not create new files in that location, however Accumulo can still"
+           + " reference files created at that location before the config change. To use"
+           + " a comma or other reserved characters in a URI use standard URI hex"
+           + " encoding. For example replace commas with %2C."),
    INSTANCE_VOLUMES_REPLACEMENTS("instance.volumes.replacements", "", PropertyType.STRING,
-       "Since accumulo stores absolute URIs changing the location of a namenode could prevent Accumulo from starting. The property helps deal with that "
-           + "situation. Provide a comma separated list of uri replacement pairs here if a namenode location changes. Each pair shold be separated with a "
-           + "space. For example, if hdfs://nn1 was replaced with hdfs://nnA and hdfs://nn2 was replaced with hdfs://nnB, then set this property to "
-           + "'hdfs://nn1 hdfs://nnA,hdfs://nn2 hdfs://nnB' Replacements must be configured for use. To see which volumes are currently in use, run "
-           + "'accumulo admin volumes -l'. To use a comma or other reserved characters in a URI use standard URI hex encoding. For example replace commas with "
-           + "%2C."),
+       "Since accumulo stores absolute URIs changing the location of a namenode "
+           + "could prevent Accumulo from starting. The property helps deal with "
+           + "that situation. Provide a comma separated list of uri replacement "
+           + "pairs here if a namenode location changes. Each pair shold be separated "
+           + "with a space. For example, if hdfs://nn1 was replaced with "
+           + "hdfs://nnA and hdfs://nn2 was replaced with hdfs://nnB, then set this "
+           + "property to 'hdfs://nn1 hdfs://nnA,hdfs://nn2 hdfs://nnB' "
+           + "Replacements must be configured for use. To see which volumes are "
+           + "currently in use, run 'accumulo admin volumes -l'. To use a comma or "
+           + "other reserved characters in a URI use standard URI hex encoding. For "
+           + "example replace commas with %2C."),
    INSTANCE_SECURITY_AUTHENTICATOR("instance.security.authenticator",
        "org.apache.accumulo.server.security.handler.ZKAuthenticator", PropertyType.CLASSNAME,
-       "The authenticator class that accumulo will use to determine if a user has privilege to perform an action"),
+       "The authenticator class that accumulo will use to determine if a user "
+           + "has privilege to perform an action"),
    INSTANCE_SECURITY_AUTHORIZOR("instance.security.authorizor",
        "org.apache.accumulo.server.security.handler.ZKAuthorizor", PropertyType.CLASSNAME,
-       "The authorizor class that accumulo will use to determine what labels a user has privilege to see"),
+       "The authorizor class that accumulo will use to determine what labels a "
+           + "user has privilege to see"),
    INSTANCE_SECURITY_PERMISSION_HANDLER("instance.security.permissionHandler",
        "org.apache.accumulo.server.security.handler.ZKPermHandler", PropertyType.CLASSNAME,
-       "The permission handler class that accumulo will use to determine if a user has privilege to perform an action"),
+       "The permission handler class that accumulo will use to determine if a "
+           + "user has privilege to perform an action"),
    INSTANCE_RPC_SSL_ENABLED("instance.rpc.ssl.enabled", "false", PropertyType.BOOLEAN,
-       "Use SSL for socket connections from clients and among accumulo services. Mutually exclusive with SASL RPC configuration."),
+       "Use SSL for socket connections from clients and among accumulo services. "
+           + "Mutually exclusive with SASL RPC configuration."),
    INSTANCE_RPC_SSL_CLIENT_AUTH("instance.rpc.ssl.clientAuth", "false", PropertyType.BOOLEAN,
        "Require clients to present certs signed by a trusted root"),
    /**
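
For the instance.volumes note above about reserved characters: commas in a volume URI must be percent-encoded before being listed. A one-line illustration, not from this commit, with a made-up volume URI:

    // Encode the comma as %2C so the URI can sit inside the comma separated
    // instance.volumes list.
    String volume = "hdfs://nn1:8020/accumulo,archive".replace(",", "%2C");
    // -> "hdfs://nn1:8020/accumulo%2Carchive"
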
@@@ -204,15 -236,21 +239,16 @@@
  
    // general properties
    GENERAL_PREFIX("general.", null, PropertyType.PREFIX,
 -      "Properties in this category affect the behavior of accumulo overall, but "
 -          + "do not have to be consistent throughout a cloud."),
 -  GENERAL_CLASSPATHS(AccumuloClassLoader.CLASSPATH_PROPERTY_NAME,
 -      AccumuloClassLoader.ACCUMULO_CLASSPATH_VALUE, PropertyType.STRING,
 -      "A list of all of the places to look for a class. Order does matter, as "
 -          + "it will look for the jar starting in the first location to the last. "
 -          + "Please note, hadoop conf and hadoop lib directories NEED to be here, "
 -          + "along with accumulo lib and zookeeper directory. Supports full regex on "
 -          + " filename alone."),
 -
 -  // needs special treatment in accumulo start jar
 +      "Properties in this category affect the behavior of accumulo overall, but do not have to be consistent throughout a cloud."),
 +  @Deprecated
 +  GENERAL_CLASSPATHS(AccumuloClassLoader.GENERAL_CLASSPATHS, "", PropertyType.STRING,
 +      "This property is deprecated. The class path should instead be configured by the launch environment (for example, accumulo-env.sh). "
 +          + "A list of all of the places to look for a class. Order does matter, as it will look for the jar "
 +          + "starting in the first location to the last. Supports full regex on filename alone."),
    GENERAL_DYNAMIC_CLASSPATHS(AccumuloVFSClassLoader.DYNAMIC_CLASSPATH_PROPERTY_NAME,
        AccumuloVFSClassLoader.DEFAULT_DYNAMIC_CLASSPATH_VALUE, PropertyType.STRING,
-       "A list of all of the places where changes in jars or classes will force a reload of the classloader."),
+       "A list of all of the places where changes in jars or classes will force "
+           + "a reload of the classloader."),
    GENERAL_RPC_TIMEOUT("general.rpc.timeout", "120s", PropertyType.TIMEDURATION,
        "Time to wait on I/O for simple, short RPC calls"),
    @Experimental
@@@ -222,12 -262,14 +260,13 @@@
        "Path to the kerberos keytab to use. Leave blank if not using kerberoized hdfs"),
    GENERAL_KERBEROS_PRINCIPAL("general.kerberos.principal", "", PropertyType.STRING,
        "Name of the kerberos principal to use. _HOST will automatically be "
-           + "replaced by the machines hostname in the hostname portion of the principal. Leave blank if not using kerberoized hdfs"),
+           + "replaced by the machines hostname in the hostname portion of the "
+           + "principal. Leave blank if not using kerberoized hdfs"),
    GENERAL_KERBEROS_RENEWAL_PERIOD("general.kerberos.renewal.period", "30s",
        PropertyType.TIMEDURATION,
 -      "The amount of time between attempts to perform Kerberos ticket renewals. "
 -          + "This does not equate to how often tickets are actually renewed (which is "
 -          + "performed at 80% of the ticket lifetime)."),
 -  GENERAL_MAX_MESSAGE_SIZE("general.server.message.size.max", "1G", PropertyType.MEMORY,
 +      "The amount of time between attempts to perform "
 +          + "Kerberos ticket renewals. This does not equate to how often tickets are actually renewed (which is performed at 80% of the ticket lifetime)."),
 +  GENERAL_MAX_MESSAGE_SIZE("general.server.message.size.max", "1G", PropertyType.BYTES,
        "The maximum size of a message that can be sent to a server."),
    GENERAL_SIMPLETIMER_THREADPOOL_SIZE("general.server.simpletimer.threadpool.size", "1",
        PropertyType.COUNT, "The number of threads to use for " + "server-internal scheduled tasks"),
@@@ -273,26 -313,28 +313,29 @@@
    MASTER_BULK_TIMEOUT("master.bulk.timeout", "5m", PropertyType.TIMEDURATION,
        "The time to wait for a tablet server to process a bulk import request"),
    MASTER_BULK_RENAME_THREADS("master.bulk.rename.threadpool.size", "20", PropertyType.COUNT,
-       "The number of threads to use when moving user files to bulk ingest directories under accumulo control"),
+       "The number of threads to use when moving user files to bulk ingest "
+           + "directories under accumulo control"),
 +  MASTER_BULK_TSERVER_REGEX("master.bulk.tserver.regex", "", PropertyType.STRING,
 +      "Regular expression that defines the set of Tablet Servers that will perform bulk imports"),
    MASTER_MINTHREADS("master.server.threads.minimum", "20", PropertyType.COUNT,
        "The minimum number of threads to use to handle incoming requests."),
    MASTER_THREADCHECK("master.server.threadcheck.time", "1s", PropertyType.TIMEDURATION,
        "The time between adjustments of the server thread pool."),
    MASTER_RECOVERY_DELAY("master.recovery.delay", "10s", PropertyType.TIMEDURATION,
-       "When a tablet server's lock is deleted, it takes time for it to completely quit. This delay gives it time before log recoveries begin."),
+       "When a tablet server's lock is deleted, it takes time for it to "
+           + "completely quit. This delay gives it time before log recoveries begin."),
    MASTER_LEASE_RECOVERY_WAITING_PERIOD("master.lease.recovery.interval", "5s",
        PropertyType.TIMEDURATION,
 -      "The amount of time to wait after requesting a WAL file to be recovered"),
 +      "The amount of time to wait after requesting a write-ahead log to be recovered"),
    MASTER_WALOG_CLOSER_IMPLEMETATION("master.walog.closer.implementation",
        "org.apache.accumulo.server.master.recovery.HadoopLogCloser", PropertyType.CLASSNAME,
 -      "A class that implements a mechansim to steal write access to a file"),
 +      "A class that implements a mechanism to steal write access to a write-ahead log"),
    MASTER_FATE_THREADPOOL_SIZE("master.fate.threadpool.size", "4", PropertyType.COUNT,
 -      "The number of threads used to run FAult-Tolerant Executions. These are "
 -          + "primarily table operations like merge."),
 +      "The number of threads used to run fault-tolerant executions (FATE). These are primarily table operations like merge."),
    MASTER_REPLICATION_SCAN_INTERVAL("master.replication.status.scan.interval", "30s",
        PropertyType.TIMEDURATION,
-       "Amount of time to sleep before scanning the status section of the replication table for new data"),
+       "Amount of time to sleep before scanning the status section of the "
+           + "replication table for new data"),
    MASTER_REPLICATION_COORDINATOR_PORT("master.replication.coordinator.port", "10001",
        PropertyType.PORT, "Port for the replication coordinator service"),
    MASTER_REPLICATION_COORDINATOR_MINTHREADS("master.replication.coordinator.minthreads", "4",
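
The new master.bulk.tserver.regex property above limits bulk imports to tablet servers whose names match a regular expression. A hedged sketch of the kind of match this implies; the pattern and hostname are invented:

    import java.util.regex.Pattern;

    Pattern bulkServers = Pattern.compile("ingest-.*"); // assumed example value
    boolean eligible = bulkServers.matcher("ingest-07.example.com").matches(); // true
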
@@@ -404,46 -453,59 +449,51 @@@
        "The number of concurrent threads that will load bloom filters in the background. "
            + "Setting this to zero will make bloom filters load in the foreground."),
    TSERV_MONITOR_FS("tserver.monitor.fs", "true", PropertyType.BOOLEAN,
-       "When enabled the tserver will monitor file systems and kill itself when one switches from rw to ro. This is usually and indication that Linux has"
+       "When enabled the tserver will monitor file systems and kill itself when"
+           + " one switches from rw to ro. This is usually and indication that Linux has"
            + " detected a bad disk."),
    TSERV_MEMDUMP_DIR("tserver.dir.memdump", "/tmp", PropertyType.PATH,
-       "A long running scan could possibly hold memory that has been minor compacted. To prevent this, the in memory map is dumped to a local file and the "
-           + "scan is switched to that local file. We can not switch to the minor compacted file because it may have been modified by iterators. The file "
-           + "dumped to the local dir is an exact copy of what was in memory."),
+       "A long running scan could possibly hold memory that has been minor"
+           + " compacted. To prevent this, the in memory map is dumped to a local file"
+           + " and the scan is switched to that local file. We can not switch to the"
+           + " minor compacted file because it may have been modified by iterators. The"
+           + " file dumped to the local dir is an exact copy of what was in memory."),
    TSERV_BULK_PROCESS_THREADS("tserver.bulk.process.threads", "1", PropertyType.COUNT,
 -      "The master will task a tablet server with pre-processing a bulk file"
 -          + " prior to assigning it to the appropriate tablet servers. This"
 -          + " configuration value controls the number of threads used to process the" + " files."),
 +      "The master will task a tablet server with pre-processing a bulk import RFile prior to assigning it to the appropriate tablet servers. This configuration"
 +          + " value controls the number of threads used to process the files."),
    TSERV_BULK_ASSIGNMENT_THREADS("tserver.bulk.assign.threads", "1", PropertyType.COUNT,
 -      "The master delegates bulk file processing and assignment to tablet"
 -          + " servers. After the bulk file has been processed, the tablet server will"
 -          + " assign the file to the appropriate tablets on all servers. This property"
 -          + " controls the number of threads used to communicate to the other servers."),
 +      "The master delegates bulk import RFile processing and assignment to tablet servers. After file has been processed, the tablet server will assign"
 +          + " the file to the appropriate tablets on all servers. This property controls the number of threads used to communicate to the other servers."),
    TSERV_BULK_RETRY("tserver.bulk.retry.max", "5", PropertyType.COUNT,
 -      "The number of times the tablet server will attempt to assign a file to a"
 -          + " tablet as it migrates and splits."),
 +      "The number of times the tablet server will attempt to assign a RFile to a tablet as it migrates and splits."),
    TSERV_BULK_TIMEOUT("tserver.bulk.timeout", "5m", PropertyType.TIMEDURATION,
        "The time to wait for a tablet server to process a bulk import request."),
    TSERV_MINTHREADS("tserver.server.threads.minimum", "20", PropertyType.COUNT,
        "The minimum number of threads to use to handle incoming requests."),
    TSERV_THREADCHECK("tserver.server.threadcheck.time", "1s", PropertyType.TIMEDURATION,
        "The time between adjustments of the server thread pool."),
 -  TSERV_MAX_MESSAGE_SIZE("tserver.server.message.size.max", "1G", PropertyType.MEMORY,
 +  TSERV_MAX_MESSAGE_SIZE("tserver.server.message.size.max", "1G", PropertyType.BYTES,
        "The maximum size of a message that can be sent to a tablet server."),
    TSERV_HOLD_TIME_SUICIDE("tserver.hold.time.max", "5m", PropertyType.TIMEDURATION,
 -      "The maximum time for a tablet server to be in the \"memory full\" state."
 -          + " If the tablet server cannot write out memory in this much time, it will"
 -          + " assume there is some failure local to its node, and quit. A value of zero"
 -          + " is equivalent to forever."),
 -  TSERV_WAL_BLOCKSIZE("tserver.wal.blocksize", "0", PropertyType.MEMORY,
 -      "The size of the HDFS blocks used to write to the Write-Ahead log. If"
 -          + " zero, it will be 110% of tserver.walog.max.size (that is, try to use"
 -          + " just one block)"),
 +      "The maximum time for a tablet server to be in the \"memory full\" state. If the tablet server cannot write out memory"
 +          + " in this much time, it will assume there is some failure local to its node, and quit. A value of zero is equivalent to forever."),
 +  TSERV_WAL_BLOCKSIZE("tserver.wal.blocksize", "0", PropertyType.BYTES,
 +      "The size of the HDFS blocks used to write to the Write-Ahead log. If zero, it will be 110% of tserver.walog.max.size (that is, try to use just one"
 +          + " block)"),
    TSERV_WAL_REPLICATION("tserver.wal.replication", "0", PropertyType.COUNT,
-       "The replication to use when writing the Write-Ahead log to HDFS. If zero, it will use the HDFS default replication setting."),
+       "The replication to use when writing the Write-Ahead log to HDFS. If"
+           + " zero, it will use the HDFS default replication setting."),
    TSERV_RECOVERY_MAX_CONCURRENT("tserver.recovery.concurrent.max", "2", PropertyType.COUNT,
        "The maximum number of threads to use to sort logs during" + " recovery"),
 -  TSERV_SORT_BUFFER_SIZE("tserver.sort.buffer.size", "200M", PropertyType.MEMORY,
 +  TSERV_SORT_BUFFER_SIZE("tserver.sort.buffer.size", "10%", PropertyType.MEMORY,
        "The amount of memory to use when sorting logs during recovery."),
    TSERV_ARCHIVE_WALOGS("tserver.archive.walogs", "false", PropertyType.BOOLEAN,
        "Keep copies of the WALOGs for debugging purposes"),
    TSERV_WORKQ_THREADS("tserver.workq.threads", "2", PropertyType.COUNT,
 -      "The number of threads for the distributed work queue. These threads are"
 -          + " used for copying failed bulk files."),
 +      "The number of threads for the distributed work queue. These threads are used for copying failed bulk import RFiles."),
    TSERV_WAL_SYNC("tserver.wal.sync", "true", PropertyType.BOOLEAN,
-       "Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents problems recovering from sudden system resets."),
+       "Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents"
+           + " problems recovering from sudden system resets."),
    @Deprecated
    TSERV_WAL_SYNC_METHOD("tserver.wal.sync.method", "hsync", PropertyType.STRING,
        "This property is deprecated. Use table.durability instead."),
@@@ -574,20 -627,24 +629,21 @@@
    TABLE_ARBITRARY_PROP_PREFIX("table.custom.", null, PropertyType.PREFIX,
        "Prefix to be used for user defined arbitrary properties."),
    TABLE_MAJC_RATIO("table.compaction.major.ratio", "3", PropertyType.FRACTION,
 -      "minimum ratio of total input size to maximum input file size for running"
 -          + " a major compactionWhen adjusting this property you may want to also"
 -          + " adjust table.file.max. Want to avoid the situation where only merging"
 -          + " minor compactions occur."),
 +      "Minimum ratio of total input size to maximum input RFile size for running a major compaction. When adjusting this property you may want to also "
 +          + "adjust table.file.max. Want to avoid the situation where only merging minor compactions occur."),
    TABLE_MAJC_COMPACTALL_IDLETIME("table.compaction.major.everything.idle", "1h",
        PropertyType.TIMEDURATION,
 -      "After a tablet has been idle (no mutations) for this time period it may"
 -          + " have all of its files compacted into one. There is no guarantee an idle"
 -          + " tablet will be compacted. Compactions of idle tablets are only started"
 -          + " when regular compactions are not running. Idle compactions only take"
 -          + " place for tablets that have one or more files."),
 -  TABLE_SPLIT_THRESHOLD("table.split.threshold", "1G", PropertyType.MEMORY,
 -      "When combined size of files exceeds this amount a tablet is split."),
 -  TABLE_MAX_END_ROW_SIZE("table.split.endrow.size.max", "10K", PropertyType.MEMORY,
 +      "After a tablet has been idle (no mutations) for this time period it may have all "
 +          + "of its RFiles compacted into one. There is no guarantee an idle tablet will be compacted. "
 +          + "Compactions of idle tablets are only started when regular compactions are not running. Idle "
 +          + "compactions only take place for tablets that have one or more RFiles."),
 +  TABLE_SPLIT_THRESHOLD("table.split.threshold", "1G", PropertyType.BYTES,
 +      "A tablet is split when the combined size of RFiles exceeds this amount."),
 +  TABLE_MAX_END_ROW_SIZE("table.split.endrow.size.max", "10K", PropertyType.BYTES,
        "Maximum size of end row"),
    TABLE_MINC_LOGS_MAX("table.compaction.minor.logs.threshold", "3", PropertyType.COUNT,
-       "When there are more than this many write-ahead logs against a tablet, it will be minor compacted. See comment for property tserver.memory.maps.max"),
+       "When there are more than this many write-ahead logs against a tablet, it"
+           + " will be minor compacted. See comment for property" + " tserver.memory.maps.max"),
    TABLE_MINC_COMPACT_IDLETIME("table.compaction.minor.idle", "5m", PropertyType.TIMEDURATION,
        "After a tablet has been idle (no mutations) for this time period it may have its "
            + "in-memory map flushed to disk in a minor compaction. There is no guarantee an idle "
@@@ -602,26 -660,34 +658,27 @@@
        "Change the type of file a table writes"),
    TABLE_LOAD_BALANCER("table.balancer",
        "org.apache.accumulo.server.master.balancer.DefaultLoadBalancer", PropertyType.STRING,
-       "This property can be set to allow the LoadBalanceByTable load balancer to change the called Load Balancer for this table"),
+       "This property can be set to allow the LoadBalanceByTable load balancer"
+           + " to change the called Load Balancer for this table"),
    TABLE_FILE_COMPRESSION_TYPE("table.file.compress.type", "gz", PropertyType.STRING,
 -      "One of gz,snappy,lzo,none"),
 -  TABLE_FILE_COMPRESSED_BLOCK_SIZE("table.file.compress.blocksize", "100K", PropertyType.MEMORY,
 -      "Similar to the hadoop io.seqfile.compress.blocksize setting, so that"
 -          + " files have better query performance. The maximum value for this is "
 -          + Integer.MAX_VALUE + ". (This setting is the size threshold prior to"
 -          + " compression, and applies even compression is disabled.)"),
 +      "Compression algorithm used on index and data blocks before they are written. Possible values: gz, snappy, lzo, none"),
 +  TABLE_FILE_COMPRESSED_BLOCK_SIZE("table.file.compress.blocksize", "100K", PropertyType.BYTES,
 +      "The maximum size of data blocks in RFiles before they are compressed and written."),
    TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX("table.file.compress.blocksize.index", "128K",
 -      PropertyType.MEMORY,
 -      "Determines how large index blocks can be in files that support"
 -          + " multilevel indexes. The maximum value for this is " + Integer.MAX_VALUE
 -          + ". (This setting is the size threshold prior to compression, and applies"
 -          + " even compression is disabled.)"),
 -  TABLE_FILE_BLOCK_SIZE("table.file.blocksize", "0B", PropertyType.MEMORY,
 -      "Overrides the hadoop dfs.block.size setting so that files have better"
 -          + " query performance. The maximum value for this is " + Integer.MAX_VALUE),
 +      PropertyType.BYTES,
 +      "The maximum size of index blocks in RFiles before they are compressed and written."),
 +  TABLE_FILE_BLOCK_SIZE("table.file.blocksize", "0B", PropertyType.BYTES,
 +      "The HDFS block size used when writing RFiles. When set to 0B, the value/defaults of HDFS property 'dfs.block.size' will be used."),
    TABLE_FILE_REPLICATION("table.file.replication", "0", PropertyType.COUNT,
 -      "Determines how many replicas to keep of a tables' files in HDFS. "
 -          + "When this value is LTE 0, HDFS defaults are used."),
 +      "The number of replicas for a table's RFiles in HDFS. When set to 0, HDFS defaults are used."),
    TABLE_FILE_MAX("table.file.max", "15", PropertyType.COUNT,
 -      "Determines the max # of files each tablet in a table can have. When"
 -          + " adjusting this property you may want to consider adjusting"
 -          + " table.compaction.major.ratio also. Setting this property to 0 will make"
 -          + " it default to tserver.scan.files.open.max-1, this will prevent a tablet"
 -          + " from having more files than can be opened. Setting this property low may"
 -          + " throttle ingest and increase query performance."),
 +      "The maximum number of RFiles each tablet in a table can have. When adjusting this property you may want to consider adjusting"
 +          + " table.compaction.major.ratio also. Setting this property to 0 will make it default to tserver.scan.files.open.max-1, this will prevent a"
 +          + " tablet from having more RFiles than can be opened. Setting this property low may throttle ingest and increase query performance."),
 +  TABLE_FILE_SUMMARY_MAX_SIZE("table.file.summary.maxSize", "256K", PropertyType.BYTES,
 +      "The maximum size summary that will be stored. The number of"
 +          + " RFiles that had summary data exceeding this threshold is reported by Summary.getFileStatistics().getLarge().  When adjusting this"
 +          + " consider the expected number RFiles with summaries on each tablet server and the summary cache size."),
    @Deprecated
    TABLE_WALOG_ENABLED("table.walog.enabled", "true", PropertyType.BOOLEAN,
        "This setting is deprecated.  Use table.durability=none instead."),
@@@ -662,23 -736,28 +726,28 @@@
    TABLE_LOCALITY_GROUPS("table.groups.enabled", "", PropertyType.STRING,
        "A comma separated list of locality group names to enable for this table."),
    TABLE_CONSTRAINT_PREFIX("table.constraint.", null, PropertyType.PREFIX,
-       "Properties in this category are per-table properties that add constraints to a table. "
-           + "These properties start with the category prefix, followed by a number, and their values "
-           + "correspond to a fully qualified Java class that implements the Constraint interface.\n"
-           + "For example:\ntable.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint\n"
-           + "and:\ntable.constraint.2 = my.package.constraints.MySecondConstraint"),
+       "Properties in this category are per-table properties that add"
+           + " constraints to a table. These properties start with the category"
+           + " prefix, followed by a number, and their values correspond to a fully"
+           + " qualified Java class that implements the Constraint interface.\n" + "For example:\n"
+           + "table.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint\n"
+           + "and:\n" + " table.constraint.2 = my.package.constraints.MySecondConstraint"),
    TABLE_INDEXCACHE_ENABLED("table.cache.index.enable", "true", PropertyType.BOOLEAN,
 -      "Determines whether index cache is enabled."),
 +      "Determines whether index block cache is enabled for a table."),
    TABLE_BLOCKCACHE_ENABLED("table.cache.block.enable", "false", PropertyType.BOOLEAN,
 -      "Determines whether file block cache is enabled."),
 +      "Determines whether data block cache is enabled for a table."),
    TABLE_ITERATOR_PREFIX("table.iterator.", null, PropertyType.PREFIX,
-       "Properties in this category specify iterators that are applied at various stages (scopes) of interaction "
-           + "with a table. These properties start with the category prefix, followed by a scope (minc, majc, scan, etc.), "
-           + "followed by a period, followed by a name, as in table.iterator.scan.vers, or table.iterator.scan.custom. "
-           + "The values for these properties are a number indicating the ordering in which it is applied, and a class name "
-           + "such as:\n table.iterator.scan.vers = 10,org.apache.accumulo.core.iterators.VersioningIterator\n "
-           + "These iterators can take options if additional properties are set that look like this property, "
-           + "but are suffixed with a period, followed by 'opt' followed by another period, and a property name.\n"
+       "Properties in this category specify iterators that are applied at"
+           + " various stages (scopes) of interaction with a table. These properties"
+           + " start with the category prefix, followed by a scope (minc, majc, scan,"
+           + " etc.), followed by a period, followed by a name, as in"
+           + " table.iterator.scan.vers, or table.iterator.scan.custom. The values for"
+           + " these properties are a number indicating the ordering in which it is"
+           + " applied, and a class name such as:\n"
+           + "table.iterator.scan.vers = 10,org.apache.accumulo.core.iterators.VersioningIterator\n"
+           + "These iterators can take options if additional properties are set that"
+           + " look like this property, but are suffixed with a period, followed by 'opt'"
+           + " followed by another period, and a property name.\n"
            + "For example, table.iterator.minc.vers.opt.maxVersions = 3"),
    TABLE_ITERATOR_SCAN_PREFIX(TABLE_ITERATOR_PREFIX.getKey() + IteratorScope.scan.name() + ".", null,
        PropertyType.PREFIX, "Convenience prefix to find options for the scan iterator scope"),
@@@ -708,23 -790,32 +780,25 @@@
    TABLE_REPLICATION("table.replication", "false", PropertyType.BOOLEAN,
        "Is replication enabled for the given table"),
    TABLE_REPLICATION_TARGET("table.replication.target.", null, PropertyType.PREFIX,
 -      "Enumerate a mapping of other systems which this table should replicate"
 -          + " their data to. The key suffix is the identifying cluster name and the"
 -          + " value is an identifier for a location on the target system,"
 -          + " e.g. the ID of the table on the target to replicate to"),
 -  @Experimental
 -  TABLE_VOLUME_CHOOSER("table.volume.chooser", "org.apache.accumulo.server.fs.RandomVolumeChooser",
 -      PropertyType.CLASSNAME,
 -      "The class that will be used to select which volume will be used to"
 -          + " create new files for this table."),
 +      "Enumerate a mapping of other systems which this table should "
 +          + "replicate their data to. The key suffix is the identifying cluster name and the value is an identifier for a location on the target system, "
 +          + "e.g. the ID of the table on the target to replicate to"),
    TABLE_SAMPLER("table.sampler", "", PropertyType.CLASSNAME,
 -      "The name of a class that implements org.apache.accumulo.core.Sampler."
 -          + " Setting this option enables storing a sample of data which can be"
 -          + " scanned. Always having a current sample can useful for query optimization"
 -          + " and data comprehension. After enabling sampling for an existing table,"
 -          + " a compaction is needed to compute the sample for existing data. The"
 -          + " compact command in the shell has an option to only compact files without"
 -          + " sample data."),
 +      "The name of a class that implements org.apache.accumulo.core.Sampler.  Setting this option enables storing a sample of data which can be scanned."
 +          + "  Always having a current sample can useful for query optimization and data comprehension.   After enabling sampling for an existing table, a compaction "
 +          + "is needed to compute the sample for existing data.  The compact command in the shell has an option to only compact RFiles without sample data."),
    TABLE_SAMPLER_OPTS("table.sampler.opt.", null, PropertyType.PREFIX,
-       "The property is used to set options for a sampler.  If a sample had two options like hasher and modulous, then the two properties "
-           + "table.sampler.opt.hasher=${hash algorithm} and table.sampler.opt.modulous=${mod} would be set."),
+       "The property is used to set options for a sampler. If a sample had two"
+           + " options like hasher and modulous, then the two properties"
+           + " table.sampler.opt.hasher=${hash algorithm} and"
+           + " table.sampler.opt.modulous=${mod} would be set."),
    TABLE_SUSPEND_DURATION("table.suspend.duration", "0s", PropertyType.TIMEDURATION,
 -      "For tablets belonging to this table: When a tablet server dies, allow"
 -          + " the tablet server this duration to revive before reassigning its tablets"
 -          + " to other tablet servers."),
 +      "For tablets belonging to this table: When a tablet server dies, allow the tablet server this duration to revive before reassigning its tablets"
 +          + "to other tablet servers."),
 +  TABLE_SUMMARIZER_PREFIX("table.summarizer.", null, PropertyType.PREFIX,
 +      "Prefix for configuring summarizers for a table.  Using this prefix multiple summarizers can be configured with options for each one. Each summarizer configured "
 +          + "should have a unique id, this id can be anything. To add a summarizer set table.summarizer.<unique id>=<summarizer class name>.  If the summarizer has options, "
 +          + "then for each option set table.summarizer.<unique id>.opt.<key>=<value>."),
  
    // VFS ClassLoader properties
    VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY(
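
The sampler properties above (table.sampler and table.sampler.opt.*) can likewise be driven from the API. A hedged sketch reusing the hunk's own option names verbatim; the connector, table name, and option values are assumptions:

    import org.apache.accumulo.core.client.sample.RowSampler;
    import org.apache.accumulo.core.client.sample.SamplerConfiguration;

    SamplerConfiguration sampler = new SamplerConfiguration(RowSampler.class.getName());
    sampler.addOption("hasher", "murmur3_32"); // ${hash algorithm} in the description
    sampler.addOption("modulous", "1009");     // ${mod} in the description
    connector.tableOperations().setSamplerConfiguration("mytable", sampler);
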
diff --cc core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
index 0a3b520,0fc500a..e448ef8
--- a/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
@@@ -37,31 -38,27 +37,33 @@@ import com.google.common.base.Precondit
   * valid values match. All of these fields are optional.
   */
  public enum PropertyType {
 -  PREFIX(null, Predicates.<String> alwaysFalse(), null),
 +  PREFIX(null, x -> false, null),
  
    TIMEDURATION("duration", boundedUnits(0, Long.MAX_VALUE, true, "", "ms", "s", "m", "h", "d"),
-       "A non-negative integer optionally followed by a unit of time (whitespace disallowed), as in 30s.\n"
-           + "If no unit of time is specified, seconds are assumed. Valid units are 'ms', 's', 'm', 'h' for milliseconds, seconds, minutes, and hours.\n"
-           + "Examples of valid durations are '600', '30s', '45m', '30000ms', '3d', and '1h'.\n"
-           + "Examples of invalid durations are '1w', '1h30m', '1s 200ms', 'ms', '', and 'a'.\n"
-           + "Unless otherwise stated, the max value for the duration represented in milliseconds is "
-           + Long.MAX_VALUE),
+       "A non-negative integer optionally followed by a unit of time (whitespace"
+           + " disallowed), as in 30s.\n"
+           + "If no unit of time is specified, seconds are assumed. Valid units"
+           + " are 'ms', 's', 'm', 'h' for milliseconds, seconds," + " minutes, and" + " hours.\n"
+           + "Examples of valid durations are '600', '30s', '45m', '30000ms'," + " '3d', and '1h'.\n"
+           + "Examples of invalid durations are '1w', '1h30m', '1s 200ms', 'ms', '',"
+           + " and 'a'.\nUnless otherwise stated, the max value for the duration"
+           + " represented in milliseconds is " + Long.MAX_VALUE),
  
 -  MEMORY("memory", boundedUnits(0, Long.MAX_VALUE, false, "", "B", "K", "M", "G"),
 -      "A positive integer optionally followed by a unit of memory (whitespace"
 -          + " disallowed), as in 2G.\n"
 -          + "If no unit is specified, bytes are assumed. Valid units are 'B', 'K',"
 -          + " 'M', 'G', for bytes, kilobytes, megabytes, and gigabytes.\n"
 -          + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G'.\n"
 -          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G',"
 -          + " '1,024K', '', and 'a'.\n"
 -          + "Unless otherwise stated, the max value for the memory represented in" + " bytes is "
 +  BYTES("bytes", boundedUnits(0, Long.MAX_VALUE, false, "", "B", "K", "M", "G"),
 +      "A positive integer optionally followed by a unit of memory (whitespace disallowed).\n"
 +          + "If no unit is specified, bytes are assumed. Valid units are 'B', 'K', 'M' or 'G' for bytes, kilobytes, megabytes, gigabytes.\n"
 +          + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G', '20%'.\n"
 +          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G', '1,024K', '', and 'a'.\n"
 +          + "Unless otherwise stated, the max value for the memory represented in bytes is "
 +          + Long.MAX_VALUE),
 +
 +  MEMORY("memory", boundedUnits(0, Long.MAX_VALUE, false, "", "B", "K", "M", "G", "%"),
 +      "A positive integer optionally followed by a unit of memory or a percentage (whitespace disallowed).\n"
 +          + "If a percentage is specified, memory will be a percentage of the max memory allocated to a Java process (set by the JVM option -Xmx).\n"
 +          + "If no unit is specified, bytes are assumed. Valid units are 'B', 'K', 'M', 'G', '%' for bytes, kilobytes, megabytes, gigabytes, and percentage.\n"
 +          + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G', '20%'.\n"
 +          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G', '1,024K', '', and 'a'.\n"
 +          + "Unless otherwise stated, the max value for the memory represented in bytes is "
            + Long.MAX_VALUE),
  
    HOSTLIST("host list",
@@@ -80,18 -81,25 +83,20 @@@
        "A non-negative integer in the range of 0-" + Integer.MAX_VALUE),
  
    FRACTION("fraction/percentage", new FractionPredicate(),
-       "A floating point number that represents either a fraction or, if suffixed with the '%' character, a percentage.\n"
-           + "Examples of valid fractions/percentages are '10', '1000%', '0.05', '5%', '0.2%', '0.0005'.\n"
-           + "Examples of invalid fractions/percentages are '', '10 percent', 'Hulk Hogan'"),
+       "A floating point number that represents either a fraction or, if"
+           + " suffixed with the '%' character, a percentage.\n"
+           + "Examples of valid fractions/percentages are '10', '1000%', '0.05',"
+           + " '5%', '0.2%', '0.0005'.\n"
+           + "Examples of invalid fractions/percentages are '', '10 percent'," + " 'Hulk Hogan'"),
  
 -  PATH("path", Predicates.<String> alwaysTrue(),
 -      "A string that represents a filesystem path, which can be either relative"
 -          + " or absolute to some directory. The filesystem depends on the property."
 -          + " The following environment variables will be substituted: "
 +  PATH("path", x -> true,
 +      "A string that represents a filesystem path, which can be either relative or absolute to some directory. The filesystem depends on the property. The "
 +          + "following environment variables will be substituted: "
            + Constants.PATH_PROPERTY_ENV_VARS),
  
 -  ABSOLUTEPATH("absolute path", new Predicate<String>() {
 -    @Override
 -    public boolean apply(final String input) {
 -      return input == null || input.trim().isEmpty() || new Path(input.trim()).isAbsolute();
 -    }
 -  }, "An absolute filesystem path. The filesystem depends on the property."
 -      + " This is the same as path, but enforces that its root is explicitly" + " specified."),
 +  ABSOLUTEPATH("absolute path",
 +      x -> x == null || x.trim().isEmpty() || new Path(x.trim()).isAbsolute(),
 +      "An absolute filesystem path. The filesystem depends on the property. This is the same as path, but enforces that its root is explicitly specified."),
  
    CLASSNAME("java class", new Matches("[\\w$.]*"),
        "A fully qualified java class name representing a class on the classpath.\n"
diff --cc core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
index dc10e57,14c5183..001b1db
--- a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
@@@ -197,12 -164,10 +197,11 @@@ public class SiteConfiguration extends 
            }
          }
        } catch (IOException e) {
-         log.warn(
-             "Failed to extract sensitive properties from Hadoop CredentialProvider, falling back to accumulo-site.xml",
-             e);
+         log.warn("Failed to extract sensitive properties from Hadoop"
+             + " CredentialProvider, falling back to accumulo-site.xml", e);
        }
      }
 +    CliConfiguration.getProperties(props, filter);
    }
  
    protected Configuration getHadoopConfiguration() {
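
For context on the fallback above: sensitive properties are normally read from a Hadoop
CredentialProvider keystore that was populated ahead of time with the hadoop CLI. A hedged
example follows; instance.secret is a real sensitive property name, but the keystore path is
a placeholder.

    hadoop credential create instance.secret \
        -provider jceks://file/etc/accumulo/conf/accumulo.jceks
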
diff --cc core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCache.java
index 24d5055,0000000..5d8381b
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCache.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCache.java
@@@ -1,704 -1,0 +1,705 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.file.blockfile.cache.lru;
 +
 +import java.lang.ref.WeakReference;
 +import java.util.Objects;
 +import java.util.PriorityQueue;
 +import java.util.concurrent.ConcurrentHashMap;
 +import java.util.concurrent.Executors;
 +import java.util.concurrent.ScheduledExecutorService;
 +import java.util.concurrent.TimeUnit;
 +import java.util.concurrent.atomic.AtomicLong;
 +import java.util.concurrent.locks.ReentrantLock;
 +import java.util.function.Supplier;
 +
 +import org.apache.accumulo.core.file.blockfile.cache.BlockCache;
 +import org.apache.accumulo.core.file.blockfile.cache.CacheEntry;
 +import org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize;
 +import org.apache.accumulo.core.file.blockfile.cache.impl.SizeConstants;
 +import org.apache.accumulo.core.util.NamingThreadFactory;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +/**
 + * A block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an
 + * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a
 + * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock}
 + * operations.
 + *
 + * <p>
 + * Contains three levels of block priority to allow for scan-resistance and in-memory families. A
 + * block is added with an inMemory flag if necessary; otherwise a block becomes a single-access
 + * priority. Once a block is accessed again, it changes to multiple access. This is used to
 + * prevent scans from thrashing the cache, adding a least-frequently-used element to the eviction
 + * algorithm.
 + *
 + * <p>
 + * Each priority is given its own chunk of the total cache to ensure fairness during eviction. Each
 + * priority will retain close to its maximum size; however, if any priority is not using its entire
 + * chunk, the others are able to grow beyond their chunk size.
 + *
 + * <p>
 + * Instantiated at a minimum with the total size and average block size. All sizes are in bytes. The
 + * block size is not especially important as this cache is fully dynamic in its sizing of blocks. It
 + * is only used for pre-allocating data structures and in initial heap estimation of the map.
 + *
 + * <p>
 + * The detailed constructor defines the sizes for the three priorities (they should total to the
 + * maximum size defined). It also sets the levels that trigger and control the eviction thread.
 + *
 + * <p>
 + * The acceptable size is the cache size level which triggers the eviction process to start. It
 + * evicts enough blocks to get the size below the minimum size specified.
 + *
 + * <p>
 + * Eviction happens in a separate thread and involves a single full-scan of the map. It determines
 + * how many bytes must be freed to reach the minimum size, and then while scanning determines the
 + * fewest least-recently-used blocks necessary from each of the three priorities (so the candidate
 + * sets may total three times the bytes to free). It then uses the priority chunk sizes to evict
 + * fairly according to the relative sizes and usage.
 + */
 +public class LruBlockCache extends SynchronousLoadingBlockCache implements BlockCache, HeapSize {
 +
 +  private static final Logger log = LoggerFactory.getLogger(LruBlockCache.class);
 +
 +  /** Statistics thread */
 +  static final int statThreadPeriod = 60;
 +
 +  /** Concurrent map (the cache) */
 +  private final ConcurrentHashMap<String,CachedBlock> map;
 +
 +  /** Eviction lock (locked when eviction in process) */
 +  private final ReentrantLock evictionLock = new ReentrantLock(true);
 +
 +  /** Volatile boolean to track if we are in an eviction process or not */
 +  private volatile boolean evictionInProgress = false;
 +
 +  /** Eviction thread */
 +  private final EvictionThread evictionThread;
 +
 +  /** Statistics thread schedule pool (for heavy debugging, could remove) */
 +  private final ScheduledExecutorService scheduleThreadPool = Executors.newScheduledThreadPool(1,
 +      new NamingThreadFactory("LRUBlockCacheStats"));
 +
 +  /** Current size of cache */
 +  private final AtomicLong size;
 +
 +  /** Current number of cached elements */
 +  private final AtomicLong elements;
 +
 +  /** Cache access count (sequential ID) */
 +  private final AtomicLong count;
 +
 +  /** Cache statistics */
 +  private final CacheStats stats;
 +
 +  /** Overhead of the structure itself */
 +  private final long overhead;
 +
 +  private final LruBlockCacheConfiguration conf;
 +
 +  /**
 +   * Default constructor. Specify maximum size and expected average block size (approximation is
 +   * fine).
 +   *
 +   * <p>
 +   * All other factors will be calculated based on defaults specified in this class.
 +   *
 +   * @param conf
 +   *          block cache configuration
 +   */
 +  public LruBlockCache(final LruBlockCacheConfiguration conf) {
 +    super();
 +    this.conf = conf;
 +
 +    int mapInitialSize = (int) Math.ceil(1.2 * conf.getMaxSize() / conf.getBlockSize());
 +
 +    map = new ConcurrentHashMap<>(mapInitialSize, conf.getMapLoadFactor(),
 +        conf.getMapConcurrencyLevel());
 +    this.stats = new CacheStats();
 +    this.count = new AtomicLong(0);
 +    this.elements = new AtomicLong(0);
 +    this.overhead = calculateOverhead(conf.getMaxSize(), conf.getBlockSize(),
 +        conf.getMapConcurrencyLevel());
 +    this.size = new AtomicLong(this.overhead);
 +
 +    if (conf.isUseEvictionThread()) {
 +      this.evictionThread = new EvictionThread(this);
 +      this.evictionThread.start();
 +      while (!this.evictionThread.running()) {
 +        try {
 +          Thread.sleep(10);
 +        } catch (InterruptedException ex) {
 +          throw new RuntimeException(ex);
 +        }
 +      }
 +    } else {
 +      this.evictionThread = null;
 +    }
 +    this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod,
 +        statThreadPeriod, TimeUnit.SECONDS);
 +  }
 +
 +  public long getOverhead() {
 +    return overhead;
 +  }
 +
 +  /*
 +   * This class exists so that every cache entry does not have a reference to the cache.
 +   */
 +  private class LruCacheEntry implements CacheEntry {
 +    private final CachedBlock block;
 +
 +    LruCacheEntry(CachedBlock block) {
 +      this.block = block;
 +    }
 +
 +    @Override
 +    public byte[] getBuffer() {
 +      return block.getBuffer();
 +    }
 +
 +    @Override
 +    public <T extends Weighbable> T getIndex(Supplier<T> supplier) {
 +      return block.getIndex(supplier);
 +    }
 +
 +    @Override
 +    public void indexWeightChanged() {
 +      long newSize = block.recordSize(size);
 +      if (newSize > acceptableSize() && !evictionInProgress) {
 +        runEviction();
 +      }
 +    }
 +  }
 +
 +  private CacheEntry wrap(CachedBlock cb) {
 +    if (cb == null) {
 +      return null;
 +    }
 +
 +    return new LruCacheEntry(cb);
 +  }
 +
 +  // BlockCache implementation
 +
 +  /**
 +   * Cache the block with the specified name and buffer.
 +   * <p>
 +   * It is assumed this will NEVER be called on an already cached block. If that is done, it is
 +   * assumed that you are reinserting the same exact block due to a race condition and will update
 +   * the buffer but not modify the size of the cache.
 +   *
 +   * @param blockName
 +   *          block name
 +   * @param buf
 +   *          block buffer
 +   * @param inMemory
 +   *          if block is in-memory
 +   */
 +  public CacheEntry cacheBlock(String blockName, byte buf[], boolean inMemory) {
 +    CachedBlock cb = map.get(blockName);
 +    if (cb != null) {
 +      stats.duplicateReads();
 +      cb.access(count.incrementAndGet());
 +    } else {
 +      cb = new CachedBlock(blockName, buf, count.incrementAndGet(), inMemory);
 +      CachedBlock currCb = map.putIfAbsent(blockName, cb);
 +      if (currCb != null) {
 +        stats.duplicateReads();
 +        cb = currCb;
 +        cb.access(count.incrementAndGet());
 +      } else {
 +        // Actually added block to cache
 +        long newSize = cb.recordSize(size);
 +        elements.incrementAndGet();
 +        if (newSize > acceptableSize() && !evictionInProgress) {
 +          runEviction();
 +        }
 +      }
 +    }
 +
 +    return wrap(cb);
 +  }
 +
 +  /**
 +   * Cache the block with the specified name and buffer.
 +   * <p>
 +   * It is assumed this will NEVER be called on an already cached block. If that is done, it is
 +   * assumed that you are reinserting the same exact block due to a race condition and will update
 +   * the buffer but not modify the size of the cache.
 +   *
 +   * @param blockName
 +   *          block name
 +   * @param buf
 +   *          block buffer
 +   */
 +  @Override
 +  public CacheEntry cacheBlock(String blockName, byte buf[]) {
 +    return cacheBlock(blockName, buf, false);
 +  }
 +
 +  /**
 +   * Get the buffer of the block with the specified name.
 +   *
 +   * @param blockName
 +   *          block name
 +   * @return buffer of specified block name, or null if not in cache
 +   */
 +  @Override
 +  public CacheEntry getBlock(String blockName) {
 +    CachedBlock cb = map.get(blockName);
 +    if (cb == null) {
 +      stats.miss();
 +      return null;
 +    }
 +    stats.hit();
 +    cb.access(count.incrementAndGet());
 +    return wrap(cb);
 +  }
 +
 +  @Override
 +  protected CacheEntry getBlockNoStats(String blockName) {
 +    CachedBlock cb = map.get(blockName);
 +    if (cb != null) {
 +      cb.access(count.incrementAndGet());
 +    }
 +    return wrap(cb);
 +  }
 +
 +  protected long evictBlock(CachedBlock block) {
 +    if (map.remove(block.getName()) != null) {
 +      elements.decrementAndGet();
 +      stats.evicted();
 +      return block.evicted(size);
 +    }
 +    return 0;
 +  }
 +
 +  /**
 +   * Multi-threaded call to run the eviction process.
 +   */
 +  private void runEviction() {
 +    if (evictionThread == null) {
 +      evict();
 +    } else {
 +      evictionThread.evict();
 +    }
 +  }
 +
 +  /**
 +   * Eviction method.
 +   */
 +  void evict() {
 +
 +    // Ensure only one eviction at a time
 +    if (!evictionLock.tryLock())
 +      return;
 +
 +    try {
 +      evictionInProgress = true;
 +
 +      long bytesToFree = size.get() - minSize();
 +
 +      log.trace("Block cache LRU eviction started.  Attempting to free {} bytes", bytesToFree);
 +
 +      if (bytesToFree <= 0)
 +        return;
 +
 +      // Instantiate priority buckets
 +      BlockBucket bucketSingle = new BlockBucket(bytesToFree, conf.getBlockSize(), singleSize());
 +      BlockBucket bucketMulti = new BlockBucket(bytesToFree, conf.getBlockSize(), multiSize());
 +      BlockBucket bucketMemory = new BlockBucket(bytesToFree, conf.getBlockSize(), memorySize());
 +
 +      // Scan entire map putting into appropriate buckets
 +      for (CachedBlock cachedBlock : map.values()) {
 +        switch (cachedBlock.getPriority()) {
 +          case SINGLE: {
 +            bucketSingle.add(cachedBlock);
 +            break;
 +          }
 +          case MULTI: {
 +            bucketMulti.add(cachedBlock);
 +            break;
 +          }
 +          case MEMORY: {
 +            bucketMemory.add(cachedBlock);
 +            break;
 +          }
 +        }
 +      }
 +
 +      PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<>(3);
 +
 +      bucketQueue.add(bucketSingle);
 +      bucketQueue.add(bucketMulti);
 +      bucketQueue.add(bucketMemory);
 +
 +      int remainingBuckets = 3;
 +      long bytesFreed = 0;
 +
 +      BlockBucket bucket;
 +      while ((bucket = bucketQueue.poll()) != null) {
 +        long overflow = bucket.overflow();
 +        if (overflow > 0) {
 +          long bucketBytesToFree = Math.min(overflow,
 +              (long) Math.ceil((bytesToFree - bytesFreed) / (double) remainingBuckets));
 +          bytesFreed += bucket.free(bucketBytesToFree);
 +        }
 +        remainingBuckets--;
 +      }
 +
 +      float singleMB = ((float) bucketSingle.totalSize()) / ((float) (1024 * 1024));
 +      float multiMB = ((float) bucketMulti.totalSize()) / ((float) (1024 * 1024));
 +      float memoryMB = ((float) bucketMemory.totalSize()) / ((float) (1024 * 1024));
 +
 +      log.trace(
-           "Block cache LRU eviction completed. Freed {} bytes. Priority Sizes: Single={}MB ({}), Multi={}MB ({}), Memory={}MB ({})",
++          "Block cache LRU eviction completed. Freed {} bytes. Priority Sizes:"
++              + " Single={}MB ({}), Multi={}MB ({}), Memory={}MB ({})",
 +          bytesFreed, singleMB, bucketSingle.totalSize(), multiMB, bucketMulti.totalSize(),
 +          memoryMB, bucketMemory.totalSize());
 +
 +    } finally {
 +      stats.evict();
 +      evictionInProgress = false;
 +      evictionLock.unlock();
 +    }
 +  }
 +
 +  /**
 +   * Used to group blocks into priority buckets. There will be a BlockBucket for each priority
 +   * (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate number of
 +   * elements out of each according to configuration parameters and their relative sizes.
 +   */
 +  private class BlockBucket implements Comparable<BlockBucket> {
 +
 +    private CachedBlockQueue queue;
 +    private long totalSize = 0;
 +    private long bucketSize;
 +
 +    public BlockBucket(long bytesToFree, long blockSize, long bucketSize) {
 +      this.bucketSize = bucketSize;
 +      queue = new CachedBlockQueue(bytesToFree, blockSize);
 +      totalSize = 0;
 +    }
 +
 +    public void add(CachedBlock block) {
 +      totalSize += block.heapSize();
 +      queue.add(block);
 +    }
 +
 +    public long free(long toFree) {
 +      CachedBlock[] blocks = queue.get();
 +      long freedBytes = 0;
 +      for (CachedBlock block : blocks) {
 +        freedBytes += evictBlock(block);
 +        if (freedBytes >= toFree) {
 +          return freedBytes;
 +        }
 +      }
 +      return freedBytes;
 +    }
 +
 +    public long overflow() {
 +      return totalSize - bucketSize;
 +    }
 +
 +    public long totalSize() {
 +      return totalSize;
 +    }
 +
 +    @Override
 +    public int compareTo(BlockBucket that) {
 +      if (this.overflow() == that.overflow())
 +        return 0;
 +      return this.overflow() > that.overflow() ? 1 : -1;
 +    }
 +
 +    @Override
 +    public int hashCode() {
 +      return Objects.hashCode(overflow());
 +    }
 +
 +    @Override
 +    public boolean equals(Object that) {
 +      if (that instanceof BlockBucket)
 +        return compareTo((BlockBucket) that) == 0;
 +      return false;
 +    }
 +  }
 +
 +  @Override
 +  public long getMaxHeapSize() {
 +    return getMaxSize();
 +  }
 +
 +  @Override
 +  public long getMaxSize() {
 +    return this.conf.getMaxSize();
 +  }
 +
 +  @Override
 +  public int getMaxEntrySize() {
 +    return (int) Math.min(Integer.MAX_VALUE, getMaxSize());
 +  }
 +
 +  /**
 +   * Get the current size of this cache.
 +   *
 +   * @return current size in bytes
 +   */
 +  public long getCurrentSize() {
 +    return this.size.get();
 +  }
 +
 +  /**
 +   * Get the current size of this cache.
 +   *
 +   * @return current size in bytes
 +   */
 +  public long getFreeSize() {
 +    return getMaxSize() - getCurrentSize();
 +  }
 +
 +  /**
 +   * Get the size of this cache (number of cached blocks)
 +   *
 +   * @return number of cached blocks
 +   */
 +  public long size() {
 +    return this.elements.get();
 +  }
 +
 +  /**
 +   * Get the number of eviction runs that have occurred
 +   */
 +  public long getEvictionCount() {
 +    return this.stats.getEvictionCount();
 +  }
 +
 +  /**
 +   * Get the number of blocks that have been evicted during the lifetime of this cache.
 +   */
 +  public long getEvictedCount() {
 +    return this.stats.getEvictedCount();
 +  }
 +
 +  /**
 +   * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows
 +   * above the acceptable level.
 +   *
 +   * <p>
 +   * Thread is triggered into action by {@link LruBlockCache#runEviction()}
 +   */
 +  private static class EvictionThread extends Thread {
 +    private WeakReference<LruBlockCache> cache;
 +    private boolean running = false;
 +
 +    public EvictionThread(LruBlockCache cache) {
 +      super("LruBlockCache.EvictionThread");
 +      setDaemon(true);
 +      this.cache = new WeakReference<>(cache);
 +    }
 +
 +    public synchronized boolean running() {
 +      return running;
 +    }
 +
 +    @Override
 +    public void run() {
 +      while (true) {
 +        synchronized (this) {
 +          running = true;
 +          try {
 +            this.wait();
 +          } catch (InterruptedException e) {}
 +        }
 +        LruBlockCache cache = this.cache.get();
 +        if (cache == null)
 +          break;
 +        cache.evict();
 +      }
 +    }
 +
 +    public void evict() {
 +      synchronized (this) {
 +        this.notify();
 +      }
 +    }
 +  }
 +
 +  /*
 +   * Statistics thread. Periodically prints the cache statistics to the log.
 +   */
 +  private static class StatisticsThread extends Thread {
 +    LruBlockCache lru;
 +
 +    public StatisticsThread(LruBlockCache lru) {
 +      super("LruBlockCache.StatisticsThread");
 +      setDaemon(true);
 +      this.lru = lru;
 +    }
 +
 +    @Override
 +    public void run() {
 +      lru.logStats();
 +    }
 +  }
 +
 +  public void logStats() {
 +    // Log size
 +    long totalSize = heapSize();
 +    long freeSize = this.conf.getMaxSize() - totalSize;
 +    float sizeMB = ((float) totalSize) / ((float) (1024 * 1024));
 +    float freeMB = ((float) freeSize) / ((float) (1024 * 1024));
 +    float maxMB = ((float) this.conf.getMaxSize()) / ((float) (1024 * 1024));
 +    log.debug(
 +        "Cache Stats: Sizes: Total={}MB ({}), Free={}MB ({}), Max={}MB ({}), Counts: Blocks={}, Access={}, Hit={}, Miss={}, Evictions={}, Evicted={},"
 +            + "Ratios: Hit Ratio={}%, Miss Ratio={}%, Evicted/Run={}, Duplicate Reads={}",
 +        sizeMB, totalSize, freeMB, freeSize, maxMB, this.conf.getMaxSize(), size(),
 +        stats.requestCount(), stats.hitCount(), stats.getMissCount(), stats.getEvictionCount(),
 +        stats.getEvictedCount(), stats.getHitRatio() * 100, stats.getMissRatio() * 100,
 +        stats.evictedPerEviction(), stats.getDuplicateReads());
 +  }
 +
 +  /**
 +   * Get counter statistics for this cache.
 +   *
 +   * <p>
 +   * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes.
 +   */
 +  @Override
 +  public CacheStats getStats() {
 +    return this.stats;
 +  }
 +
 +  public static class CacheStats implements BlockCache.Stats {
 +    private final AtomicLong accessCount = new AtomicLong(0);
 +    private final AtomicLong hitCount = new AtomicLong(0);
 +    private final AtomicLong missCount = new AtomicLong(0);
 +    private final AtomicLong evictionCount = new AtomicLong(0);
 +    private final AtomicLong evictedCount = new AtomicLong(0);
 +    private final AtomicLong duplicateReads = new AtomicLong(0);
 +
 +    public void miss() {
 +      missCount.incrementAndGet();
 +      accessCount.incrementAndGet();
 +    }
 +
 +    public void hit() {
 +      hitCount.incrementAndGet();
 +      accessCount.incrementAndGet();
 +    }
 +
 +    public void evict() {
 +      evictionCount.incrementAndGet();
 +    }
 +
 +    public void duplicateReads() {
 +      duplicateReads.incrementAndGet();
 +    }
 +
 +    public void evicted() {
 +      evictedCount.incrementAndGet();
 +    }
 +
 +    @Override
 +    public long requestCount() {
 +      return accessCount.get();
 +    }
 +
 +    public long getMissCount() {
 +      return missCount.get();
 +    }
 +
 +    @Override
 +    public long hitCount() {
 +      return hitCount.get();
 +    }
 +
 +    public long getEvictionCount() {
 +      return evictionCount.get();
 +    }
 +
 +    public long getDuplicateReads() {
 +      return duplicateReads.get();
 +    }
 +
 +    public long getEvictedCount() {
 +      return evictedCount.get();
 +    }
 +
 +    public double getHitRatio() {
 +      return ((float) hitCount() / (float) requestCount());
 +    }
 +
 +    public double getMissRatio() {
 +      return ((float) getMissCount() / (float) requestCount());
 +    }
 +
 +    public double evictedPerEviction() {
 +      return (float) getEvictedCount() / (float) getEvictionCount();
 +    }
 +  }
 +
 +  public final static long CACHE_FIXED_OVERHEAD = ClassSize
 +      .align((3 * SizeConstants.SIZEOF_LONG) + (8 * ClassSize.REFERENCE)
 +          + (5 * SizeConstants.SIZEOF_FLOAT) + SizeConstants.SIZEOF_BOOLEAN + ClassSize.OBJECT);
 +
 +  // HeapSize implementation
 +  @Override
 +  public long heapSize() {
 +    return getCurrentSize();
 +  }
 +
 +  public static long calculateOverhead(long maxSize, long blockSize, int concurrency) {
 +    return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP
 +        + ((int) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY)
 +        + (concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
 +  }
 +
 +  // Simple calculators of sizes given factors and maxSize
 +
 +  private long acceptableSize() {
 +    return (long) Math.floor(this.conf.getMaxSize() * this.conf.getAcceptableFactor());
 +  }
 +
 +  private long minSize() {
 +    return (long) Math.floor(this.conf.getMaxSize() * this.conf.getMinFactor());
 +  }
 +
 +  private long singleSize() {
 +    return (long) Math
 +        .floor(this.conf.getMaxSize() * this.conf.getSingleFactor() * this.conf.getMinFactor());
 +  }
 +
 +  private long multiSize() {
 +    return (long) Math
 +        .floor(this.conf.getMaxSize() * this.conf.getMultiFactor() * this.conf.getMinFactor());
 +  }
 +
 +  private long memorySize() {
 +    return (long) Math
 +        .floor(this.conf.getMaxSize() * this.conf.getMemoryFactor() * this.conf.getMinFactor());
 +  }
 +
 +  public void shutdown() {
 +    this.scheduleThreadPool.shutdown();
 +  }
 +}
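
To make the cache's lifecycle concrete, here is a minimal usage sketch against the methods
shown above. Construction of the LruBlockCacheConfiguration is elided because its API is not
part of this hunk; assume a configured instance named conf.

    // Sketch only: `conf` is an already-built LruBlockCacheConfiguration.
    LruBlockCache cache = new LruBlockCache(conf);

    // The first access caches the block at single-access priority; a later
    // getBlock() promotes it to multi-access priority.
    cache.cacheBlock("rfile1:block0", new byte[1024]);
    CacheEntry entry = cache.getBlock("rfile1:block0");

    // Hit, miss, and eviction counters accumulate in CacheStats.
    LruBlockCache.CacheStats stats = cache.getStats();
    System.out.println(stats.hitCount() + " hits of " + stats.requestCount());

    cache.shutdown();
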
diff --cc core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
index 38ea80a,778be3b..25462db
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
@@@ -17,7 -17,8 +17,9 @@@
  
  package org.apache.accumulo.core.file.rfile.bcfile;
  
+ import static java.nio.charset.StandardCharsets.UTF_8;
+ 
 +import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.Closeable;
  import java.io.DataInput;
@@@ -27,8 -28,6 +29,7 @@@ import java.io.DataOutputStream
  import java.io.IOException;
  import java.io.InputStream;
  import java.io.OutputStream;
 +import java.io.UncheckedIOException;
- import java.nio.charset.StandardCharsets;
  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.Collections;
diff --cc core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
index 942dc19,ce603d3..1c2119b
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
@@@ -272,14 -294,17 +284,17 @@@ public class IteratorUtil 
      return loadIterators(source, iters, iterOpts, env, useAccumuloClassLoader, context, null);
    }
  
-   public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-       SortedKeyValueIterator<K,V> source, Collection<IterInfo> iters,
-       Map<String,Map<String,String>> iterOpts, IteratorEnvironment env,
-       boolean useAccumuloClassLoader, String context,
-       Map<String,Class<? extends SortedKeyValueIterator<K,V>>> classCache) throws IOException {
+   // @formatter:off
+   public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+     loadIterators(
+   // @formatter:on
+           SortedKeyValueIterator<K,V> source, Collection<IterInfo> iters,
+           Map<String,Map<String,String>> iterOpts, IteratorEnvironment env,
+           boolean useAccumuloClassLoader, String context,
+           Map<String,Class<? extends SortedKeyValueIterator<K,V>>> classCache) throws IOException {
      // wrap the source in a SynchronizedIterator in case any of the additional configured iterators
      // want to use threading
 -    SortedKeyValueIterator<K,V> prev = new SynchronizedIterator<>(source);
 +    SortedKeyValueIterator<K,V> prev = source;
  
      try {
        for (IterInfo iterInfo : iters) {
@@@ -315,12 -345,16 +330,16 @@@
    }
  
    @SuppressWarnings("unchecked")
-   private static <K extends WritableComparable<?>,V extends Writable> Class<? extends SortedKeyValueIterator<K,V>> loadClass(
-       boolean useAccumuloClassLoader, String context, IterInfo iterInfo)
-       throws ClassNotFoundException, IOException {
+   // @formatter:off
+   private static
+   <K extends WritableComparable<?>,V extends Writable> Class<? extends SortedKeyValueIterator<K,V>>
+     loadClass(
+   // @formatter:on
+           boolean useAccumuloClassLoader, String context, IterInfo iterInfo)
+           throws ClassNotFoundException, IOException {
      Class<? extends SortedKeyValueIterator<K,V>> clazz;
      if (useAccumuloClassLoader) {
 -      if (context != null && !context.equals(""))
 +      if (context != null && !context.equals("")) {
          clazz = (Class<? extends SortedKeyValueIterator<K,V>>) AccumuloVFSClassLoader
              .getContextManager()
              .loadClass(context, iterInfo.className, SortedKeyValueIterator.class);
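
For orientation, a sketch of calling the reflowed overload above to stack one configured
iterator over a source. The source and env variables are assumed to be in scope, the
org.example class is hypothetical, and the IterInfo constructor argument order (priority,
class name, iterator name) is an assumption from context; the trailing nulls mean no
classloader context and no class cache.

    List<IterInfo> iters = Collections
        .singletonList(new IterInfo(10, "org.example.MyFilter", "myFilter"));
    Map<String,Map<String,String>> opts = Collections.singletonMap("myFilter",
        Collections.<String,String> emptyMap());
    SortedKeyValueIterator<Key,Value> stacked = IteratorUtil.loadIterators(source, iters,
        opts, env, true, null, null);
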
diff --cc core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
index b5bcf9d,b7ba44f..ebbd8fe
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
@@@ -150,14 -158,15 +150,15 @@@ public class DefaultCryptoModule implem
  
      if (cipherMode == Cipher.ENCRYPT_MODE) {
  
-       StringBuilder errorBuf = new StringBuilder(
-           "The following problems were found with the CryptoModuleParameters object you provided for an encrypt operation:\n");
+       StringBuilder errorBuf = new StringBuilder("The following problems were"
+           + " found with the CryptoModuleParameters object you provided for an"
+           + " encrypt operation:\n");
        boolean allIsWell = true;
  
 -      allIsWell = validateNotEmpty(params.getAlgorithmName(), allIsWell, errorBuf,
 -          "No algorithm name was specified.");
 +      allIsWell = validateNotEmpty(params.getCipherSuite(), allIsWell, errorBuf,
 +          "No cipher suite was specified.");
  
 -      if (allIsWell && params.getAlgorithmName().equals("NullCipher")) {
 +      if (allIsWell && params.getCipherSuite().equals("NullCipher")) {
          return true;
        }
  
@@@ -181,12 -192,17 +182,13 @@@
        return allIsWell;
  
      } else if (cipherMode == Cipher.DECRYPT_MODE) {
-       StringBuilder errorBuf = new StringBuilder(
-           "The following problems were found with the CryptoModuleParameters object you provided for a decrypt operation:\n");
+       StringBuilder errorBuf = new StringBuilder("The following problems were"
+           + " found with the CryptoModuleParameters object you provided for a"
+           + " decrypt operation:\n");
        boolean allIsWell = true;
  
 -      allIsWell = validateNotEmpty(params.getPadding(), allIsWell, errorBuf,
 -          "No padding was specified.");
        allIsWell = validateNotZero(params.getKeyLength(), allIsWell, errorBuf,
            "No key length was specified.");
 -      allIsWell = validateNotEmpty(params.getEncryptionMode(), allIsWell, errorBuf,
 -          "No encryption mode was specified.");
        allIsWell = validateNotEmpty(params.getRandomNumberGenerator(), allIsWell, errorBuf,
            "No random number generator was specified.");
        allIsWell = validateNotEmpty(params.getRandomNumberGeneratorProvider(), allIsWell, errorBuf,
diff --cc core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
index 3bd4de3,d32cdf9..079c579
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
@@@ -61,26 -59,17 +59,26 @@@ public class DefaultCryptoModuleUtils 
        cipher = new NullCipher();
      } else {
        try {
 -        cipher = Cipher.getInstance(cipherSuite);
 +        if (securityProvider == null || securityProvider.equals("")) {
 +          cipher = Cipher.getInstance(cipherSuite);
 +        } else {
 +          cipher = Cipher.getInstance(cipherSuite, securityProvider);
 +        }
        } catch (NoSuchAlgorithmException e) {
-         log.error(String.format(
-             "Accumulo configuration file contained a cipher suite \"%s\" that was not recognized by any providers",
-             cipherSuite));
+         log.error(String.format("Accumulo configuration file contained a cipher"
+             + " suite \"%s\" that was not recognized by any providers", cipherSuite));
          throw new RuntimeException(e);
        } catch (NoSuchPaddingException e) {
          log.error(String.format(
-             "Accumulo configuration file contained a cipher, \"%s\" with a padding that was not recognized by any providers",
+             "Accumulo configuration file contained a"
+                 + " cipher, \"%s\" with a padding that was not recognized by any" + " providers",
              cipherSuite));
          throw new RuntimeException(e);
 +      } catch (NoSuchProviderException e) {
 +        log.error(String.format(
 +            "Accumulo configuration file contained a provider, \"%s\" an unrecognized provider",
 +            securityProvider));
 +        throw new RuntimeException(e);
        }
      }
      return cipher;
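
The two lookup paths selected above behave as in this sketch; "SunJCE" is only an example
provider name, and an unknown name is exactly what raises the NoSuchProviderException the
new catch block handles.

    import javax.crypto.Cipher;

    public class CipherLookupSketch {
      public static void main(String[] args) throws Exception {
        // No provider given: the JVM picks the first provider offering the suite.
        Cipher any = Cipher.getInstance("AES/CBC/NoPadding");
        // Provider given: only that provider is consulted.
        Cipher named = Cipher.getInstance("AES/CBC/NoPadding", "SunJCE");
        System.out.println(any.getProvider() + " / " + named.getProvider());
      }
    }
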
diff --cc core/src/test/java/org/apache/accumulo/core/data/MutationTest.java
index d18f523,5bbd2b5..a190c17
--- a/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java
@@@ -687,16 -687,8 +687,16 @@@ public class MutationTest 
        m.put("cf", "cq", "v");
        m2.put("cf", "cq", "v");
      } catch (IllegalStateException e) {
-       fail(
-           "Calling Mutation#equals then Mutation#put should not result in an IllegalStateException.");
+       fail("Calling Mutation#equals then Mutation#put should not result in an"
+           + " IllegalStateException.");
      }
    }
 +
 +  @Test(expected = IllegalArgumentException.class)
 +  public void testSanityCheck() {
 +    Mutation m = new Mutation("too big mutation");
 +    m.put("cf", "cq1", "v");
 +    m.estRowAndLargeValSize += (Long.MAX_VALUE / 2);
 +    m.put("cf", "cq2", "v");
 +  }
  }
diff --cc core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
index a0954f8,b3045ce..e49dbef
--- a/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
@@@ -63,15 -56,14 +63,18 @@@ public class CryptoTest 
  
    private static final int MARKER_INT = 0xCADEFEDD;
    private static final String MARKER_STRING = "1 2 3 a b c";
 -  public static final String CONFIG_FILE_SYSTEM_PROP = "org.apache.accumulo.config.file";
    public static final String CRYPTO_ON_CONF = "crypto-on-accumulo-site.xml";
    public static final String CRYPTO_OFF_CONF = "crypto-off-accumulo-site.xml";
-   public static final String CRYPTO_ON_KEK_OFF_CONF = "crypto-on-no-key-encryption-accumulo-site.xml";
+   // @formatter:off
+   public static final String CRYPTO_ON_KEK_OFF_CONF =
+     "crypto-on-no-key-encryption-accumulo-site.xml";
+   // @formatter:on
  
 +  // Used for kek file testing
 +  private static File kekWorks;
 +  private static File kekTooLong;
 +  private static File kekTooShort;
 +
    @Rule
    public ExpectedException exception = ExpectedException.none();
  
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
index ee1a76b,89c755d..2f24f30
--- a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
@@@ -128,32 -118,16 +128,32 @@@ public class Proxy implements KeywordEx
      Opts opts = new Opts();
      opts.parseArgs(Proxy.class.getName(), args);
  
 +    Properties props = new Properties();
 +    if (opts.prop != null) {
 +      props = opts.prop;
 +    } else {
 +      try (InputStream is = this.getClass().getClassLoader()
 +          .getResourceAsStream("proxy.properties")) {
 +        if (is != null) {
 +          props.load(is);
 +        } else {
 +          System.err.println(
 +              "proxy.properties needs to be specified as argument (using -p) or on the classpath (by putting the file in conf/)");
 +          System.exit(-1);
 +        }
 +      }
 +    }
 +
      boolean useMini = Boolean
 -        .parseBoolean(opts.prop.getProperty(USE_MINI_ACCUMULO_KEY, USE_MINI_ACCUMULO_DEFAULT));
 +        .parseBoolean(props.getProperty(USE_MINI_ACCUMULO_KEY, USE_MINI_ACCUMULO_DEFAULT));
      boolean useMock = Boolean
 -        .parseBoolean(opts.prop.getProperty(USE_MOCK_INSTANCE_KEY, USE_MOCK_INSTANCE_DEFAULT));
 -    String instance = opts.prop.getProperty(ACCUMULO_INSTANCE_NAME_KEY);
 -    String zookeepers = opts.prop.getProperty(ZOOKEEPERS_KEY);
 +        .parseBoolean(props.getProperty(USE_MOCK_INSTANCE_KEY, USE_MOCK_INSTANCE_DEFAULT));
 +    String instance = props.getProperty(ACCUMULO_INSTANCE_NAME_KEY);
 +    String zookeepers = props.getProperty(ZOOKEEPERS_KEY);
  
      if (!useMini && !useMock && instance == null) {
-       System.err.println(
-           "Properties file must contain one of : useMiniAccumulo=true, useMockInstance=true, or instance=<instance name>");
+       System.err.println("Properties file must contain one of : useMiniAccumulo=true,"
+           + " useMockInstance=true, or instance=<instance name>");
        System.exit(1);
      }
  
diff --cc server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index dc2dead,2f98fdd..44b18a8
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@@ -270,12 -341,13 +270,13 @@@ public class Accumulo 
            new ZooStore<Accumulo>(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZFATE,
                ZooReaderWriter.getInstance()));
        if (!(fate.list().isEmpty())) {
-         throw new AccumuloException(
-             "Aborting upgrade because there are outstanding FATE transactions from a previous Accumulo version. "
-                 + "Please see the README document for instructions on what to do under your previous version.");
+         throw new AccumuloException("Aborting upgrade because there are"
+             + " outstanding FATE transactions from a previous Accumulo version."
+             + " Please see the README document for instructions on what to do under"
+             + " your previous version.");
        }
      } catch (Exception exception) {
 -      log.fatal("Problem verifying Fate readiness", exception);
 +      log.error("Problem verifying Fate readiness", exception);
        System.exit(1);
      }
    }
diff --cc server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
index 5cfb756,b2beaa5..0b2ffa3
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
@@@ -82,24 -86,27 +82,25 @@@ import com.google.common.collect.Multim
   */
  public class HostRegexTableLoadBalancer extends TableLoadBalancer implements ConfigurationObserver {
  
+   private static final String PROP_PREFIX = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey();
+ 
    private static final Logger LOG = LoggerFactory.getLogger(HostRegexTableLoadBalancer.class);
-   public static final String HOST_BALANCER_PREFIX = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey()
-       + "balancer.host.regex.";
-   public static final String HOST_BALANCER_OOB_CHECK_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-       .getKey() + "balancer.host.regex.oob.period";
+   public static final String HOST_BALANCER_PREFIX = PROP_PREFIX + "balancer.host.regex.";
+   public static final String HOST_BALANCER_OOB_CHECK_KEY = PROP_PREFIX
+       + "balancer.host.regex.oob.period";
    private static final String HOST_BALANCER_OOB_DEFAULT = "5m";
 -  @Deprecated
 -  public static final String HOST_BALANCER_POOL_RECHECK_KEY = PROP_PREFIX
 -      + "balancer.host.regex.pool.check";
 -  public static final String HOST_BALANCER_REGEX_USING_IPS_KEY = PROP_PREFIX
 -      + "balancer.host.regex.is.ip";
 -  public static final String HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY = PROP_PREFIX
 -      + "balancer.host.regex.concurrent.migrations";
 +  public static final String HOST_BALANCER_REGEX_USING_IPS_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
 +      .getKey() + "balancer.host.regex.is.ip";
 +  public static final String HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
 +      .getKey() + "balancer.host.regex.concurrent.migrations";
    private static final int HOST_BALANCER_REGEX_MAX_MIGRATIONS_DEFAULT = 250;
    protected static final String DEFAULT_POOL = "HostTableLoadBalancer.ALL";
    private static final int DEFAULT_OUTSTANDING_MIGRATIONS = 0;
-   public static final String HOST_BALANCER_OUTSTANDING_MIGRATIONS_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-       .getKey() + "balancer.host.regex.max.outstanding.migrations";
+   public static final String HOST_BALANCER_OUTSTANDING_MIGRATIONS_KEY = PROP_PREFIX
+       + "balancer.host.regex.max.outstanding.migrations";
  
 -  protected long oobCheckMillis = AccumuloConfiguration.getTimeInMillis(HOST_BALANCER_OOB_DEFAULT);
 +  protected long oobCheckMillis = ConfigurationTypeHelper
 +      .getTimeInMillis(HOST_BALANCER_OOB_DEFAULT);
  
    private static final long ONE_HOUR = 60 * 60 * 1000;
    private static final Set<KeyExtent> EMPTY_MIGRATIONS = Collections.emptySet();
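
Put together, the keys above configure the balancer roughly like this, assuming the arbitrary
table property prefix resolves to table.custom. and using a hypothetical table/regex pair for
the per-table pool assignment.

    table.custom.balancer.host.regex.oob.period=5m
    table.custom.balancer.host.regex.is.ip=false
    table.custom.balancer.host.regex.concurrent.migrations=250
    table.custom.balancer.host.regex.max.outstanding.migrations=0
    # Hypothetical pool: tablets of table "trades" stay on matching tservers.
    table.custom.balancer.host.regex.trades=tserver-[0-9]+\.example\.com
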
diff --cc server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
index b7b6264,f2a4bd9..2aa07e1
--- a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
@@@ -457,12 -457,12 +457,12 @@@ public class TServerUtils 
      // this host, fail quickly and inform them to update
      // their configuration.
      if (!hostname.equals(fqdn)) {
 -      log.error("Expected hostname of '{}' but got '{}'. Ensure the entries in"
 -          + " the Accumulo hosts files (e.g. masters, slaves) are the FQDN for each"
 -          + " host when using SASL.", fqdn, hostname);
 +      log.error(
 +          "Expected hostname of '{}' but got '{}'. Ensure the entries in the Accumulo hosts files (e.g. masters, tservers) are the FQDN for each host when using SASL.",
 +          fqdn, hostname);
        transport.close();
-       throw new RuntimeException(
-           "SASL requires that the address the thrift server listens on is the same as the FQDN for this host");
+       throw new RuntimeException("SASL requires that the address the thrift"
+           + " server listens on is the same as the FQDN for this host");
      }
  
      final UserGroupInformation serverUser;
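
In practice the check above means the Accumulo hosts files must carry fully qualified names
when SASL is enabled; for example, a conf/tservers along these lines, with placeholder
hostnames:

    tserver1.example.com
    tserver2.example.com
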
diff --cc server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
index 6610fa6,321dd93..183725e
--- a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
@@@ -180,10 -179,12 +182,12 @@@ public class AuditedSecurityOperation e
      }
    }
  
-   public static final String CAN_SCAN_BATCH_AUDIT_TEMPLATE = "action: scan; targetTable: %s; authorizations: %s; range: %s; columns: %s; iterators: %s; iteratorOptions: %s;";
+   public static final String CAN_SCAN_BATCH_AUDIT_TEMPLATE = "action: scan;"
+       + " targetTable: %s; authorizations: %s; range: %s; columns: %s;"
+       + " iterators: %s; iteratorOptions: %s;";
  
    @Override
 -  public boolean canScan(TCredentials credentials, String tableId, String namespaceId,
 +  public boolean canScan(TCredentials credentials, Table.ID tableId, Namespace.ID namespaceId,
        Map<TKeyExtent,List<TRange>> tbatch, List<TColumn> tcolumns, List<IterInfo> ssiList,
        Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations)
        throws ThriftSecurityException {
@@@ -258,10 -262,11 +265,11 @@@
      }
    }
  
-   public static final String CAN_CREATE_TABLE_AUDIT_TEMPLATE = "action: createTable; targetTable: %s;";
+   public static final String CAN_CREATE_TABLE_AUDIT_TEMPLATE = "action:"
+       + " createTable; targetTable: %s;";
  
    @Override
 -  public boolean canCreateTable(TCredentials c, String tableName, String namespaceId)
 +  public boolean canCreateTable(TCredentials c, String tableName, Namespace.ID namespaceId)
        throws ThriftSecurityException {
      try {
        boolean result = super.canCreateTable(c, tableName, namespaceId);
@@@ -273,10 -278,11 +281,11 @@@
      }
    }
  
-   public static final String CAN_DELETE_TABLE_AUDIT_TEMPLATE = "action: deleteTable; targetTable: %s;";
+   public static final String CAN_DELETE_TABLE_AUDIT_TEMPLATE = "action:"
+       + " deleteTable; targetTable: %s;";
  
    @Override
 -  public boolean canDeleteTable(TCredentials c, String tableId, String namespaceId)
 +  public boolean canDeleteTable(TCredentials c, Table.ID tableId, Namespace.ID namespaceId)
        throws ThriftSecurityException {
      String tableName = getTableName(tableId);
      try {
@@@ -289,11 -295,12 +298,12 @@@
      }
    }
  
-   public static final String CAN_RENAME_TABLE_AUDIT_TEMPLATE = "action: renameTable; targetTable: %s; newTableName: %s;";
+   public static final String CAN_RENAME_TABLE_AUDIT_TEMPLATE = "action:"
+       + " renameTable; targetTable: %s; newTableName: %s;";
  
    @Override
 -  public boolean canRenameTable(TCredentials c, String tableId, String oldTableName,
 -      String newTableName, String namespaceId) throws ThriftSecurityException {
 +  public boolean canRenameTable(TCredentials c, Table.ID tableId, String oldTableName,
 +      String newTableName, Namespace.ID namespaceId) throws ThriftSecurityException {
      try {
        boolean result = super.canRenameTable(c, tableId, oldTableName, newTableName, namespaceId);
        audit(c, result, CAN_RENAME_TABLE_AUDIT_TEMPLATE, oldTableName, newTableName);
@@@ -304,10 -311,11 +314,11 @@@
      }
    }
  
-   public static final String CAN_SPLIT_TABLE_AUDIT_TEMPLATE = "action: splitTable; targetTable: %s; targetNamespace: %s;";
+   public static final String CAN_SPLIT_TABLE_AUDIT_TEMPLATE = "action:"
+       + " splitTable; targetTable: %s; targetNamespace: %s;";
  
    @Override
 -  public boolean canSplitTablet(TCredentials credentials, String table, String namespaceId)
 +  public boolean canSplitTablet(TCredentials credentials, Table.ID table, Namespace.ID namespaceId)
        throws ThriftSecurityException {
      try {
        boolean result = super.canSplitTablet(credentials, table, namespaceId);
@@@ -334,10 -343,11 +346,11 @@@
      }
    }
  
-   public static final String CAN_FLUSH_TABLE_AUDIT_TEMPLATE = "action: flushTable; targetTable: %s; targetNamespace: %s;";
+   public static final String CAN_FLUSH_TABLE_AUDIT_TEMPLATE = "action:"
+       + " flushTable; targetTable: %s; targetNamespace: %s;";
  
    @Override
 -  public boolean canFlush(TCredentials c, String tableId, String namespaceId)
 +  public boolean canFlush(TCredentials c, Table.ID tableId, Namespace.ID namespaceId)
        throws ThriftSecurityException {
      try {
        boolean result = super.canFlush(c, tableId, namespaceId);
@@@ -349,10 -359,11 +362,11 @@@
      }
    }
  
-   public static final String CAN_ALTER_TABLE_AUDIT_TEMPLATE = "action: alterTable; targetTable: %s; targetNamespace: %s;";
+   public static final String CAN_ALTER_TABLE_AUDIT_TEMPLATE = "action:"
+       + " alterTable; targetTable: %s; targetNamespace: %s;";
  
    @Override
 -  public boolean canAlterTable(TCredentials c, String tableId, String namespaceId)
 +  public boolean canAlterTable(TCredentials c, Table.ID tableId, Namespace.ID namespaceId)
        throws ThriftSecurityException {
      try {
        boolean result = super.canAlterTable(c, tableId, namespaceId);
@@@ -364,12 -375,12 +378,13 @@@
      }
    }
  
-   public static final String CAN_CLONE_TABLE_AUDIT_TEMPLATE = "action: cloneTable; targetTable: %s; newTableName: %s";
+   public static final String CAN_CLONE_TABLE_AUDIT_TEMPLATE = "action:"
+       + " cloneTable; targetTable: %s; newTableName: %s";
  
    @Override
 -  public boolean canCloneTable(TCredentials c, String tableId, String tableName,
 -      String destinationNamespaceId, String sourceNamespaceId) throws ThriftSecurityException {
 +  public boolean canCloneTable(TCredentials c, Table.ID tableId, String tableName,
 +      Namespace.ID destinationNamespaceId, Namespace.ID sourceNamespaceId)
 +      throws ThriftSecurityException {
      String oldTableName = getTableName(tableId);
      try {
        boolean result = super.canCloneTable(c, tableId, tableName, destinationNamespaceId,
@@@ -382,11 -393,12 +397,12 @@@
      }
    }
  
-   public static final String CAN_DELETE_RANGE_AUDIT_TEMPLATE = "action: deleteData; targetTable: %s; startRange: %s; endRange: %s;";
+   public static final String CAN_DELETE_RANGE_AUDIT_TEMPLATE = "action:"
+       + " deleteData; targetTable: %s; startRange: %s; endRange: %s;";
  
    @Override
 -  public boolean canDeleteRange(TCredentials c, String tableId, String tableName, Text startRow,
 -      Text endRow, String namespaceId) throws ThriftSecurityException {
 +  public boolean canDeleteRange(TCredentials c, Table.ID tableId, String tableName, Text startRow,
 +      Text endRow, Namespace.ID namespaceId) throws ThriftSecurityException {
      try {
        boolean result = super.canDeleteRange(c, tableId, tableName, startRow, endRow, namespaceId);
        audit(c, result, CAN_DELETE_RANGE_AUDIT_TEMPLATE, tableName, startRow.toString(),
@@@ -399,11 -411,12 +415,12 @@@
      }
    }
  
-   public static final String CAN_BULK_IMPORT_AUDIT_TEMPLATE = "action: bulkImport; targetTable: %s; dataDir: %s; failDir: %s;";
+   public static final String CAN_BULK_IMPORT_AUDIT_TEMPLATE = "action:"
+       + " bulkImport; targetTable: %s; dataDir: %s; failDir: %s;";
  
    @Override
 -  public boolean canBulkImport(TCredentials c, String tableId, String tableName, String dir,
 -      String failDir, String namespaceId) throws ThriftSecurityException {
 +  public boolean canBulkImport(TCredentials c, Table.ID tableId, String tableName, String dir,
 +      String failDir, Namespace.ID namespaceId) throws ThriftSecurityException {
      try {
        boolean result = super.canBulkImport(c, tableId, namespaceId);
        audit(c, result, CAN_BULK_IMPORT_AUDIT_TEMPLATE, tableName, dir, failDir);
@@@ -414,10 -427,11 +431,11 @@@
      }
    }
  
-   public static final String CAN_COMPACT_TABLE_AUDIT_TEMPLATE = "action: compactTable; targetTable: %s; targetNamespace: %s;";
+   public static final String CAN_COMPACT_TABLE_AUDIT_TEMPLATE = "action:"
+       + " compactTable; targetTable: %s; targetNamespace: %s;";
  
    @Override
 -  public boolean canCompact(TCredentials c, String tableId, String namespaceId)
 +  public boolean canCompact(TCredentials c, Table.ID tableId, Namespace.ID namespaceId)
        throws ThriftSecurityException {
      try {
        boolean result = super.canCompact(c, tableId, namespaceId);
@@@ -504,11 -521,12 +525,12 @@@
      }
    }
  
-   public static final String CAN_GRANT_TABLE_AUDIT_TEMPLATE = "action: grantTable; targetUser: %s; targetTable: %s; targetNamespace: %s;";
+   public static final String CAN_GRANT_TABLE_AUDIT_TEMPLATE = "action:"
+       + " grantTable; targetUser: %s; targetTable: %s; targetNamespace: %s;";
  
    @Override
 -  public boolean canGrantTable(TCredentials c, String user, String table, String namespaceId)
 -      throws ThriftSecurityException {
 +  public boolean canGrantTable(TCredentials c, String user, Table.ID table,
 +      Namespace.ID namespaceId) throws ThriftSecurityException {
      try {
        boolean result = super.canGrantTable(c, user, table, namespaceId);
        audit(c, result, CAN_GRANT_TABLE_AUDIT_TEMPLATE, user, table, namespaceId);
@@@ -534,11 -553,12 +557,12 @@@
      }
    }
  
-   public static final String CAN_REVOKE_TABLE_AUDIT_TEMPLATE = "action: revokeTable; targetUser: %s; targetTable %s; targetNamespace: %s;";
+   public static final String CAN_REVOKE_TABLE_AUDIT_TEMPLATE = "action:"
+       + " revokeTable; targetUser: %s; targetTable %s; targetNamespace: %s;";
  
    @Override
 -  public boolean canRevokeTable(TCredentials c, String user, String table, String namespaceId)
 -      throws ThriftSecurityException {
 +  public boolean canRevokeTable(TCredentials c, String user, Table.ID table,
 +      Namespace.ID namespaceId) throws ThriftSecurityException {
      try {
        boolean result = super.canRevokeTable(c, user, table, namespaceId);
        audit(c, result, CAN_REVOKE_TABLE_AUDIT_TEMPLATE, user, table, namespaceId);
@@@ -565,11 -586,12 +590,12 @@@
      }
    }
  
-   public static final String CAN_EXPORT_AUDIT_TEMPLATE = "action: export; targetTable: %s; dataDir: %s;";
+   public static final String CAN_EXPORT_AUDIT_TEMPLATE = "action: export;"
+       + " targetTable: %s; dataDir: %s;";
  
    @Override
 -  public boolean canExport(TCredentials credentials, String tableId, String tableName,
 -      String exportDir, String namespaceId) throws ThriftSecurityException {
 +  public boolean canExport(TCredentials credentials, Table.ID tableId, String tableName,
 +      String exportDir, Namespace.ID namespaceId) throws ThriftSecurityException {
  
      try {
        boolean result = super.canExport(credentials, tableId, tableName, exportDir, namespaceId);
@@@ -608,11 -631,12 +635,12 @@@
      }
    }
  
-   public static final String GRANT_TABLE_PERMISSION_AUDIT_TEMPLATE = "action: grantTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
+   public static final String GRANT_TABLE_PERMISSION_AUDIT_TEMPLATE = "action:"
+       + " grantTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
  
    @Override
 -  public void grantTablePermission(TCredentials credentials, String user, String tableId,
 -      TablePermission permission, String namespaceId) throws ThriftSecurityException {
 +  public void grantTablePermission(TCredentials credentials, String user, Table.ID tableId,
 +      TablePermission permission, Namespace.ID namespaceId) throws ThriftSecurityException {
      String tableName = getTableName(tableId);
      try {
        super.grantTablePermission(credentials, user, tableId, permission, namespaceId);
@@@ -638,11 -663,12 +667,12 @@@
      }
    }
  
-   public static final String REVOKE_TABLE_PERMISSION_AUDIT_TEMPLATE = "action: revokeTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
+   public static final String REVOKE_TABLE_PERMISSION_AUDIT_TEMPLATE = "action:"
+       + " revokeTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
  
    @Override
 -  public void revokeTablePermission(TCredentials credentials, String user, String tableId,
 -      TablePermission permission, String namespaceId) throws ThriftSecurityException {
 +  public void revokeTablePermission(TCredentials credentials, String user, Table.ID tableId,
 +      TablePermission permission, Namespace.ID namespaceId) throws ThriftSecurityException {
      String tableName = getTableName(tableId);
      try {
        super.revokeTablePermission(credentials, user, tableId, permission, namespaceId);
@@@ -668,11 -695,12 +699,12 @@@
      }
    }
  
-   public static final String CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE = "action: %s; targetTable: %s;";
+   public static final String CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE = "action:"
+       + " %s; targetTable: %s;";
  
    @Override
 -  public boolean canOnlineOfflineTable(TCredentials credentials, String tableId, FateOperation op,
 -      String namespaceId) throws ThriftSecurityException {
 +  public boolean canOnlineOfflineTable(TCredentials credentials, Table.ID tableId, FateOperation op,
 +      Namespace.ID namespaceId) throws ThriftSecurityException {
      String tableName = getTableName(tableId);
      String operation = null;
      if (op == FateOperation.TABLE_ONLINE)
@@@ -691,10 -719,11 +723,11 @@@
      }
    }
  
-   public static final String CAN_MERGE_TABLE_AUDIT_TEMPLATE = "action: mergeTable; targetTable: %s; targetNamespace: %s;";
+   public static final String CAN_MERGE_TABLE_AUDIT_TEMPLATE = "action:"
+       + " mergeTable; targetTable: %s; targetNamespace: %s;";
  
    @Override
 -  public boolean canMerge(TCredentials c, String tableId, String namespaceId)
 +  public boolean canMerge(TCredentials c, Table.ID tableId, Namespace.ID namespaceId)
        throws ThriftSecurityException {
      try {
        boolean result = super.canMerge(c, tableId, namespaceId);
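
For reference, these audit templates are ordinary java.util.Formatter strings; the reflow above only splits the long literals into concatenated parts, so the rendered messages are byte-for-byte unchanged. A minimal sketch of how such a template is expanded, with the body of the audit(...) helper assumed rather than taken from this commit:

    import java.util.Date;

    public class AuditSketch {
      static final String CAN_MERGE_TABLE_AUDIT_TEMPLATE =
          "action: mergeTable; targetTable: %s; targetNamespace: %s;";

      // Hypothetical stand-in for the audit(...) helper called above.
      static void audit(String principal, boolean allowed, String template, Object... args) {
        String detail = String.format(template, args);
        System.out.printf("%s [%s] %s: %s%n",
            new Date(), principal, allowed ? "permitted" : "denied", detail);
      }

      public static void main(String[] args) {
        audit("root", true, CAN_MERGE_TABLE_AUDIT_TEMPLATE, "t1", "ns1");
      }
    }
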
diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 4905205,472b414..91f5f6c
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@@ -1048,12 -1033,13 +1049,11 @@@ public class TabletServer extends Accum
                us.walogTimes.addStat(t2 - t1);
                updateWalogWriteTime((t2 - t1));
                break;
 -            } catch (IOException ex) {
 -              log.warn("logging mutations failed, retrying");
 -            } catch (FSError ex) { // happens when DFS is localFS
 +            } catch (IOException | FSError ex) {
                log.warn("logging mutations failed, retrying");
              } catch (Throwable t) {
-               log.error(
-                   "Unknown exception logging mutations, counts for mutations in flight not decremented!",
-                   t);
+               log.error("Unknown exception logging mutations, counts"
+                   + " for mutations in flight not decremented!", t);
                throw new RuntimeException(t);
              }
            }
@@@ -1370,12 -1356,13 +1370,11 @@@
              long t2 = System.currentTimeMillis();
              updateWalogWriteTime(t2 - t1);
              break;
 -          } catch (IOException ex) {
 -            log.warn("logging mutations failed, retrying");
 -          } catch (FSError ex) { // happens when DFS is localFS
 +          } catch (IOException | FSError ex) {
              log.warn("logging mutations failed, retrying");
            } catch (Throwable t) {
-             log.error(
-                 "Unknown exception logging mutations, counts for mutations in flight not decremented!",
-                 t);
+             log.error("Unknown exception logging mutations, counts for"
+                 + " mutations in flight not decremented!", t);
              throw new RuntimeException(t);
            }
          }
@@@ -1617,12 -1603,11 +1616,11 @@@
                SecurityErrorCode.PERMISSION_DENIED);
          }
        } catch (ThriftSecurityException e) {
 -        log.warn("Got " + request + " message from unauthenticatable user: " + e.getUser());
 +        log.warn("Got {} message from unauthenticatable user: {}", request, e.getUser());
          if (getCredentials().getToken().getClass().getName()
              .equals(credentials.getTokenClassName())) {
-           log.error(
-               "Got message from a service with a mismatched configuration. Please ensure a compatible configuration.",
-               e);
+           log.error("Got message from a service with a mismatched configuration."
+               + " Please ensure a compatible configuration.", e);
          }
          throw e;
        }
@@@ -1965,129 -1951,10 +1963,129 @@@
      public void removeLogs(TInfo tinfo, TCredentials credentials, List<String> filenames)
          throws TException {
        log.warn("Garbage collector is attempting to remove logs through the tablet server");
-       log.warn(
-           "This is probably because your file Garbage Collector is an older version than your tablet servers.\n"
-               + "Restart your file Garbage Collector.");
+       log.warn("This is probably because your file"
+           + " Garbage Collector is an older version than your tablet servers.\n"
+           + "Restart your file Garbage Collector.");
      }
 +
 +    private TSummaries getSummaries(Future<SummaryCollection> future) throws TimeoutException {
 +      try {
 +        SummaryCollection sc = future.get(MAX_TIME_TO_WAIT_FOR_SCAN_RESULT_MILLIS,
 +            TimeUnit.MILLISECONDS);
 +        return sc.toThrift();
 +      } catch (InterruptedException e) {
 +        Thread.currentThread().interrupt();
 +        throw new RuntimeException(e);
 +      } catch (ExecutionException e) {
 +        throw new RuntimeException(e);
 +      }
 +    }
 +
 +    private TSummaries handleTimeout(long sessionId) {
 +      long timeout = TabletServer.this.getConfiguration()
 +          .getTimeInMillis(Property.TSERV_CLIENT_TIMEOUT);
 +      sessionManager.removeIfNotAccessed(sessionId, timeout);
 +      return new TSummaries(false, sessionId, -1, -1, null);
 +    }
 +
 +    private TSummaries startSummaryOperation(TCredentials credentials,
 +        Future<SummaryCollection> future) {
 +      try {
 +        return getSummaries(future);
 +      } catch (TimeoutException e) {
 +        long sid = sessionManager.createSession(new SummarySession(credentials, future), false);
 +        while (sid == 0) {
 +          sessionManager.removeSession(sid);
 +          sid = sessionManager.createSession(new SummarySession(credentials, future), false);
 +        }
 +        return handleTimeout(sid);
 +      }
 +    }
 +
 +    @Override
 +    public TSummaries startGetSummaries(TInfo tinfo, TCredentials credentials,
 +        TSummaryRequest request) throws ThriftSecurityException, ThriftTableOperationException,
 +        NoSuchScanIDException, TException {
 +      Namespace.ID namespaceId;
 +      Table.ID tableId = Table.ID.of(request.getTableId());
 +      try {
 +        namespaceId = Tables.getNamespaceId(TabletServer.this.getInstance(), tableId);
 +      } catch (TableNotFoundException e1) {
 +        throw new ThriftTableOperationException(tableId.canonicalID(), null, null,
 +            TableOperationExceptionType.NOTFOUND, null);
 +      }
 +
 +      if (!security.canGetSummaries(credentials, tableId, namespaceId)) {
 +        throw new AccumuloSecurityException(credentials.getPrincipal(),
 +            SecurityErrorCode.PERMISSION_DENIED).asThriftException();
 +      }
 +
 +      ServerConfigurationFactory factory = TabletServer.this.getServerConfigurationFactory();
 +      ExecutorService es = resourceManager.getSummaryPartitionExecutor();
 +      Future<SummaryCollection> future = new Gatherer(TabletServer.this, request,
 +          factory.getTableConfiguration(tableId)).gather(es);
 +
 +      return startSummaryOperation(credentials, future);
 +    }
 +
 +    @Override
 +    public TSummaries startGetSummariesForPartition(TInfo tinfo, TCredentials credentials,
 +        TSummaryRequest request, int modulus, int remainder)
 +        throws ThriftSecurityException, NoSuchScanIDException, TException {
 +      // do not expect users to call this directly, expect other tservers to call this method
 +      if (!security.canPerformSystemActions(credentials)) {
 +        throw new AccumuloSecurityException(credentials.getPrincipal(),
 +            SecurityErrorCode.PERMISSION_DENIED).asThriftException();
 +      }
 +
 +      ServerConfigurationFactory factory = TabletServer.this.getServerConfigurationFactory();
 +      ExecutorService spe = resourceManager.getSummaryRemoteExecutor();
 +      Future<SummaryCollection> future = new Gatherer(TabletServer.this, request,
 +          factory.getTableConfiguration(Table.ID.of(request.getTableId()))).processPartition(spe,
 +              modulus, remainder);
 +
 +      return startSummaryOperation(credentials, future);
 +    }
 +
 +    @Override
 +    public TSummaries startGetSummariesFromFiles(TInfo tinfo, TCredentials credentials,
 +        TSummaryRequest request, Map<String,List<TRowRange>> files)
 +        throws ThriftSecurityException, NoSuchScanIDException, TException {
 +      // do not expect users to call this directly, expect other tservers to call this method
 +      if (!security.canPerformSystemActions(credentials)) {
 +        throw new AccumuloSecurityException(credentials.getPrincipal(),
 +            SecurityErrorCode.PERMISSION_DENIED).asThriftException();
 +      }
 +
 +      ExecutorService srp = resourceManager.getSummaryRetrievalExecutor();
 +      TableConfiguration tableCfg = confFactory
 +          .getTableConfiguration(Table.ID.of(request.getTableId()));
 +      BlockCache summaryCache = resourceManager.getSummaryCache();
 +      BlockCache indexCache = resourceManager.getIndexCache();
 +      FileSystemResolver volMgr = p -> fs.getVolumeByPath(p).getFileSystem();
 +      Future<SummaryCollection> future = new Gatherer(TabletServer.this, request, tableCfg)
 +          .processFiles(volMgr, files, summaryCache, indexCache, srp);
 +
 +      return startSummaryOperation(credentials, future);
 +    }
 +
 +    @Override
 +    public TSummaries contiuneGetSummaries(TInfo tinfo, long sessionId)
 +        throws NoSuchScanIDException, TException {
 +      SummarySession session = (SummarySession) sessionManager.getSession(sessionId);
 +      if (session == null) {
 +        throw new NoSuchScanIDException();
 +      }
 +
 +      Future<SummaryCollection> future = session.getFuture();
 +      try {
 +        TSummaries tsums = getSummaries(future);
 +        sessionManager.removeSession(sessionId);
 +        return tsums;
 +      } catch (TimeoutException e) {
 +        return handleTimeout(sessionId);
 +      }
 +    }
    }
  
    private class SplitRunner implements Runnable {
@@@ -3509,10 -3392,10 +3506,10 @@@
  
    private static final String MAJC_READ_LIMITER_KEY = "tserv_majc_read";
    private static final String MAJC_WRITE_LIMITER_KEY = "tserv_majc_write";
-   private final SharedRateLimiterFactory.RateProvider rateProvider = new SharedRateLimiterFactory.RateProvider() {
+   private final RateProvider rateProvider = new RateProvider() {
      @Override
      public long getDesiredRate() {
 -      return getConfiguration().getMemoryInBytes(Property.TSERV_MAJC_THROUGHPUT);
 +      return getConfiguration().getAsBytes(Property.TSERV_MAJC_THROUGHPUT);
      }
    };
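
The new summary endpoints above share one control-flow idea: block briefly on the gather Future, and on timeout park the Future under a session id so the client can poll for it later. A simplified, self-contained sketch of that pattern (String in place of TSummaries, a plain map in place of the SessionManager; all names here are assumed):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class SummaryWaitSketch {
      static final long MAX_WAIT_MS = 1000;

      static String startOrPark(Future<String> future,
          ConcurrentHashMap<Long,Future<String>> sessions) {
        try {
          // Happy path: the result arrives within the window and is returned inline.
          return future.get(MAX_WAIT_MS, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
          // Otherwise hand back a session id the client can use to continue.
          long sid = System.nanoTime();
          sessions.put(sid, future);
          return "pending:" + sid;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        } catch (ExecutionException e) {
          throw new RuntimeException(e);
        }
      }

      public static void main(String[] args) throws Exception {
        ExecutorService es = Executors.newSingleThreadExecutor();
        ConcurrentHashMap<Long,Future<String>> sessions = new ConcurrentHashMap<>();
        Future<String> f = es.submit(() -> { Thread.sleep(50); return "summaries"; });
        System.out.println(startOrPark(f, sessions));
        es.shutdown();
      }
    }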
  
diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index aeea1e8,af822ef..63397ff
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@@ -492,17 -492,17 +492,16 @@@ class DatafileManager 
      removeFilesAfterScan(filesInUseByScans);
  
      if (absMergeFile != null)
 -      log.log(TLevel.TABLET_HIST,
 -          tablet.getExtent() + " MinC [" + absMergeFile + ",memory] -> " + newDatafile);
 +      log.debug("TABLET_HIST {} MinC [{},memory] -> {}", tablet.getExtent(), absMergeFile,
 +          newDatafile);
      else
 -      log.log(TLevel.TABLET_HIST, tablet.getExtent() + " MinC [memory] -> " + newDatafile);
 +      log.debug("TABLET_HIST {} MinC [memory] -> {}", tablet.getExtent(), newDatafile);
      log.debug(String.format("MinC finish lock %.2f secs %s", (t2 - t1) / 1000.0,
          tablet.getExtent().toString()));
 -    long splitSize = tablet.getTableConfiguration()
 -        .getMemoryInBytes(Property.TABLE_SPLIT_THRESHOLD);
 +    long splitSize = tablet.getTableConfiguration().getAsBytes(Property.TABLE_SPLIT_THRESHOLD);
      if (dfv.getSize() > splitSize) {
-       log.debug(String.format(
-           "Minor Compaction wrote out file larger than split threshold.  split threshold = %,d  file size = %,d",
-           splitSize, dfv.getSize()));
+       log.debug(String.format("Minor Compaction wrote out file larger than split threshold."
+           + " split threshold = %,d  file size = %,d", splitSize, dfv.getSize()));
      }
    }
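
Switching from log.log(TLevel.TABLET_HIST, ... + ...) to SLF4J's {} placeholders also defers message construction: the arguments are only formatted when DEBUG is enabled. A minimal sketch, assuming slf4j-api plus any logging binding on the classpath:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogSketch {
      private static final Logger log = LoggerFactory.getLogger(LogSketch.class);

      public static void main(String[] args) {
        String extent = "2;m;a";                               // invented example values
        String newDatafile = "/accumulo/tables/2/t-0001/F0002.rf";
        // Rendered only if DEBUG is enabled; no unconditional concatenation.
        log.debug("TABLET_HIST {} MinC [memory] -> {}", extent, newDatafile);
      }
    }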
  
diff --cc shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
index aa8bbd8,2fab019..5278c76
--- a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
@@@ -174,9 -175,13 +175,10 @@@ public class ShellOptionsJC 
        converter = FileConverter.class)
    private File execFileVerbose;
  
 -  @Parameter(names = {"-h", "--hdfsZooInstance"}, description = "use hdfs zoo instance")
 -  private boolean hdfsZooInstance;
 -
    @Parameter(names = {"-z", "--zooKeeperInstance"},
-       description = "use a zookeeper instance with the given instance name and list of zoo hosts. "
-           + "Syntax: -z <zoo-instance-name> <zoo-hosts>. Where <zoo-hosts> is a comma separated list of zookeeper servers.",
+       description = "use a zookeeper instance with the given instance name and"
+           + " list of zoo hosts. Syntax: -z <zoo-instance-name> <zoo-hosts>. Where"
+           + " <zoo-hosts> is a comma separated list of zookeeper servers.",
        arity = 2)
    private List<String> zooKeeperInstance = new ArrayList<>();
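
The -z option keeps arity = 2, so JCommander consumes exactly two tokens after the flag: the instance name, then the comma separated zookeeper hosts. A minimal sketch of that behaviour, assuming jcommander on the classpath:

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    import java.util.ArrayList;
    import java.util.List;

    public class ZkOptSketch {
      // arity = 2: both values land in the list, in order.
      @Parameter(names = {"-z", "--zooKeeperInstance"}, arity = 2,
          description = "zookeeper instance name and host list")
      List<String> zooKeeperInstance = new ArrayList<>();

      public static void main(String[] args) {
        ZkOptSketch opts = new ZkOptSketch();
        JCommander.newBuilder().addObject(opts).build()
            .parse("-z", "myInstance", "zk1:2181,zk2:2181");
        System.out.println(opts.zooKeeperInstance); // [myInstance, zk1:2181,zk2:2181]
      }
    }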
  
diff --cc shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
index d65ecc4,da782fa..dfb21e9
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
@@@ -205,44 -206,49 +208,55 @@@ public class CompactCommand extends Tab
      cancelOpt = new Option(null, "cancel", false, "cancel user initiated compactions");
      opts.addOption(cancelOpt);
  
 +    enoSummaryOption = new Option(null, "sf-no-summary", false,
 +        "Select files that do not have the summaries specified in the table configuration.");
 +    opts.addOption(enoSummaryOption);
 +    extraSummaryOption = new Option(null, "sf-extra-summary", false,
 +        "Select files that have summary information which exceeds the tablet's boundaries.");
 +    opts.addOption(extraSummaryOption);
      enoSampleOption = new Option(null, "sf-no-sample", false,
-         "Select files that have no sample data or sample data that differes from the table configuration.");
+         "Select files that have no sample data or sample data that differs"
+             + " from the table configuration.");
      opts.addOption(enoSampleOption);
      enameOption = newLAO("sf-ename",
-         "Select files using regular expression to match file names. Only matches against last part of path.");
+         "Select files using regular expression to match file names. Only"
+             + " matches against last part of path.");
      opts.addOption(enameOption);
      epathOption = newLAO("sf-epath",
-         "Select files using regular expression to match file paths to compact. Matches against full path.");
+         "Select files using regular expression to match file paths to compact."
+             + " Matches against full path.");
      opts.addOption(epathOption);
      sizeLtOption = newLAO("sf-lt-esize",
-         "Selects files less than specified size.  Uses the estimated size of file in metadata table.  Can use K,M, and G suffixes");
+         "Selects files less than specified size.  Uses the estimated size of"
+             + " file in metadata table. Can use K,M, and G suffixes");
      opts.addOption(sizeLtOption);
      sizeGtOption = newLAO("sf-gt-esize",
-         "Selects files greater than specified size.  Uses the estimated size of file in metadata table.  Can use K,M, and G suffixes");
+         "Selects files greater than specified size. Uses the estimated size of"
+             + " file in metadata table. Can use K,M, and G suffixes");
      opts.addOption(sizeGtOption);
      minFilesOption = newLAO("min-files",
-         "Only compacts if at least the specified number of files are selected.  When no file selection criteria are given, all files are selected.");
+         "Only compacts if at least the specified number of files are selected."
+             + " When no file selection criteria are given, all files are selected.");
      opts.addOption(minFilesOption);
      outBlockSizeOpt = newLAO("out-data-bs",
-         "Rfile data block size to use for compaction output file.  Can use K,M, and G suffixes. Uses table settings if not specified.");
+         "Rfile data block size to use for compaction output file. Can use K,M,"
+             + " and G suffixes. Uses table settings if not specified.");
      opts.addOption(outBlockSizeOpt);
      outHdfsBlockSizeOpt = newLAO("out-hdfs-bs",
-         "HDFS block size to use for compaction output file.  Can use K,M, and G suffixes. Uses table settings if not specified.");
+         "HDFS block size to use for compaction output file. Can use K,M, and G"
+             + " suffixes. Uses table settings if not specified.");
      opts.addOption(outHdfsBlockSizeOpt);
      outIndexBlockSizeOpt = newLAO("out-index-bs",
-         "Rfile index block size to use for compaction output file.  Can use K,M, and G suffixes. Uses table settings if not specified.");
+         "Rfile index block size to use for compaction output file. Can use"
+             + " K,M, and G suffixes. Uses table settings if not specified.");
      opts.addOption(outIndexBlockSizeOpt);
      outCompressionOpt = newLAO("out-compress",
-         "Compression to use for compaction output file. Either snappy, gz, lzo, or none. Uses table settings if not specified.");
+         "Compression to use for compaction output file. Either snappy, gz, lzo,"
+             + " or none. Uses table settings if not specified.");
      opts.addOption(outCompressionOpt);
      outReplication = newLAO("out-replication",
-         "HDFS replication to use for compaction output file. Uses table settings if not specified.");
+         "HDFS replication to use for compaction output file. Uses table"
+             + " settings if not specified.");
      opts.addOption(outReplication);
  
      return opts;
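
newLAO above appears to build a long-only commons-cli Option that takes a single argument; under that assumption (the helper's body is guessed, not copied from this commit), a minimal sketch:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.DefaultParser;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;

    public class CompactOptsSketch {
      // Assumed shape of newLAO: no short name, long name, one argument.
      static Option newLAO(String longName, String description) {
        return new Option(null, longName, true, description);
      }

      public static void main(String[] args) throws ParseException {
        Options opts = new Options();
        opts.addOption(newLAO("min-files",
            "Only compacts if at least the specified number of files are selected."));
        CommandLine cl = new DefaultParser().parse(opts,
            new String[] {"--min-files", "3"});
        System.out.println(cl.getOptionValue("min-files")); // prints 3
      }
    }
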
diff --cc test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
index 983e022,82b8ae9..c4c76d3
--- a/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
@@@ -94,11 -95,11 +94,9 @@@ public class InMemoryMapIT 
      if (!NativeMap.isLoaded()) {
        fail("Missing the native library from " + nativeMapLocation.getAbsolutePath()
            + "\nYou need to build the libaccumulo binary first. "
-           + "\nTry running 'mvn clean install -Dit.test=InMemoryMapIT -Dtest=foo -DfailIfNoTests=false -Dfindbugs.skip -Dcheckstyle.skip'");
-       // afterwards, you can run the following
-       // mvn clean verify -Dit.test=InMemoryMapIT -Dtest=foo -DfailIfNoTests=false -Dfindbugs.skip
-       // -Dcheckstyle.skip -pl :accumulo-test
+           + "\nTry running 'mvn clean verify -Dit.test=InMemoryMapIT -Dtest=foo"
+           + " -DfailIfNoTests=false -Dfindbugs.skip -Dcheckstyle.skip'");
      }
 -    log.debug("Native map loaded");
 -
    }
  
    @Test
diff --cc test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
index 0bbcbc2,1533392..9c5b57f
--- a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
@@@ -67,12 -67,12 +67,12 @@@ public class RecoveryWithEmptyRFileIT e
  
    @Test
    public void replaceMissingRFile() throws Exception {
-     log.info(
-         "Ingest some data, verify it was stored properly, replace an underlying rfile with an empty one and verify we can scan.");
+     log.info("Ingest some data, verify it was stored properly, replace an"
+         + " underlying rfile with an empty one and verify we can scan.");
      Connector connector = getConnector();
      String tableName = getUniqueNames(1)[0];
 -    ReadWriteIT.ingest(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
 -    ReadWriteIT.verify(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
 +    ReadWriteIT.ingest(connector, "root", ROWS, COLS, 50, 0, tableName);
 +    ReadWriteIT.verify(connector, "root", ROWS, COLS, 50, 0, tableName);
  
      connector.tableOperations().flush(tableName, null, null, true);
      connector.tableOperations().offline(tableName, true);
diff --cc test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
index 01814bc,90dcb6e..476e061
--- a/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
@@@ -65,10 -66,11 +65,9 @@@ public class WatchTheWatchCountIT exten
          if (total > MIN && total < MAX) {
            break;
          }
-         log.debug(
-             "Expected number of watchers to be contained in ({}, {}), but actually was {}. Sleeping and retrying",
-             MIN, MAX, total);
+         log.debug("Expected number of watchers to be contained in ({}, {}), but"
+             + " actually was {}. Sleeping and retrying", MIN, MAX, total);
          Thread.sleep(5000);
 -      } finally {
 -        socket.close();
        }
      }
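
The loop above is a bounded poll: read the watcher total, accept it if it falls strictly inside (MIN, MAX), otherwise log, sleep, and retry. A standalone sketch of the same pattern, with the bounds and the count supplier invented for illustration:

    import java.util.function.LongSupplier;

    public class PollSketch {
      static long waitForRange(LongSupplier total, long min, long max, int attempts)
          throws InterruptedException {
        for (int i = 0; i < attempts; i++) {
          long t = total.getAsLong();
          if (t > min && t < max) {
            return t;
          }
          System.out.printf("Expected (%d, %d), was %d; sleeping and retrying%n", min, max, t);
          Thread.sleep(100);
        }
        throw new IllegalStateException("count never settled into range");
      }

      public static void main(String[] args) throws InterruptedException {
        long[] fake = {5, 20, 60}; // pretend watcher counts over successive polls
        int[] i = {0};
        System.out.println(waitForRange(() -> fake[Math.min(i[0]++, fake.length - 1)],
            50, 100, 10));
      }
    }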
  
diff --cc test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
index 03d3db5,2cdc853..8172bc2
--- a/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
@@@ -104,14 -101,15 +104,15 @@@ public class FinishedWorkUpdaterIT exte
      ReplicationTable.setOnline(conn);
  
      String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
-     // @formatter:off
-     Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(false).build(),
-         stat2 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(false).build(),
-         stat3 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(false).build();
-     ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1")),
-         target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1")),
-         target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
-     // @formatter:on
+     Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true)
+         .setInfiniteEnd(false).build();
+     Status stat2 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true)
+         .setInfiniteEnd(false).build();
+     Status stat3 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true)
+         .setInfiniteEnd(false).build();
 -    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", "1");
 -    ReplicationTarget target2 = new ReplicationTarget("peer2", "table2", "1");
 -    ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", "1");
++    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1"));
++    ReplicationTarget target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1"));
++    ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
  
      // Create a single work record for a file to some peer
      BatchWriter bw = ReplicationTable.getBatchWriter(conn);
@@@ -148,14 -144,15 +149,15 @@@
      ReplicationTable.setOnline(conn);
  
      String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
-     // @formatter:off
-     Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(true).build(),
-         stat2 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(true).build(),
-         stat3 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(true).build();
-     ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1")),
-         target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1")),
-         target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
-     // @formatter:on
+     Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true)
+         .setInfiniteEnd(true).build();
+     Status stat2 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(true)
+         .build();
+     Status stat3 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true)
+         .setInfiniteEnd(true).build();
 -    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", "1");
 -    ReplicationTarget target2 = new ReplicationTarget("peer2", "table2", "1");
 -    ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", "1");
++    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1"));
++    ReplicationTarget target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1"));
++    ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
  
      // Create a single work record for a file to some peer
      BatchWriter bw = ReplicationTable.getBatchWriter(conn);
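
The Status values in these tests come from a protobuf-generated builder; the chained setBegin/setEnd/setClosed/setInfiniteEnd calls are the only surface the tests rely on. A hand-rolled stand-in that mirrors just that surface (the real class is generated from the replication .proto, not written by hand):

    public class StatusSketch {
      static final class Status {
        final long begin, end;
        final boolean closed, infiniteEnd;

        private Status(Builder b) {
          begin = b.begin;
          end = b.end;
          closed = b.closed;
          infiniteEnd = b.infiniteEnd;
        }

        static Builder newBuilder() {
          return new Builder();
        }

        static final class Builder {
          long begin, end;
          boolean closed, infiniteEnd;

          Builder setBegin(long v) { begin = v; return this; }
          Builder setEnd(long v) { end = v; return this; }
          Builder setClosed(boolean v) { closed = v; return this; }
          Builder setInfiniteEnd(boolean v) { infiniteEnd = v; return this; }
          Status build() { return new Status(this); }
        }

        @Override
        public String toString() {
          return "Status(begin=" + begin + ", end=" + end + ", closed=" + closed
              + ", infiniteEnd=" + infiniteEnd + ")";
        }
      }

      public static void main(String[] args) {
        Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000)
            .setClosed(true).setInfiniteEnd(false).build();
        System.out.println(stat1);
      }
    }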

-- 
To stop receiving notification emails like this one, please contact
ctubbsii@apache.org.