You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@accumulo.apache.org by mm...@apache.org on 2017/09/05 16:15:03 UTC
[accumulo] 01/02: ACCUMULO-3652 Refactor for slf4j without string
concatenation
This is an automated email from the ASF dual-hosted git repository.
mmiller pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git
commit 559882ada9ea74bfeaa27425eafe8288510b8f04
Author: Bob Thorman <rt...@att.com>
AuthorDate: Mon May 4 08:23:51 2015 -0500
ACCUMULO-3652 Refactor for slf4j without string concatenation
---
.../accumulo/core/client/impl/MasterClient.java | 2 +-
.../client/impl/MultiTableBatchWriterImpl.java | 8 +-
.../accumulo/core/client/impl/ServerClient.java | 6 +-
.../core/client/impl/TableOperationsImpl.java | 7 +-
.../core/client/impl/TabletServerBatchReader.java | 2 +-
.../impl/TabletServerBatchReaderIterator.java | 9 +-
.../core/client/impl/TabletServerBatchWriter.java | 13 +--
.../apache/accumulo/core/client/impl/Writer.java | 8 +-
.../core/client/mock/MockInstanceOperations.java | 2 +-
.../core/client/mock/MockNamespaceOperations.java | 2 +-
.../core/client/mock/MockTableOperations.java | 2 +-
.../accumulo/core/conf/AccumuloConfiguration.java | 8 +-
.../accumulo/core/conf/ConfigSanityCheck.java | 2 +-
.../org/apache/accumulo/core/conf/Property.java | 10 +-
.../accumulo/core/conf/SiteConfiguration.java | 4 +-
.../accumulo/core/file/BloomFilterLayer.java | 20 ++--
.../file/blockfile/impl/CachableBlockFile.java | 6 +-
.../accumulo/core/file/rfile/CreateEmpty.java | 2 +-
.../apache/accumulo/core/file/rfile/PrintInfo.java | 2 +-
.../core/file/rfile/bcfile/Compression.java | 47 +++++-----
.../accumulo/core/iterators/DebugIterator.java | 12 +--
.../accumulo/core/iterators/IteratorUtil.java | 2 +-
.../core/iterators/aggregation/LongSummation.java | 2 +-
.../core/iterators/system/MapFileIterator.java | 2 +-
.../core/iterators/user/IndexedDocIterator.java | 12 +--
.../core/iterators/user/TransformingIterator.java | 2 +-
.../core/metadata/MetadataLocationObtainer.java | 1 -
.../accumulo/core/rpc/SslConnectionParams.java | 2 +-
.../org/apache/accumulo/core/rpc/ThriftUtil.java | 2 +-
.../core/security/crypto/CryptoModuleFactory.java | 34 +++----
.../core/security/crypto/DefaultCryptoModule.java | 2 +-
.../accumulo/core/trace/DistributedTrace.java | 6 +-
.../apache/accumulo/core/volume/VolumeImpl.java | 2 +-
.../apache/accumulo/core/zookeeper/ZooUtil.java | 10 +-
.../core/file/BloomFilterLayerLookupTest.java | 14 +--
.../java/org/apache/accumulo/fate/AdminUtil.java | 2 +-
.../java/org/apache/accumulo/fate/AgeOffStore.java | 4 +-
.../main/java/org/apache/accumulo/fate/Fate.java | 4 +-
.../fate/zookeeper/DistributedReadWriteLock.java | 6 +-
.../org/apache/accumulo/fate/zookeeper/Retry.java | 2 +-
.../fate/zookeeper/TransactionWatcher.java | 4 +-
.../apache/accumulo/fate/zookeeper/ZooCache.java | 12 +--
.../apache/accumulo/fate/zookeeper/ZooLock.java | 26 +++---
.../apache/accumulo/fate/zookeeper/ZooReader.java | 2 +-
.../accumulo/fate/zookeeper/ZooReservation.java | 2 +-
.../apache/accumulo/fate/zookeeper/ZooSession.java | 8 +-
.../apache/accumulo/fate/zookeeper/ZooUtil.java | 2 +-
.../org/apache/accumulo/cluster/RemoteShell.java | 2 +-
.../minicluster/impl/MiniAccumuloClusterImpl.java | 2 +-
.../minicluster/impl/MiniAccumuloConfigImpl.java | 2 +-
.../main/java/org/apache/accumulo/proxy/Proxy.java | 6 +-
.../org/apache/accumulo/proxy/ProxyServer.java | 2 +-
.../java/org/apache/accumulo/server/Accumulo.java | 32 ++++---
.../accumulo/server/client/BulkImporter.java | 24 ++---
.../server/client/ClientServiceHandler.java | 2 +-
.../server/conf/ZooCachePropertyAccessor.java | 2 +-
.../accumulo/server/conf/ZooConfiguration.java | 2 +-
.../accumulo/server/fs/PreferredVolumeChooser.java | 6 +-
.../accumulo/server/fs/VolumeManagerImpl.java | 10 +-
.../org/apache/accumulo/server/fs/VolumeUtil.java | 32 +++----
.../apache/accumulo/server/init/Initialize.java | 36 ++++----
.../accumulo/server/master/LiveTServerSet.java | 2 +-
.../server/master/balancer/TableLoadBalancer.java | 10 +-
.../server/master/balancer/TabletBalancer.java | 18 ++--
.../server/master/recovery/HadoopLogCloser.java | 8 +-
.../server/master/recovery/MapRLogCloser.java | 2 +-
.../server/master/state/DeadServerList.java | 2 +-
.../master/state/TabletStateChangeIterator.java | 4 +-
.../accumulo/server/master/state/ZooStore.java | 4 +-
.../server/master/state/ZooTabletStateStore.java | 4 +-
.../server/metrics/AbstractMetricsImpl.java | 2 +-
.../server/metrics/MetricsConfiguration.java | 2 +-
.../apache/accumulo/server/monitor/LogService.java | 2 +-
.../accumulo/server/problems/ProblemReports.java | 4 +-
.../server/replication/StatusCombiner.java | 6 +-
.../rpc/TCredentialsUpdatingInvocationHandler.java | 4 +-
.../server/security/AuditedSecurityOperation.java | 16 ++--
.../server/security/SecurityOperation.java | 26 +++---
.../accumulo/server/security/SecurityUtil.java | 6 +-
.../server/security/UserImpersonation.java | 2 +-
.../delegation/ZooAuthenticationKeyWatcher.java | 2 +-
.../security/handler/KerberosAuthenticator.java | 2 +-
.../server/security/handler/ZKAuthenticator.java | 2 +-
.../accumulo/server/tables/TableManager.java | 22 ++---
.../org/apache/accumulo/server/util/Admin.java | 2 +-
.../accumulo/server/util/CleanZookeeper.java | 4 +-
.../org/apache/accumulo/server/util/FileUtil.java | 18 ++--
.../accumulo/server/util/FindOfflineTablets.java | 4 +-
.../apache/accumulo/server/util/ListInstances.java | 2 +-
.../accumulo/server/util/MasterMetadataUtil.java | 8 +-
.../accumulo/server/util/MetadataTableUtil.java | 12 +--
.../apache/accumulo/server/util/RandomWriter.java | 10 +-
.../accumulo/server/util/RandomizeVolumes.java | 16 ++--
.../accumulo/server/util/ReplicationTableUtil.java | 2 +-
.../accumulo/server/util/TableDiskUsage.java | 4 +-
.../accumulo/server/util/TabletIterator.java | 8 +-
.../server/util/VerifyTabletAssignments.java | 2 +-
.../accumulo/server/util/time/SimpleTimer.java | 2 +-
.../server/zookeeper/DistributedWorkQueue.java | 16 ++--
.../accumulo/gc/GarbageCollectWriteAheadLogs.java | 8 +-
.../accumulo/gc/GarbageCollectionAlgorithm.java | 10 +-
.../apache/accumulo/gc/SimpleGarbageCollector.java | 71 +++++++-------
.../replication/CloseWriteAheadLogReferences.java | 14 +--
.../apache/accumulo/master/FateServiceHandler.java | 2 +-
.../java/org/apache/accumulo/master/Master.java | 72 +++++++--------
.../master/MasterClientServiceHandler.java | 24 ++---
.../apache/accumulo/master/TabletGroupWatcher.java | 74 +++++++--------
.../accumulo/master/recovery/RecoveryManager.java | 14 +--
.../master/replication/ReplicationDriver.java | 2 +-
.../accumulo/master/replication/WorkMaker.java | 6 +-
.../apache/accumulo/master/state/MergeStats.java | 51 ++++++-----
.../accumulo/master/tableOps/BulkImport.java | 20 ++--
.../accumulo/master/tableOps/ChangeTableState.java | 2 +-
.../accumulo/master/tableOps/CompactRange.java | 4 +-
.../accumulo/master/tableOps/RenameNamespace.java | 2 +-
.../accumulo/master/tableOps/RenameTable.java | 2 +-
.../accumulo/master/tableOps/TableRangeOp.java | 4 +-
.../org/apache/accumulo/master/tableOps/Utils.java | 8 +-
.../master/tserverOps/ShutdownTServer.java | 6 +-
.../java/org/apache/accumulo/monitor/Monitor.java | 16 ++--
.../apache/accumulo/monitor/ZooKeeperStatus.java | 2 +-
.../apache/accumulo/tracer/AsyncSpanReceiver.java | 6 +-
.../apache/accumulo/tracer/SendSpansViaThrift.java | 4 +-
.../org/apache/accumulo/tracer/TraceServer.java | 24 ++---
.../org/apache/accumulo/tracer/ZooTraceClient.java | 6 +-
.../accumulo/tserver/BulkFailedCopyProcessor.java | 6 +-
.../org/apache/accumulo/tserver/FileManager.java | 4 +-
.../org/apache/accumulo/tserver/InMemoryMap.java | 6 +-
.../org/apache/accumulo/tserver/NativeMap.java | 12 +--
.../java/org/apache/accumulo/tserver/TLevel.java | 25 ++++-
.../org/apache/accumulo/tserver/TabletServer.java | 102 ++++++++++-----------
.../tserver/TabletServerResourceManager.java | 18 ++--
.../tserver/constraints/ConstraintChecker.java | 4 +-
.../org/apache/accumulo/tserver/log/DfsLogger.java | 6 +-
.../org/apache/accumulo/tserver/log/LogSorter.java | 12 +--
.../accumulo/tserver/log/SortedLogRecovery.java | 16 ++--
.../accumulo/tserver/log/TabletServerLogger.java | 10 +-
.../apache/accumulo/tserver/logger/LogReader.java | 2 +-
.../apache/accumulo/tserver/scan/LookupTask.java | 2 +-
.../accumulo/tserver/scan/NextBatchTask.java | 2 +-
.../accumulo/tserver/session/SessionManager.java | 4 +-
.../accumulo/tserver/tablet/CompactionWatcher.java | 2 +-
.../apache/accumulo/tserver/tablet/Compactor.java | 16 ++--
.../accumulo/tserver/tablet/DatafileManager.java | 42 +++++----
.../tserver/tablet/MinorCompactionTask.java | 2 +-
.../accumulo/tserver/tablet/MinorCompactor.java | 8 +-
.../apache/accumulo/tserver/tablet/RootFiles.java | 10 +-
.../org/apache/accumulo/tserver/tablet/Tablet.java | 102 ++++++++++-----------
.../main/java/org/apache/accumulo/start/Main.java | 6 +-
.../start/classloader/AccumuloClassLoader.java | 6 +-
.../vfs/AccumuloReloadingVFSClassLoader.java | 16 ++--
.../classloader/vfs/AccumuloVFSClassLoader.java | 4 +-
.../classloader/vfs/UniqueFileReplicator.java | 6 +-
.../accumulo/test/BalanceWithOfflineTableIT.java | 12 +--
.../java/org/apache/accumulo/test/CleanWalIT.java | 6 +-
.../org/apache/accumulo/test/CreateRFiles.java | 2 +-
.../org/apache/accumulo/test/FileArchiveIT.java | 24 ++---
.../test/MissingWalHeaderCompletesRecoveryIT.java | 4 +-
.../accumulo/test/NativeMapConcurrencyTest.java | 2 +-
.../apache/accumulo/test/QueryMetadataTable.java | 7 +-
.../accumulo/test/RewriteTabletDirectoriesIT.java | 4 +-
.../org/apache/accumulo/test/ShellServerIT.java | 14 +--
.../accumulo/test/TableConfigurationUpdateIT.java | 4 +-
.../apache/accumulo/test/TestRandomDeletes.java | 4 +-
.../test/TracerRecoversAfterOfflineTableIT.java | 6 +-
.../apache/accumulo/test/TransportCachingIT.java | 6 +-
.../org/apache/accumulo/test/VerifyIngest.java | 17 ++--
.../functional/BalanceAfterCommsFailureIT.java | 2 +-
.../BalanceInPresenceOfOfflineTableIT.java | 10 +-
.../accumulo/test/functional/BatchScanSplitIT.java | 2 +-
.../apache/accumulo/test/functional/CleanUpIT.java | 2 +-
.../accumulo/test/functional/DeleteRowsIT.java | 2 +-
.../test/functional/DeleteRowsSplitIT.java | 2 +-
.../test/functional/HalfDeadTServerIT.java | 2 +-
.../accumulo/test/functional/KerberosProxyIT.java | 12 +--
.../accumulo/test/functional/LargeRowIT.java | 2 +-
.../accumulo/test/functional/LogicalTimeIT.java | 2 +-
.../test/functional/MetadataMaxFilesIT.java | 2 +-
.../accumulo/test/functional/MonitorSslIT.java | 2 +-
.../accumulo/test/functional/PermissionsIT.java | 14 +--
.../accumulo/test/functional/ReadWriteIT.java | 4 +-
.../test/functional/RecoveryWithEmptyRFileIT.java | 2 +-
.../test/functional/SimpleBalancerFairnessIT.java | 4 +-
.../accumulo/test/functional/ZombieTServer.java | 2 +-
.../test/performance/scan/CollectTabletStats.java | 2 +-
.../test/replication/CyclicReplicationIT.java | 4 +-
.../replication/MultiInstanceReplicationIT.java | 18 ++--
.../accumulo/test/replication/ReplicationIT.java | 22 ++---
.../UnorderedWorkAssignerReplicationIT.java | 20 ++--
.../UnusedWalDoesntCloseReplicationStatusIT.java | 8 +-
.../apache/accumulo/test/start/KeywordStartIT.java | 4 +-
.../org/apache/accumulo/test/util/CertUtils.java | 4 +-
192 files changed, 977 insertions(+), 953 deletions(-)
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/MasterClient.java b/core/src/main/java/org/apache/accumulo/core/client/impl/MasterClient.java
index 166f623..500006f 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/MasterClient.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/MasterClient.java
@@ -76,7 +76,7 @@ public class MasterClient {
// do not expect to recover from this
throw new RuntimeException(tte);
}
- log.debug("Failed to connect to master=" + master + ", will retry... ", tte);
+ log.debug("Failed to connect to master={}, will retry... ", master, tte);
return null;
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java
index 34a3137..433ea42 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java
@@ -139,11 +139,11 @@ public class MultiTableBatchWriterImpl implements MultiTableBatchWriter {
@Override
protected void finalize() {
if (!closed.get()) {
- log.warn(MultiTableBatchWriterImpl.class.getSimpleName() + " not shutdown; did you forget to call close()?");
+ log.warn("{} not shutdown; did you forget to call close()?", MultiTableBatchWriterImpl.class.getSimpleName());
try {
close();
} catch (MutationsRejectedException mre) {
- log.error(MultiTableBatchWriterImpl.class.getSimpleName() + " internal error.", mre);
+ log.error("{} internal error.", MultiTableBatchWriterImpl.class.getSimpleName() , mre);
throw new RuntimeException("Exception when closing " + MultiTableBatchWriterImpl.class.getSimpleName(), mre);
}
}
@@ -162,7 +162,7 @@ public class MultiTableBatchWriterImpl implements MultiTableBatchWriter {
} catch (UncheckedExecutionException e) {
Throwable cause = e.getCause();
- log.error("Unexpected exception when fetching table id for " + tableName);
+ log.error("Unexpected exception when fetching table id for {}", tableName);
if (null == cause) {
throw new RuntimeException(e);
@@ -176,7 +176,7 @@ public class MultiTableBatchWriterImpl implements MultiTableBatchWriter {
} catch (ExecutionException e) {
Throwable cause = e.getCause();
- log.error("Unexpected exception when fetching table id for " + tableName);
+ log.error("Unexpected exception when fetching table id for {}", tableName);
if (null == cause) {
throw new RuntimeException(e);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ServerClient.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ServerClient.java
index 359192f..d1cb2d6 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ServerClient.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ServerClient.java
@@ -94,7 +94,7 @@ public class ServerClient {
} catch (TApplicationException tae) {
throw new AccumuloServerException(server, tae);
} catch (TTransportException tte) {
- log.debug("ClientService request failed " + server + ", retrying ... ", tte);
+ log.debug("ClientService request failed {}, retrying ...", server, tte);
sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
} finally {
if (client != null)
@@ -116,7 +116,7 @@ public class ServerClient {
} catch (TApplicationException tae) {
throw new AccumuloServerException(server, tae);
} catch (TTransportException tte) {
- log.debug("ClientService request failed " + server + ", retrying ... ", tte);
+ log.debug("ClientService request failed {}, retrying ...", server, tte);
sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
} finally {
if (client != null)
@@ -176,7 +176,7 @@ public class ServerClient {
if (servers.isEmpty()) {
log.warn("There are no tablet servers: check that zookeeper and accumulo are running.");
} else {
- log.warn("Failed to find an available server in the list of servers: " + servers);
+ log.warn("Failed to find an available server in the list of servers: {}", servers);
}
warnedAboutTServersBeingDown = true;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
index a5b72c1..0a8e0f7 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
@@ -1239,7 +1239,8 @@ public class TableOperationsImpl extends TableOperationsHelper {
waitTime = waitFor * 10;
waitTime = Math.max(100, waitTime);
waitTime = Math.min(5000, waitTime);
- log.trace("Waiting for {}({}) tablets, startRow = {} lastRow = {}, holes={} sleeping:{}ms", waitFor, maxPerServer, startRow, lastRow, holes, waitTime);
+ log.trace("Waiting for {}({}) tablets, startRow = {} lastRow = {}, holes={} sleeping:{}ms",
+ waitFor, maxPerServer, startRow, lastRow, holes, waitTime);
sleepUninterruptibly(waitTime, TimeUnit.MILLISECONDS);
} else {
break;
@@ -1435,8 +1436,8 @@ public class TableOperationsImpl extends TableOperationsHelper {
for (Entry<String,String> entry : props.entrySet()) {
if (Property.isClassProperty(entry.getKey()) && !entry.getValue().contains(Constants.CORE_PACKAGE_NAME)) {
- LoggerFactory.getLogger(this.getClass()).info("Imported table sets '{}' to '{}'. Ensure this class is on Accumulo classpath.", entry.getKey(),
- entry.getValue());
+ LoggerFactory.getLogger(this.getClass()).info(
+ "Imported table sets '{}' to '{}'. Ensure this class is on Accumulo classpath.", entry.getKey(), entry.getValue());
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReader.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReader.java
index 0999090..61b5248 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReader.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReader.java
@@ -83,7 +83,7 @@ public class TabletServerBatchReader extends ScannerOptions implements BatchScan
@Override
protected void finalize() {
if (!queryThreadPool.isShutdown()) {
- log.warn(TabletServerBatchReader.class.getSimpleName() + " not shutdown; did you forget to call close()?", ex);
+ log.warn("{} not shutdown; did you forget to call close()? ", TabletServerBatchReader.class.getSimpleName(), ex);
close();
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
index 62b7eb3..e2d6070 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
@@ -185,8 +185,8 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
if (queryThreadPool.isShutdown()) {
String shortMsg = "The BatchScanner was unexpectedly closed while this Iterator was still in use.";
- log.error(shortMsg + " Ensure that a reference to the BatchScanner is retained so that it can be closed when this Iterator is exhausted."
- + " Not retaining a reference to the BatchScanner guarantees that you are leaking threads in your client JVM.");
+ log.error("{} Ensure that a reference to the BatchScanner is retained so that it can be closed when this Iterator is exhausted."
+ + " Not retaining a reference to the BatchScanner guarantees that you are leaking threads in your client JVM.", shortMsg);
throw new RuntimeException(shortMsg + " Ensure proper handling of the BatchScanner.");
}
@@ -250,7 +250,6 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
if (log.isTraceEnabled())
log.trace("Failed to bin {} ranges, tablet locations were null, retrying in 100ms", failures.size());
-
try {
Thread.sleep(100);
} catch (InterruptedException e) {
@@ -710,7 +709,7 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
ThriftUtil.returnClient(client);
}
} catch (TTransportException e) {
- log.debug("Server : {} msg : {}", server, e.getMessage());
+ log.debug("Server : {} msg : {}", server, e.getMessage(), e);
timeoutTracker.errorOccured(e);
throw new IOException(e);
} catch (ThriftSecurityException e) {
@@ -723,7 +722,7 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
log.debug("Server : {} msg : {}", server, e.getMessage(), e);
throw new IOException(e);
} catch (TSampleNotPresentException e) {
- log.debug("Server : " + server + " msg : " + e.getMessage(), e);
+ log.debug("Server : {} msg : {}", server, e.getMessage(), e);
String tableInfo = "?";
if (e.getExtent() != null) {
Table.ID tableId = new KeyExtent(e.getExtent()).getTableId();
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
index 49d70bf..bd80da4 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
@@ -546,7 +546,7 @@ public class TabletServerBatchWriter {
somethingFailed = true;
this.serverSideErrors.add(server);
this.notifyAll();
- log.error("Server side error on " + server + ": " + e);
+ log.error("Server side error on {}: ", server, e);
}
private synchronized void updateUnknownErrors(String msg, Throwable t) {
@@ -555,9 +555,9 @@ public class TabletServerBatchWriter {
this.lastUnknownError = t;
this.notifyAll();
if (t instanceof TableDeletedException || t instanceof TableOfflineException || t instanceof TimedOutException)
- log.debug("{}", msg, t); // this is not unknown
+ log.debug(msg, t); // this is not unknown
else
- log.error("{}", msg, t);
+ log.error(msg, t);
}
private void checkForFailures() throws MutationsRejectedException {
@@ -639,7 +639,7 @@ public class TabletServerBatchWriter {
if (rf != null) {
if (log.isTraceEnabled())
- log.trace("tid=" + Thread.currentThread().getId() + " Requeuing " + rf.size() + " failed mutations");
+ log.trace("tid={} Requeuing {} failed mutations", Thread.currentThread().getId(), rf.size());
addFailedMutations(rf);
}
} catch (Throwable t) {
@@ -866,8 +866,9 @@ public class TabletServerBatchWriter {
failures = sendMutationsToTabletServer(location, mutationBatch, timeoutTracker);
long st2 = System.currentTimeMillis();
if (log.isTraceEnabled())
- log.trace("sent " + String.format("%,d", count) + " mutations to " + location + " in "
- + String.format("%.2f secs (%,.2f mutations/sec) with %,d failures", (st2 - st1) / 1000.0, count / ((st2 - st1) / 1000.0), failures.size()));
+ log.trace("{}",
+ String.format("sent %,d mutations to %s in %.2f secs (%,.2f mutations/sec) with %,d failures",
+ count, location, (st2 - st1) / 1000.0, count / ((st2 - st1) / 1000.0), failures.size()));
long successBytes = 0;
for (Entry<KeyExtent,List<Mutation>> entry : mutationBatch.entrySet()) {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java b/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java
index 206b113..3836d58 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java
@@ -86,7 +86,7 @@ public class Writer {
TabletLocation tabLoc = TabletLocator.getLocator(context, tableId).locateTablet(context, new Text(m.getRow()), false, true);
if (tabLoc == null) {
- log.trace("No tablet location found for row " + new String(m.getRow(), UTF_8));
+ log.trace("No tablet location found for row {}", new String(m.getRow(), UTF_8));
sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
continue;
}
@@ -96,15 +96,15 @@ public class Writer {
updateServer(context, m, tabLoc.tablet_extent, parsedLocation);
return;
} catch (NotServingTabletException e) {
- log.trace("Not serving tablet, server = " + parsedLocation);
+ log.trace("Not serving tablet, server = {}", parsedLocation);
TabletLocator.getLocator(context, tableId).invalidateCache(tabLoc.tablet_extent);
} catch (ConstraintViolationException cve) {
- log.error("error sending update to " + parsedLocation + ": " + cve);
+ log.error("error sending update to {}: ", parsedLocation, cve);
// probably do not need to invalidate cache, but it does not hurt
TabletLocator.getLocator(context, tableId).invalidateCache(tabLoc.tablet_extent);
throw cve;
} catch (TException e) {
- log.error("error sending update to " + parsedLocation + ": " + e);
+ log.error("error sending update to {}: ", parsedLocation, e);
TabletLocator.getLocator(context, tableId).invalidateCache(tabLoc.tablet_extent);
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
index e264104..5975bf4 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstanceOperations.java
@@ -76,7 +76,7 @@ class MockInstanceOperations implements InstanceOperations {
try {
AccumuloVFSClassLoader.loadClass(className, Class.forName(asTypeName));
} catch (ClassNotFoundException e) {
- log.warn("Could not find class named '" + className + "' in testClassLoad.", e);
+ log.warn("Could not find class named '{}' in testClassLoad. ", className, e);
return false;
}
return true;
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java
index b1cb980..49c11ce 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockNamespaceOperations.java
@@ -130,7 +130,7 @@ class MockNamespaceOperations extends NamespaceOperationsHelper {
try {
AccumuloVFSClassLoader.loadClass(className, Class.forName(asTypeName));
} catch (ClassNotFoundException e) {
- log.warn("Could not load class '" + className + "' with type name '" + asTypeName + "' in testClassLoad()", e);
+ log.warn("Could not load class '{}' with type name '{}' in testClassLoad() ", className, asTypeName, e);
return false;
}
return true;
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index 112b6a9..6d4bd72 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -480,7 +480,7 @@ class MockTableOperations extends TableOperationsHelper {
try {
AccumuloVFSClassLoader.loadClass(className, Class.forName(asTypeName));
} catch (ClassNotFoundException e) {
- log.warn("Could not load class '" + className + "' with type name '" + asTypeName + "' in testClassLoad().", e);
+ log.warn("Could not load class '{}' with type name '{}' in testClassLoad(). ", className, asTypeName, e);
return false;
}
return true;
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java b/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
index 3beeb66..8de21b8 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
@@ -205,7 +205,7 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
int port = Integer.parseInt(portString);
if (port != 0) {
if (port < 1024 || port > 65535) {
- log.error("Invalid port number " + port + "; Using default " + property.getDefaultValue());
+ log.error("Invalid port number {}; Using default {}", port, property.getDefaultValue());
ports[0] = Integer.parseInt(property.getDefaultValue());
} else {
ports[0] = port;
@@ -273,7 +273,7 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
int maxFilesPerTablet = getCount(Property.TABLE_FILE_MAX);
if (maxFilesPerTablet <= 0) {
maxFilesPerTablet = getCount(Property.TSERV_SCAN_MAX_OPENFILES) - 1;
- log.debug("Max files per tablet " + maxFilesPerTablet);
+ log.debug("Max files per tablet {}", maxFilesPerTablet);
}
return maxFilesPerTablet;
@@ -303,13 +303,13 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
try {
Class<? extends T> clazz = AccumuloVFSClassLoader.loadClass(clazzName, base);
instance = clazz.newInstance();
- log.info("Loaded class : " + clazzName);
+ log.info("Loaded class : {}", clazzName);
} catch (Exception e) {
log.warn("Failed to load class ", e);
}
if (instance == null) {
- log.info("Using " + defaultInstance.getClass().getName());
+ log.info("Using {}", defaultInstance.getClass().getName());
instance = defaultInstance;
}
return instance;
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/ConfigSanityCheck.java b/core/src/main/java/org/apache/accumulo/core/conf/ConfigSanityCheck.java
index d9b7fc0..130863c 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/ConfigSanityCheck.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/ConfigSanityCheck.java
@@ -73,7 +73,7 @@ public class ConfigSanityCheck {
}
if (!usingVolumes) {
- log.warn("Use of " + INSTANCE_DFS_URI + " and " + INSTANCE_DFS_DIR + " are deprecated. Consider using " + Property.INSTANCE_VOLUMES + " instead.");
+ log.warn("Use of {} and {} are deprecated. Consider using {} instead.", INSTANCE_DFS_URI, INSTANCE_DFS_DIR, Property.INSTANCE_VOLUMES);
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 4c74903..d5a7295 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -384,8 +384,8 @@ public enum Property {
+ "no longer in use are removed from the filesystem."),
GC_PORT("gc.port.client", "9998", PropertyType.PORT, "The listening port for the garbage collector's monitor service"),
GC_DELETE_THREADS("gc.threads.delete", "16", PropertyType.COUNT, "The number of threads used to delete files"),
- GC_TRASH_IGNORE("gc.trash.ignore", "false", PropertyType.BOOLEAN, "Do not use the Trash, even if it is configured."),
- GC_FILE_ARCHIVE("gc.file.archive", "false", PropertyType.BOOLEAN, "Archive any files/directories instead of moving to the HDFS trash or deleting."),
+ GC_TRASH_IGNORE("gc.trash.ignore", "false", PropertyType.BOOLEAN, "Do not use the Trash, even if it is configured"),
+ GC_FILE_ARCHIVE("gc.file.archive", "false", PropertyType.BOOLEAN, "Archive any files/directories instead of moving to the HDFS trash or deleting"),
GC_TRACE_PERCENT("gc.trace.percent", "0.01", PropertyType.FRACTION, "Percent of gc cycles to trace"),
// properties that are specific to the monitor server behavior
@@ -929,13 +929,13 @@ public enum Property {
instance = clazz.newInstance();
if (loaded.put(clazzName, clazz) != clazz)
- log.debug("Loaded class : " + clazzName);
+ log.debug("Loaded class : {}", clazzName);
} catch (Exception e) {
- log.warn("Failed to load class ", e);
+ log.warn("Failed to load class {}", e);
}
if (instance == null) {
- log.info("Using default class " + defaultInstance.getClass().getName());
+ log.info("Using default class {}", defaultInstance.getClass().getName());
instance = defaultInstance;
}
return instance;
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
index 213a895..8549e33 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
@@ -132,7 +132,7 @@ public class SiteConfiguration extends AccumuloConfiguration {
return new String(value);
}
} catch (IOException e) {
- log.warn("Failed to extract sensitive property (" + key + ") from Hadoop CredentialProvider, falling back to accumulo-site.xml", e);
+ log.warn("Failed to extract sensitive property ({}) from Hadoop CredentialProvider, falling back to accumulo-site.xml", key, e);
}
}
}
@@ -142,7 +142,7 @@ public class SiteConfiguration extends AccumuloConfiguration {
if (value == null || !property.getType().isValidFormat(value)) {
if (value != null)
- log.error("Using default value for " + key + " due to improperly formatted " + property.getType() + ": " + value);
+ log.error("Using default value for {} due to improperly formatted {}: {}", key, property.getType(), value);
value = parent.get(property);
}
return value;
diff --git a/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java b/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java
index 01fb699..4dffc64 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java
@@ -69,7 +69,7 @@ import org.slf4j.LoggerFactory;
*
*/
public class BloomFilterLayer {
- private static final Logger LOG = LoggerFactory.getLogger(BloomFilterLayer.class);
+ private static final Logger log = LoggerFactory.getLogger(BloomFilterLayer.class);
public static final String BLOOM_FILE_NAME = "acu_bloom";
public static final int HASH_COUNT = 5;
@@ -132,7 +132,7 @@ public class BloomFilterLayer {
transformer = clazz.newInstance();
} catch (Exception e) {
- LOG.error("Failed to find KeyFunctor: " + acuconf.get(Property.TABLE_BLOOM_KEY_FUNCTOR), e);
+ log.error("Failed to find KeyFunctor: {}", acuconf.get(Property.TABLE_BLOOM_KEY_FUNCTOR), e);
throw new IllegalArgumentException("Failed to find KeyFunctor: " + acuconf.get(Property.TABLE_BLOOM_KEY_FUNCTOR));
}
@@ -252,31 +252,31 @@ public class BloomFilterLayer {
// file does not have a bloom filter, ignore it
} catch (IOException ioe) {
if (!closed)
- LOG.warn("Can't open BloomFilter", ioe);
+ log.warn("Can't open BloomFilter", ioe);
else
- LOG.debug("Can't open BloomFilter, file closed : {}", ioe.getMessage());
+ log.debug("Can't open BloomFilter, file closed : {}", ioe.getMessage());
bloomFilter = null;
} catch (ClassNotFoundException e) {
- LOG.error("Failed to find KeyFunctor in config: " + ClassName, e);
+ log.error("Failed to find KeyFunctor in config: {}", ClassName, e);
bloomFilter = null;
} catch (InstantiationException e) {
- LOG.error("Could not instantiate KeyFunctor: " + ClassName, e);
+ log.error("Could not instantiate KeyFunctor: {}", ClassName, e);
bloomFilter = null;
} catch (IllegalAccessException e) {
- LOG.error("Illegal acess exception", e);
+ log.error("Illegal acess exception", e);
bloomFilter = null;
} catch (RuntimeException rte) {
if (!closed)
throw rte;
else
- LOG.debug("Can't open BloomFilter, RTE after closed ", rte);
+ log.debug("Can't open BloomFilter, RTE after closed", rte);
} finally {
if (in != null) {
try {
in.close();
} catch (IOException e) {
- LOG.warn("Failed to close ", e);
+ log.warn("Failed to close", e);
}
}
}
@@ -298,7 +298,7 @@ public class BloomFilterLayer {
loadTask.run();
} else {
// load the bloom filter in the background
- ltp.execute(new LoggingRunnable(LOG, loadTask));
+ ltp.execute(new LoggingRunnable(log, loadTask));
}
} finally {
// set load task to null so no one else can initiate the load
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
index 46824ca..3047ebc 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
@@ -310,7 +310,7 @@ public class CachableBlockFile {
try {
_iCache.cacheBlock(_lookup, b);
} catch (Exception e) {
- log.warn("Already cached block: " + _lookup, e);
+ log.warn("Already cached block: {}", _lookup, e);
}
}
@@ -355,7 +355,7 @@ public class CachableBlockFile {
b = new byte[(int) _currBlock.getRawSize()];
_currBlock.readFully(b);
} catch (IOException e) {
- log.debug("Error full blockRead for file " + fileName + " for block " + block, e);
+ log.debug("Error full blockRead for file {} for block {}", fileName, block, e);
throw e;
} finally {
_currBlock.close();
@@ -365,7 +365,7 @@ public class CachableBlockFile {
try {
ce = cache.cacheBlock(_lookup, b);
} catch (Exception e) {
- log.warn("Already cached block: " + _lookup, e);
+ log.warn("Already cached block: {}", _lookup, e);
}
if (ce == null)
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
index d48a0eb..cd397c3 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
@@ -76,7 +76,7 @@ public class CreateEmpty {
for (String arg : opts.files) {
Path path = new Path(arg);
- log.info("Writing to file '" + path + "'");
+ log.info("Writing to file '{}'", path);
FileSKVWriter writer = (new RFileOperations()).newWriterBuilder().forFile(arg, path.getFileSystem(conf), conf)
.withTableConfiguration(DefaultConfiguration.getInstance()).withCompression(opts.codec).build();
writer.close();
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
index 857fd1c..53c48b9 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
@@ -133,7 +133,7 @@ public class PrintInfo implements KeywordExecutable {
Configuration conf = new Configuration();
for (String confFile : opts.configFiles) {
- log.debug("Adding Hadoop configuration file " + confFile);
+ log.debug("Adding Hadoop configuration file {}", confFile);
conf.addResource(new Path(confFile));
}
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java
index 2b81541..cdd188e 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java
@@ -27,8 +27,6 @@ import java.util.Map.Entry;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
@@ -38,6 +36,8 @@ import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
@@ -48,8 +48,7 @@ import com.google.common.collect.Maps;
* Compression related stuff.
*/
public final class Compression {
- static final Log LOG = LogFactory.getLog(Compression.class);
-
+ private static final Logger log = LoggerFactory.getLogger(Compression.class);
/**
* Prevent the instantiation of class.
*/
@@ -133,32 +132,30 @@ public final class Compression {
private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
@Override
- public boolean isSupported() {
+ public boolean isSupported() {
return codec != null;
}
@Override
- public void initializeDefaultCodec() {
- if (!checked.get()) {
- checked.set(true);
+ public void initializeDefaultCodec() {
+ if (!checked.get()) {
+ checked.set(true);
codec = createNewCodec(DEFAULT_BUFFER_SIZE);
}
}
@Override
- CompressionCodec createNewCodec(int bufferSize) {
- String extClazz = (conf.get(CONF_LZO_CLASS) == null ? System.getProperty(CONF_LZO_CLASS) : null);
- String clazz = (extClazz != null) ? extClazz : defaultClazz;
- try {
- LOG.info("Trying to load Lzo codec class: " + clazz);
- Configuration myConf = new Configuration(conf);
+ CompressionCodec createNewCodec(int bufferSize) {
+ String extClazz = (conf.get(CONF_LZO_CLASS) == null ? System.getProperty(CONF_LZO_CLASS) : null);
+ String clazz = (extClazz != null) ? extClazz : defaultClazz;
+ try {
+ log.info("Trying to load Lzo codec class: {}", clazz);
+ Configuration myConf = new Configuration(conf);
// only use the buffersize if > 0, otherwise we'll use
- // the default defined within the codec
- if (bufferSize > 0)
+ // the default defined within the codec
+ if (bufferSize > 0)
myConf.setInt(BUFFER_SIZE_OPT, bufferSize);
return (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), myConf);
- } catch (ClassNotFoundException e) {
- // that is okay
+ } catch (ClassNotFoundException e) {
+ // that is okay
+
}
return null;
}
@@ -367,7 +364,7 @@ public final class Compression {
String extClazz = (conf.get(CONF_SNAPPY_CLASS) == null ? System.getProperty(CONF_SNAPPY_CLASS) : null);
String clazz = (extClazz != null) ? extClazz : defaultClazz;
try {
- LOG.info("Trying to load snappy codec class: " + clazz);
+ log.info("Trying to load snappy codec class: {}", clazz);
Configuration myConf = new Configuration(conf);
// only use the buffersize if > 0, otherwise we'll use
@@ -504,9 +501,9 @@ public final class Compression {
if (compressor.finished()) {
// Somebody returns the compressor to CodecPool but is still using
// it.
- LOG.warn("Compressor obtained from CodecPool already finished()");
+ log.warn("Compressor obtained from CodecPool already finished()");
} else {
- LOG.debug("Got a compressor: " + compressor.hashCode());
+ log.debug("Got a compressor: {}", compressor.hashCode());
}
/**
* Following statement is necessary to get around bugs in 0.18 where a compressor is referenced after returned back to the codec pool.
@@ -520,7 +517,7 @@ public final class Compression {
public void returnCompressor(Compressor compressor) {
if (compressor != null) {
- LOG.debug("Return a compressor: " + compressor.hashCode());
+ log.debug("Return a compressor: {}", compressor.hashCode());
CodecPool.returnCompressor(compressor);
}
}
@@ -533,9 +530,9 @@ public final class Compression {
if (decompressor.finished()) {
// Somebody returns the decompressor to CodecPool but is still using
// it.
- LOG.warn("Decompressor obtained from CodecPool already finished()");
+ log.warn("Decompressor obtained from CodecPool already finished()");
} else {
- LOG.debug("Got a decompressor: " + decompressor.hashCode());
+ log.debug("Got a decompressor: {}", decompressor.hashCode());
}
/**
* Following statement is necessary to get around bugs in 0.18 where a decompressor is referenced after returned back to the codec pool.
@@ -550,7 +547,7 @@ public final class Compression {
public void returnDecompressor(Decompressor decompressor) {
if (decompressor != null) {
- LOG.debug("Returned a decompressor: " + decompressor.hashCode());
+ log.debug("Returned a decompressor: {}", decompressor.hashCode());
CodecPool.returnDecompressor(decompressor);
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/DebugIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/DebugIterator.java
index c2c7ad5..af9c62c 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/DebugIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/DebugIterator.java
@@ -53,39 +53,39 @@ public class DebugIterator extends WrappingIterator implements OptionDescriber {
@Override
public Key getTopKey() {
Key wc = super.getTopKey();
- log.debug(prefix + " getTopKey() --> " + wc);
+ log.debug("{} getTopKey() --> {}", prefix, wc);
return wc;
}
@Override
public Value getTopValue() {
Value w = super.getTopValue();
- log.debug(prefix + " getTopValue() --> " + w);
+ log.debug("{} getTopValue() --> {}", prefix, w);
return w;
}
@Override
public boolean hasTop() {
boolean b = super.hasTop();
- log.debug(prefix + " hasTop() --> " + b);
+ log.debug("{} hasTop() --> {}", prefix, b);
return b;
}
@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
- log.debug(prefix + " seek(" + range + ", " + columnFamilies + ", " + inclusive + ")");
+ log.debug("{} seek({}, {}, {})", prefix, range, columnFamilies, inclusive);
super.seek(range, columnFamilies, inclusive);
}
@Override
public void next() throws IOException {
- log.debug(prefix + " next()");
+ log.debug("{} next()", prefix);
super.next();
}
@Override
public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
- log.debug("init(" + source + ", " + options + ", " + env + ")");
+ log.debug("init({}, {}, {})", source, options, env);
if (null == prefix) {
prefix = String.format("0x%08X", this.hashCode());
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
index 2e09782..d130775 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
@@ -165,7 +165,7 @@ public class IteratorUtil {
options.put(optName, entry.getValue());
} else {
- log.warn("Unrecognizable option: " + entry.getKey());
+ log.warn("Unrecognizable option: {}", entry.getKey());
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/aggregation/LongSummation.java b/core/src/main/java/org/apache/accumulo/core/iterators/aggregation/LongSummation.java
index 7692ecb..3707d5b 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/aggregation/LongSummation.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/aggregation/LongSummation.java
@@ -41,7 +41,7 @@ public class LongSummation implements Aggregator {
try {
sum += bytesToLong(value.get());
} catch (IOException e) {
- log.error(LongSummation.class.getSimpleName() + " trying to convert bytes to long, but byte array isn't length 8");
+ log.error("{} trying to convert bytes to long, but byte array isn't length 8", LongSummation.class.getSimpleName());
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/MapFileIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/MapFileIterator.java
index f9f0600..b33a14b 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/MapFileIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/MapFileIterator.java
@@ -120,7 +120,7 @@ public class MapFileIterator implements FileSKVIterator {
try {
SortedKeyValueIterator<Key,Value> other = env.reserveMapFileReader(dirName);
((InterruptibleIterator) other).setInterruptFlag(interruptFlag);
- log.debug("deep copying MapFile: " + this + " -> " + other);
+ log.debug("deep copying MapFile: {} -> {}", this, other);
return other;
} catch (IOException e) {
log.error("failed to clone map file reader", e);
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/IndexedDocIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/IndexedDocIterator.java
index 9ef2bf6..5320b22 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/IndexedDocIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/IndexedDocIterator.java
@@ -160,19 +160,19 @@ public class IndexedDocIterator extends IntersectingIterator {
if (topKey == null)
return;
if (log.isTraceEnabled())
- log.trace("using top key to seek for doc: " + topKey.toString());
+ log.trace("using top key to seek for doc: {}", topKey.toString());
Key docKey = buildDocKey();
docSource.seek(new Range(docKey, true, null, false), docColfSet, true);
- log.debug("got doc key: " + docSource.getTopKey().toString());
+ log.debug("got doc key: {}", docSource.getTopKey().toString());
if (docSource.hasTop() && docKey.equals(docSource.getTopKey(), PartialKey.ROW_COLFAM_COLQUAL)) {
value = docSource.getTopValue();
}
- log.debug("got doc value: " + value.toString());
+ log.debug("got doc value: {}", value.toString());
}
protected Key buildDocKey() {
if (log.isTraceEnabled())
- log.trace("building doc key for " + currentPartition + " " + currentDocID);
+ log.trace("building doc key for {} {}", currentPartition, currentDocID);
int zeroIndex = currentDocID.find("\0");
if (zeroIndex < 0)
throw new IllegalArgumentException("bad current docID");
@@ -181,12 +181,12 @@ public class IndexedDocIterator extends IntersectingIterator {
colf.append(currentDocID.getBytes(), 0, zeroIndex);
docColfSet = Collections.singleton((ByteSequence) new ArrayByteSequence(colf.getBytes(), 0, colf.getLength()));
if (log.isTraceEnabled())
- log.trace(zeroIndex + " " + currentDocID.getLength());
+ log.trace("{} {}", zeroIndex, currentDocID.getLength());
Text colq = new Text();
colq.set(currentDocID.getBytes(), zeroIndex + 1, currentDocID.getLength() - zeroIndex - 1);
Key k = new Key(currentPartition, colf, colq);
if (log.isTraceEnabled())
- log.trace("built doc key for seek: " + k.toString());
+ log.trace("built doc key for seek: {}", k.toString());
return k;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
index bd8ae15..81854a9 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
@@ -388,7 +388,7 @@ abstract public class TransformingIterator extends WrappingIterator implements O
colVis = new ColumnVisibility(visibility.toArray());
parsedVisibilitiesCache.put(visibility, Boolean.TRUE);
} catch (BadArgumentException e) {
- log.error("Parse error after transformation : " + visibility);
+ log.error("Parse error after transformation : {}", visibility);
parsedVisibilitiesCache.put(visibility, Boolean.FALSE);
if (scanning) {
return false;
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
index 6336d12..07bcdff 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
@@ -62,7 +62,6 @@ import org.slf4j.LoggerFactory;
public class MetadataLocationObtainer implements TabletLocationObtainer {
private static final Logger log = LoggerFactory.getLogger(MetadataLocationObtainer.class);
-
private SortedSet<Column> locCols;
private ArrayList<Column> columns;
diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/SslConnectionParams.java b/core/src/main/java/org/apache/accumulo/core/rpc/SslConnectionParams.java
index 017134e..aa2a2ed 100644
--- a/core/src/main/java/org/apache/accumulo/core/rpc/SslConnectionParams.java
+++ b/core/src/main/java/org/apache/accumulo/core/rpc/SslConnectionParams.java
@@ -93,7 +93,7 @@ public class SslConnectionParams {
String keystorePassword = conf.get(passwordOverrideProperty);
if (!keystorePassword.isEmpty()) {
if (log.isTraceEnabled())
- log.trace("Using explicit SSL private key password from " + passwordOverrideProperty.getKey());
+ log.trace("Using explicit SSL private key password from {}", passwordOverrideProperty.getKey());
} else {
keystorePassword = defaultPassword;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java b/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java
index 97d0735..58f3382 100644
--- a/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java
@@ -341,7 +341,7 @@ public class ThriftUtil {
try {
transport = TTimeoutTransport.create(address, timeout);
} catch (IOException ex) {
- log.warn("Failed to open transport to " + address);
+ log.warn("Failed to open transport to {}", address);
throw new TTransportException(ex);
}
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
index 11270e3..37c2ef8 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
@@ -73,14 +73,14 @@ public class CryptoModuleFactory {
@SuppressWarnings({"rawtypes"})
private static CryptoModule instantiateCryptoModule(String cryptoModuleClassname) {
- log.debug(String.format("About to instantiate crypto module %s", cryptoModuleClassname));
+ log.debug("About to instantiate crypto module {}", cryptoModuleClassname);
CryptoModule cryptoModule = null;
Class cryptoModuleClazz = null;
try {
cryptoModuleClazz = AccumuloVFSClassLoader.loadClass(cryptoModuleClassname);
} catch (ClassNotFoundException e1) {
- log.warn(String.format("Could not find configured crypto module \"%s\". No encryption will be used.", cryptoModuleClassname));
+ log.warn("Could not find configured crypto module \"{}\". No encryption will be used.", cryptoModuleClassname);
return new NullCryptoModule();
}
@@ -96,22 +96,22 @@ public class CryptoModuleFactory {
}
if (!implementsCryptoModule) {
- log.warn("Configured Accumulo crypto module \"" + cryptoModuleClassname + "\" does not implement the CryptoModule interface. No encryption will be used.");
+ log.warn("Configured Accumulo crypto module \"{}\" does not implement the CryptoModule interface. No encryption will be used.", cryptoModuleClassname);
return new NullCryptoModule();
} else {
try {
cryptoModule = (CryptoModule) cryptoModuleClazz.newInstance();
- log.debug("Successfully instantiated crypto module " + cryptoModuleClassname);
+ log.debug("Successfully instantiated crypto module {}", cryptoModuleClassname);
} catch (InstantiationException e) {
- log.warn(String.format("Got instantiation exception %s when instantiating crypto module \"%s\". No encryption will be used.", e.getCause().getClass()
- .getName(), cryptoModuleClassname));
- log.warn("InstantiationException", e.getCause());
+ log.warn("Got instantiation exception {} when instantiating crypto module \"{}\". No encryption will be used.", e.getCause().getClass()
+ .getName(), cryptoModuleClassname);
+ log.warn("InstantiationException", e.getCause());
return new NullCryptoModule();
} catch (IllegalAccessException e) {
- log.warn(String.format("Got illegal access exception when trying to instantiate crypto module \"%s\". No encryption will be used.",
- cryptoModuleClassname));
+ log.warn("Got illegal access exception when trying to instantiate crypto module \"{}\". No encryption will be used.",
+ cryptoModuleClassname);
log.warn("IllegalAccessException", e);
return new NullCryptoModule();
}
@@ -150,14 +150,14 @@ public class CryptoModuleFactory {
@SuppressWarnings("rawtypes")
private static SecretKeyEncryptionStrategy instantiateSecreteKeyEncryptionStrategy(String className) {
- log.debug("About to instantiate secret key encryption strategy " + className);
+ log.debug("About to instantiate secret key encryption strategy {}", className);
SecretKeyEncryptionStrategy strategy = null;
Class keyEncryptionStrategyClazz = null;
try {
keyEncryptionStrategyClazz = AccumuloVFSClassLoader.loadClass(className);
} catch (ClassNotFoundException e1) {
- log.warn(String.format("Could not find configured secret key encryption strategy \"%s\". No encryption will be used.", className));
+ log.warn("Could not find configured secret key encryption strategy \"{}\". No encryption will be used.", className);
return new NullSecretKeyEncryptionStrategy();
}
@@ -179,16 +179,16 @@ public class CryptoModuleFactory {
try {
strategy = (SecretKeyEncryptionStrategy) keyEncryptionStrategyClazz.newInstance();
- log.debug("Successfully instantiated secret key encryption strategy " + className);
+ log.debug("Successfully instantiated secret key encryption strategy {}", className);
} catch (InstantiationException e) {
- log.warn(String.format("Got instantiation exception %s when instantiating secret key encryption strategy \"%s\". No encryption will be used.", e
- .getCause().getClass().getName(), className));
- log.warn("InstantiationException", e.getCause());
+ log.warn("Got instantiation exception {} when instantiating secret key encryption strategy \"{}\". No encryption will be used.", e
+ .getCause().getClass().getName(), className);
+ log.warn("InstantiationException", e.getCause());
return new NullSecretKeyEncryptionStrategy();
} catch (IllegalAccessException e) {
- log.warn(String.format("Got illegal access exception when trying to instantiate secret key encryption strategy \"%s\". No encryption will be used.",
- className));
+ log.warn("Got illegal access exception when trying to instantiate secret key encryption strategy \"{}\". No encryption will be used.",
+ className);
log.warn("IllegalAccessException", e);
return new NullSecretKeyEncryptionStrategy();
}
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
index 13104b2..7609bb0 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
@@ -401,7 +401,7 @@ public class DefaultCryptoModule implements CryptoModule {
if (params.getBlockStreamSize() > 0)
blockedDecryptingInputStream = new BlockedInputStream(blockedDecryptingInputStream, cipher.getBlockSize(), params.getBlockStreamSize());
- log.trace("Initialized cipher input stream with transformation [" + getCipherTransformation(params) + "]");
+ log.trace("Initialized cipher input stream with transformation [{}]", getCipherTransformation(params));
params.setPlaintextInputStream(blockedDecryptingInputStream);
diff --git a/core/src/main/java/org/apache/accumulo/core/trace/DistributedTrace.java b/core/src/main/java/org/apache/accumulo/core/trace/DistributedTrace.java
index 649cb3b..b0bd57f 100644
--- a/core/src/main/java/org/apache/accumulo/core/trace/DistributedTrace.java
+++ b/core/src/main/java/org/apache/accumulo/core/trace/DistributedTrace.java
@@ -161,10 +161,10 @@ public class DistributedTrace {
SpanReceiverBuilder builder = new SpanReceiverBuilder(wrapHadoopConf(conf));
SpanReceiver rcvr = builder.spanReceiverClass(className.trim()).build();
if (rcvr == null) {
- log.warn("Failed to load SpanReceiver " + className);
+ log.warn("Failed to load SpanReceiver {}", className);
} else {
receivers.add(rcvr);
- log.info("SpanReceiver " + className + " was loaded successfully.");
+ log.info("SpanReceiver {} was loaded successfully.", className);
}
}
for (SpanReceiver rcvr : receivers) {
@@ -199,7 +199,7 @@ public class DistributedTrace {
try {
rcvr.close();
} catch (IOException e) {
- log.warn("Unable to close SpanReceiver correctly: {}", e.getMessage(), e);
+ log.warn("Unable to close SpanReceiver correctly: {}", e.getMessage(), e);
}
}
receivers.clear();
diff --git a/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java b/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java
index a646cf2..a55ff0d 100644
--- a/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java
@@ -76,7 +76,7 @@ public class VolumeImpl implements Volume {
try {
other = p.getFileSystem(CachedConfiguration.getInstance());
} catch (IOException e) {
- log.warn("Could not determine filesystem from path: " + p);
+ log.warn("Could not determine filesystem from path: {}", p);
return false;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
index 07a38dc..bf21440 100644
--- a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
@@ -60,24 +60,24 @@ public class ZooUtil extends org.apache.accumulo.fate.zookeeper.ZooUtil {
} catch (FileNotFoundException ex) {
// ignored
}
- log.debug("Trying to read instance id from " + instanceDirectory);
+ log.debug("Trying to read instance id from {}", instanceDirectory);
if (files == null || files.length == 0) {
- log.error("unable obtain instance id at " + instanceDirectory);
+ log.error("unable obtain instance id at {}", instanceDirectory);
throw new RuntimeException("Accumulo not initialized, there is no instance id at " + instanceDirectory);
} else if (files.length != 1) {
- log.error("multiple potential instances in " + instanceDirectory);
+ log.error("multiple potential instances in {}", instanceDirectory);
throw new RuntimeException("Accumulo found multiple possible instance ids in " + instanceDirectory);
} else {
String result = files[0].getPath().getName();
return result;
}
} catch (IOException e) {
- log.error("Problem reading instance id out of hdfs at " + instanceDirectory, e);
+ log.error("Problem reading instance id out of hdfs at {}", instanceDirectory, e);
throw new RuntimeException("Can't tell if Accumulo is initialized; can't read instance id at " + instanceDirectory, e);
} catch (IllegalArgumentException exception) {
/* HDFS throws this when there's a UnknownHostException due to DNS troubles. */
if (exception.getCause() instanceof UnknownHostException) {
- log.error("Problem reading instance id out of hdfs at " + instanceDirectory, exception);
+ log.error("Problem reading instance id out of hdfs at {}", instanceDirectory, exception);
}
throw exception;
}
diff --git a/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java b/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
index 4547cd4..71643e9 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
public class BloomFilterLayerLookupTest {
- private static final Logger LOG = LoggerFactory.getLogger(BloomFilterLayerLookupTest.class);
+ private static final Logger log = LoggerFactory.getLogger(BloomFilterLayerLookupTest.class);
private static Random random = new Random();
@Rule
@@ -92,13 +92,13 @@ public class BloomFilterLayerLookupTest {
}
long t2 = System.currentTimeMillis();
- LOG.debug(String.format("write rate %6.2f%n", vals.size() / ((t2 - t1) / 1000.0)));
+ log.debug(String.format("write rate %6.2f%n", vals.size() / ((t2 - t1) / 1000.0)));
bmfw.close();
t1 = System.currentTimeMillis();
FileSKVIterator bmfr = FileOperations.getInstance().newReaderBuilder().forFile(fname, fs, conf).withTableConfiguration(acuconf).build();
t2 = System.currentTimeMillis();
- LOG.debug("Opened " + fname + " in " + (t2 - t1));
+ log.debug("Opened {} in {}", fname, (t2 - t1));
int hits = 0;
t1 = System.currentTimeMillis();
@@ -113,8 +113,8 @@ public class BloomFilterLayerLookupTest {
t2 = System.currentTimeMillis();
double rate1 = 5000 / ((t2 - t1) / 1000.0);
- LOG.debug(String.format("random lookup rate : %6.2f%n", rate1));
- LOG.debug("hits = " + hits);
+ log.debug(String.format("random lookup rate : %6.2f%n", rate1));
+ log.debug("hits = {}", hits);
int count = 0;
t1 = System.currentTimeMillis();
@@ -129,8 +129,8 @@ public class BloomFilterLayerLookupTest {
t2 = System.currentTimeMillis();
double rate2 = 500 / ((t2 - t1) / 1000.0);
- LOG.debug(String.format("existant lookup rate %6.2f%n", rate2));
- LOG.debug("expected hits 500. Receive hits: " + count);
+ log.debug(String.format("existant lookup rate %6.2f%n", rate2));
+ log.debug("expected hits 500. Receive hits: {}", count);
bmfr.close();
assertTrue(rate1 > rate2);
diff --git a/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java b/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
index 93c8a37..65938ac 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
@@ -226,7 +226,7 @@ public class AdminUtil<T> {
}
} catch (Exception e) {
- log.error("Failed to read locks for " + id + " continuing.", e);
+ log.error("Failed to read locks for {} continuing.", id, e);
}
}
diff --git a/fate/src/main/java/org/apache/accumulo/fate/AgeOffStore.java b/fate/src/main/java/org/apache/accumulo/fate/AgeOffStore.java
index 376dad4..aa31653 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/AgeOffStore.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/AgeOffStore.java
@@ -95,7 +95,7 @@ public class AgeOffStore<T> implements TStore<T> {
case FAILED:
case SUCCESSFUL:
store.delete(txid);
- log.debug("Aged off FATE tx " + String.format("%016x", txid));
+ log.debug("Aged off FATE tx {}", String.format("%016x", txid));
break;
default:
break;
@@ -105,7 +105,7 @@ public class AgeOffStore<T> implements TStore<T> {
store.unreserve(txid, 0);
}
} catch (Exception e) {
- log.warn("Failed to age off FATE tx " + String.format("%016x", txid), e);
+ log.warn("Failed to age off FATE tx {}", String.format("%016x", txid), e);
}
}
}
diff --git a/fate/src/main/java/org/apache/accumulo/fate/Fate.java b/fate/src/main/java/org/apache/accumulo/fate/Fate.java
index 4e482ec..9389428 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/Fate.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/Fate.java
@@ -121,7 +121,7 @@ public class Fate<T> {
}
store.setProperty(tid, EXCEPTION_PROP, e);
store.setStatus(tid, TStatus.FAILED_IN_PROGRESS);
- log.info("Updated status for Repo with tid=" + tidStr + " to FAILED_IN_PROGRESS");
+ log.info("Updated status for Repo with tid={} to FAILED_IN_PROGRESS", tidStr);
}
private void processFailed(long tid, Repo<T> op) {
@@ -152,7 +152,7 @@ public class Fate<T> {
try {
op.undo(tid, environment);
} catch (Exception e) {
- log.warn("Failed to undo Repo, tid=" + String.format("%016x", tid), e);
+ log.warn("Failed to undo Repo, tid={}", String.format("%016x", tid), e);
}
}
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java
index fe31011..b6e5be8 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java
@@ -145,7 +145,7 @@ public class DistributedReadWriteLock implements java.util.concurrent.locks.Read
public boolean tryLock() {
if (entry == -1) {
entry = qlock.addEntry(new ParsedLock(this.lockType(), this.userData).getLockData());
- log.info("Added lock entry " + entry + " userData " + new String(this.userData, UTF_8) + " lockType " + lockType());
+ log.info("Added lock entry {} userData {} lockType {}", entry, new String(this.userData, UTF_8), lockType());
}
SortedMap<Long,byte[]> entries = qlock.getEarlierEntries(entry);
for (Entry<Long,byte[]> entry : entries.entrySet()) {
@@ -177,7 +177,7 @@ public class DistributedReadWriteLock implements java.util.concurrent.locks.Read
public void unlock() {
if (entry == -1)
return;
- log.debug("Removing lock entry " + entry + " userData " + new String(this.userData, UTF_8) + " lockType " + lockType());
+ log.debug("Removing lock entry {} userData {} lockType {}", entry, new String(this.userData, UTF_8), lockType());
qlock.removeEntry(entry);
entry = -1;
}
@@ -207,7 +207,7 @@ public class DistributedReadWriteLock implements java.util.concurrent.locks.Read
public boolean tryLock() {
if (entry == -1) {
entry = qlock.addEntry(new ParsedLock(this.lockType(), this.userData).getLockData());
- log.info("Added lock entry " + entry + " userData " + new String(this.userData, UTF_8) + " lockType " + lockType());
+ log.info("Added lock entry {} userData {} lockType {}", entry, new String(this.userData, UTF_8), lockType());
}
SortedMap<Long,byte[]> entries = qlock.getEarlierEntries(entry);
Iterator<Entry<Long,byte[]>> iterator = entries.entrySet().iterator();
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/Retry.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/Retry.java
index e84b1af..006d162 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/Retry.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/Retry.java
@@ -107,7 +107,7 @@ public class Retry {
}
public void waitForNextAttempt() throws InterruptedException {
- log.debug("Sleeping for " + currentWait + "ms before retrying operation");
+ log.debug("Sleeping for {}ms before retrying operation", currentWait);
sleep(currentWait);
currentWait = Math.min(maxWait, currentWait + waitIncrement);
}
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/TransactionWatcher.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/TransactionWatcher.java
index b10ddea..25988bc 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/TransactionWatcher.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/TransactionWatcher.java
@@ -56,7 +56,7 @@ public class TransactionWatcher {
synchronized (counts) {
AtomicInteger count = counts.get(tid);
if (count == null) {
- log.error("unexpected missing count for transaction" + tid);
+ log.error("unexpected missing count for transaction {}", tid);
} else {
if (count.decrementAndGet() == 0)
counts.remove(tid);
@@ -67,7 +67,7 @@ public class TransactionWatcher {
public boolean isActive(long tid) {
synchronized (counts) {
- log.debug("Transactions in progress " + counts);
+ log.debug("Transactions in progress {}", counts);
AtomicInteger count = counts.get(tid);
return count != null && count.get() > 0;
}
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java
index 66234fb..712ac83 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java
@@ -105,12 +105,12 @@ public class ZooCache {
clear();
break;
default:
- log.warn("Unhandled: " + event);
+ log.warn("Unhandled: {}", event);
break;
}
break;
default:
- log.warn("Unhandled: " + event);
+ log.warn("Unhandled: {}", event);
break;
}
@@ -191,9 +191,9 @@ public class ZooCache {
} catch (KeeperException e) {
final Code code = e.code();
if (code == Code.NONODE) {
- log.error("Looked up non-existent node in cache " + e.getPath(), e);
+ log.error("Looked up non-existent node in cache {}", e.getPath(), e);
} else if (code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT || code == Code.SESSIONEXPIRED) {
- log.warn("Saw (possibly) transient exception communicating with ZooKeeper, will retry", e);
+ log.warn("Saw (possibly) transient exception communicating with ZooKeeper, will retry", e);
} else {
log.warn("Zookeeper error, will retry", e);
}
@@ -320,7 +320,7 @@ public class ZooCache {
byte[] data = null;
if (stat == null) {
if (log.isTraceEnabled()) {
- log.trace("zookeeper did not contain " + zPath);
+ log.trace("zookeeper did not contain {}", zPath);
}
} else {
try {
@@ -331,7 +331,7 @@ public class ZooCache {
throw new ConcurrentModificationException();
}
if (log.isTraceEnabled()) {
- log.trace("zookeeper contained " + zPath + " " + (data == null ? null : new String(data, UTF_8)));
+ log.trace("zookeeper contained {} {}", zPath, (data == null ? null : new String(data, UTF_8)));
}
}
put(zPath, data, stat);
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
index 90fb4aa..c102397 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooLock.java
@@ -146,7 +146,7 @@ public class ZooLock implements Watcher {
if (log.isTraceEnabled()) {
log.trace("Candidate lock nodes");
for (String child : children) {
- log.trace("- " + child);
+ log.trace("- {}", child);
}
}
@@ -172,26 +172,26 @@ public class ZooLock implements Watcher {
}
final String lockToWatch = path + "/" + prev;
- log.trace("Establishing watch on " + lockToWatch);
+ log.trace("Establishing watch on {}", lockToWatch);
Stat stat = zooKeeper.getStatus(lockToWatch, new Watcher() {
@Override
public void process(WatchedEvent event) {
if (log.isTraceEnabled()) {
log.trace("Processing event:");
- log.trace("- type " + event.getType());
- log.trace("- path " + event.getPath());
- log.trace("- state " + event.getState());
+ log.trace("- type {}", event.getType());
+ log.trace("- path {}", event.getPath());
+ log.trace("- state {}", event.getState());
}
boolean renew = true;
if (event.getType() == EventType.NodeDeleted && event.getPath().equals(lockToWatch)) {
- log.trace("Detected deletion of " + lockToWatch + ", attempting to acquire lock");
+ log.trace("Detected deletion of {}, attempting to acquire lock", lockToWatch);
synchronized (ZooLock.this) {
try {
if (asyncLock != null) {
lockAsync(myLock, lw);
} else if (log.isTraceEnabled()) {
- log.trace("While waiting for another lock " + lockToWatch + " " + myLock + " was deleted");
+ log.trace("While waiting for another lock {} {} was deleted", lockToWatch, myLock);
}
} catch (Exception e) {
if (lock == null) {
@@ -212,7 +212,7 @@ public class ZooLock implements Watcher {
renew = false;
}
if (renew) {
- log.trace("Renewing watch on " + lockToWatch);
+ log.trace("Renewing watch on {}", lockToWatch);
try {
Stat restat = zooKeeper.getStatus(lockToWatch, this);
if (restat == null) {
@@ -250,7 +250,7 @@ public class ZooLock implements Watcher {
try {
final String asyncLockPath = zooKeeper.putEphemeralSequential(path + "/" + LOCK_PREFIX, data);
- log.trace("Ephemeral node " + asyncLockPath + " created");
+ log.trace("Ephemeral node {} created", asyncLockPath);
Stat stat = zooKeeper.getStatus(asyncLockPath, new Watcher() {
private void failedToAcquireLock() {
@@ -266,7 +266,7 @@ public class ZooLock implements Watcher {
} else if (asyncLock != null && event.getType() == EventType.NodeDeleted && event.getPath().equals(path + "/" + asyncLock)) {
failedToAcquireLock();
} else if (event.getState() != KeeperState.Disconnected && event.getState() != KeeperState.Expired && (lock != null || asyncLock != null)) {
- log.debug("Unexpected event watching lock node " + event + " " + asyncLockPath);
+ log.debug("Unexpected event watching lock node {} {}", event, asyncLockPath);
try {
Stat stat2 = zooKeeper.getStatus(asyncLockPath, this);
if (stat2 == null) {
@@ -277,7 +277,7 @@ public class ZooLock implements Watcher {
}
} catch (Throwable e) {
lockWatcher.unableToMonitorLockNode(e);
- log.error("Failed to stat lock node " + asyncLockPath, e);
+ log.error("Failed to stat lock node {}", asyncLockPath, e);
}
}
@@ -371,7 +371,7 @@ public class ZooLock implements Watcher {
@Override
public synchronized void process(WatchedEvent event) {
- log.debug("event " + event.getPath() + " " + event.getType() + " " + event.getState());
+ log.debug("event {} {} {}", event.getPath(), event.getType(), event.getState());
watchingParent = false;
@@ -388,7 +388,7 @@ public class ZooLock implements Watcher {
} catch (Exception ex) {
if (lock != null || asyncLock != null) {
lockWatcher.unableToMonitorLockNode(ex);
- log.error("Error resetting watch on ZooLock " + lock == null ? asyncLock : lock + " " + event, ex);
+ log.error("Error resetting watch on ZooLock {} {}", lock == null ? asyncLock : lock, event, ex);
}
}
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReader.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReader.java
index bda8307..0f3c71d 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReader.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReader.java
@@ -58,7 +58,7 @@ public class ZooReader implements IZooReader {
return;
}
- log.error("Retry attempts (" + retry.retriesCompleted() + ") exceeded trying to communicate with ZooKeeper");
+ log.error("Retry attempts ({}) exceeded trying to communicate with ZooKeeper", retry.retriesCompleted());
throw e;
}
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReservation.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReservation.java
index 5cc2be1..58be8f5 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReservation.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReservation.java
@@ -60,7 +60,7 @@ public class ZooReservation {
zooData = zk.getData(path, null);
} catch (NoNodeException e) {
// Just logging a warning, if data is gone then our work here is done.
- LoggerFactory.getLogger(ZooReservation.class).debug("Node does not exist " + path);
+ LoggerFactory.getLogger(ZooReservation.class).debug("Node does not exist {}", path);
return;
}
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
index b9fedac..6dc8062 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
@@ -62,7 +62,7 @@ public class ZooSession {
@Override
public void process(WatchedEvent event) {
if (event.getState() == KeeperState.Expired) {
- log.debug("Session expired, state of current session : " + event.getState());
+ log.debug("Session expired, state of current session : {}", event.getState());
}
}
@@ -109,7 +109,7 @@ public class ZooSession {
*/
sleepTime = Math.max(sleepTime, (AddressUtil.getAddressCacheNegativeTtl((UnknownHostException) e) + 1) * 1000);
}
- log.warn("Connection to zooKeeper failed, will try again in " + String.format("%.2f secs", sleepTime / 1000.0), e);
+ log.warn("Connection to zooKeeper failed, will try again in {}", String.format("%.2f secs", sleepTime / 1000.0), e);
} finally {
if (tryAgain && zooKeeper != null)
try {
@@ -151,7 +151,7 @@ public class ZooSession {
String readOnlySessionKey = sessionKey(zooKeepers, timeout, null, null);
ZooSessionInfo zsi = sessions.get(sessionKey);
if (zsi != null && zsi.zooKeeper.getState() == States.CLOSED) {
- log.debug("Removing closed ZooKeeper session to " + zooKeepers);
+ log.debug("Removing closed ZooKeeper session to {}", zooKeepers);
if (auth != null && sessions.get(readOnlySessionKey) == zsi)
sessions.remove(readOnlySessionKey);
zsi = null;
@@ -160,7 +160,7 @@ public class ZooSession {
if (zsi == null) {
ZooWatcher watcher = new ZooWatcher();
- log.debug("Connecting to " + zooKeepers + " with timeout " + timeout + " with auth");
+ log.debug("Connecting to {} with timeout {} with auth", zooKeepers, timeout);
zsi = new ZooSessionInfo(connect(zooKeepers, timeout, scheme, auth, watcher), watcher);
sessions.put(sessionKey, zsi);
if (auth != null && !sessions.containsKey(readOnlySessionKey))
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
index 6ea10d0..41d255d 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
@@ -184,7 +184,7 @@ public class ZooUtil {
return;
}
- log.error("Retry attempts (" + retry.retriesCompleted() + ") exceeded trying to communicate with ZooKeeper");
+ log.error("Retry attempts ({}) exceeded trying to communicate with ZooKeeper", retry.retriesCompleted());
throw e;
}
diff --git a/minicluster/src/main/java/org/apache/accumulo/cluster/RemoteShell.java b/minicluster/src/main/java/org/apache/accumulo/cluster/RemoteShell.java
index 137b7f7..cb46402 100644
--- a/minicluster/src/main/java/org/apache/accumulo/cluster/RemoteShell.java
+++ b/minicluster/src/main/java/org/apache/accumulo/cluster/RemoteShell.java
@@ -80,7 +80,7 @@ public class RemoteShell extends ShellCommandExecutor {
String cmd = String.format("%1$s %2$s %3$s \"%4$s\"", options.getSshCommand(), options.getSshOptions(), hostWithUser, remoteCmd);
- log.debug("Executing full command [" + cmd + "]");
+ log.debug("Executing full command [{}]", cmd);
return new String[] {"/usr/bin/env", "bash", "-c", cmd};
}
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
index 93800da..2d70e87 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
@@ -712,7 +712,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
// the single thread executor shouldn't have any pending tasks, but check anyways
if (!tasksRemaining.isEmpty()) {
- log.warn("Unexpectedly had " + tasksRemaining.size() + " task(s) remaining in threadpool for execution when being stopped");
+ log.warn("Unexpectedly had {} task(s) remaining in threadpool for execution when being stopped", tasksRemaining.size());
}
executor = null;
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
index 0f4634d..8505146 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
@@ -214,7 +214,7 @@ public class MiniAccumuloConfigImpl {
try {
CredentialProviderFactoryShim.createEntry(conf, entry.getKey(), entry.getValue().toCharArray());
} catch (IOException e) {
- log.warn("Attempted to add " + entry.getKey() + " to CredentialProvider but failed", e);
+ log.warn("Attempted to add {} to CredentialProvider but failed", entry.getKey(), e);
continue;
}
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
index 7ff42fe..fcbef6b 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
@@ -177,7 +177,7 @@ public class Proxy implements KeywordExecutable {
throw new RuntimeException();
} finally {
if (!folder.delete())
- log.warn("Unexpected error removing " + folder);
+ log.warn("Unexpected error removing {}", folder);
}
}
});
@@ -194,7 +194,7 @@ public class Proxy implements KeywordExecutable {
while (!server.server.isServing()) {
Thread.sleep(100);
}
- log.info("Proxy server started on " + server.getAddress());
+ log.info("Proxy server started on {}", server.getAddress());
while (server.server.isServing()) {
Thread.sleep(1000);
}
@@ -267,7 +267,7 @@ public class Proxy implements KeywordExecutable {
}
UserGroupInformation.loginUserFromKeytab(kerberosPrincipal, kerberosKeytab);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- log.info("Logged in as " + ugi.getUserName());
+ log.info("Logged in as {}", ugi.getUserName());
// The kerberosPrimary set in the SASL server needs to match the principal we're logged in as.
final String shortName = ugi.getShortUserName();
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 45ae8f5..5bd4dce 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -1852,7 +1852,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
if (ThriftServerType.SASL == serverType) {
String remoteUser = UGIAssumingProcessor.rpcPrincipal();
if (null == remoteUser || !remoteUser.equals(principal)) {
- logger.error("Denying login from user " + remoteUser + " who attempted to log in as " + principal);
+ logger.error("Denying login from user {} who attempted to log in as {}", remoteUser, principal);
throw new org.apache.accumulo.proxy.thrift.AccumuloSecurityException(RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG);
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index 9a4a434..802ff93 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@ -48,18 +48,20 @@ import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.log4j.Logger;
+import org.apache.log4j.helpers.LogLog;
import org.apache.zookeeper.KeeperException;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
public class Accumulo {
- private static final Logger log = Logger.getLogger(Accumulo.class);
+ private static final Logger log = LoggerFactory.getLogger(Accumulo.class);
public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) {
for (Volume volume : fs.getVolumes()) {
try {
if (getAccumuloPersistentVersion(fs) == oldVersion) {
- log.debug("Attempting to upgrade " + volume);
+ log.debug("Attempting to upgrade {}", volume);
Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume);
fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))).close();
// TODO document failure mode & recovery if FS permissions cause above to work and below to fail ACCUMULO-2596
@@ -105,10 +107,10 @@ public class Accumulo {
public static void init(VolumeManager fs, Instance instance, ServerConfigurationFactory serverConfig, String application) throws IOException {
final AccumuloConfiguration conf = serverConfig.getSystemConfiguration();
- log.info(application + " starting");
- log.info("Instance " + instance.getInstanceID());
+ log.info("{} starting", application);
+ log.info("Instance {}", instance.getInstanceID());
int dataVersion = Accumulo.getAccumuloPersistentVersion(fs);
- log.info("Data Version " + dataVersion);
+ log.info("Data Version {}", dataVersion);
Accumulo.waitForZookeeperAndHdfs(fs);
if (!(canUpgradeFromDataVersion(dataVersion))) {
@@ -121,7 +123,7 @@ public class Accumulo {
for (Entry<String,String> entry : sortedProps.entrySet()) {
String key = entry.getKey();
- log.info(key + " = " + (Property.isSensitive(key) ? "<hidden>" : entry.getValue()));
+ log.info("{} = {}", key, (Property.isSensitive(key) ? "<hidden>" : entry.getValue()));
}
monitorSwappiness(conf);
@@ -132,7 +134,7 @@ public class Accumulo {
Property.MONITOR_SSL_INCLUDE_PROTOCOLS)) {
String value = conf.get(sslProtocolProperty);
if (value.contains(SSL)) {
- log.warn("It is recommended that " + sslProtocolProperty + " only allow TLS");
+ log.warn("It is recommended that {} only allow TLS", sslProtocolProperty);
}
}
}
@@ -172,15 +174,15 @@ public class Accumulo {
String setting = new String(buffer, 0, bytes, UTF_8);
setting = setting.trim();
if (bytes > 0 && Integer.parseInt(setting) > 10) {
- log.warn("System swappiness setting is greater than ten (" + setting + ") which can cause time-sensitive operations to be delayed. "
- + " Accumulo is time sensitive because it needs to maintain distributed lock agreement.");
+ log.warn("System swappiness setting is greater than ten ({}) which can cause time-sensitive operations to be delayed. "
+ + " Accumulo is time sensitive because it needs to maintain distributed lock agreement.", setting);
}
} finally {
is.close();
}
}
} catch (Throwable t) {
- log.error(t, t);
+ log.error("", t);
}
}
}, 1000, 10 * 60 * 1000);
@@ -213,9 +215,9 @@ public class Accumulo {
/* Unwrap the UnknownHostException so we can deal with it directly */
if (exception.getCause() instanceof UnknownHostException) {
if (unknownHostTries > 0) {
- log.warn("Unable to connect to HDFS, will retry. cause: " + exception.getCause());
+ log.warn("Unable to connect to HDFS, will retry. cause: {}", exception.getCause());
/* We need to make sure our sleep period is long enough to avoid getting a cached failure of the host lookup. */
- sleep = Math.max(sleep, (AddressUtil.getAddressCacheNegativeTtl((UnknownHostException) (exception.getCause())) + 1) * 1000);
+ sleep = Math.max(sleep, (org.apache.accumulo.fate.util.AddressUtil.getAddressCacheNegativeTtl((UnknownHostException) (exception.getCause())) + 1) * 1000);
} else {
log.error("Unable to connect to HDFS and have exceeded the maximum number of retries.", exception);
throw exception;
@@ -225,7 +227,7 @@ public class Accumulo {
throw exception;
}
}
- log.info("Backing off due to failure; current sleep period is " + sleep / 1000. + " seconds");
+ log.info("Backing off due to failure; current sleep period is {} seconds", sleep / 1000.);
sleepUninterruptibly(sleep, TimeUnit.MILLISECONDS);
/* Back off to give transient failures more time to clear. */
sleep = Math.min(60 * 1000, sleep * 2);
@@ -253,7 +255,7 @@ public class Accumulo {
+ "Please see the README document for instructions on what to do under your previous version.");
}
} catch (Exception exception) {
- log.fatal("Problem verifying Fate readiness", exception);
+ log.error("Problem verifying Fate readiness", exception);
System.exit(1);
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 16ca6aa..f17deed 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -147,9 +147,9 @@ public class BulkImporter {
try {
tabletsToAssignMapFileTo = findOverlappingTablets(context, fs, locator, mapFile);
} catch (Exception ex) {
- log.warn("Unable to find tablets that overlap file " + mapFile.toString(), ex);
+ log.warn("Unable to find tablets that overlap file {}", mapFile.toString(), ex);
}
- log.debug("Map file " + mapFile + " found to overlap " + tabletsToAssignMapFileTo.size() + " tablets");
+ log.debug("Map file {} found to overlap {} tablets", mapFile, tabletsToAssignMapFileTo.size());
if (tabletsToAssignMapFileTo.size() == 0) {
List<KeyExtent> empty = Collections.emptyList();
completeFailures.put(mapFile, empty);
@@ -192,7 +192,7 @@ public class BulkImporter {
sleepUninterruptibly(sleepTime, TimeUnit.MILLISECONDS);
timer.stop(Timers.SLEEP);
- log.debug("Trying to assign " + assignmentFailures.size() + " map files that previously failed on some key extents");
+ log.debug("Trying to assign {} map files that previously failed on some key extents", assignmentFailures.size());
assignments.clear();
// for failed key extents, try to find children key extents to
@@ -210,7 +210,7 @@ public class BulkImporter {
tabletsToAssignMapFileTo.addAll(findOverlappingTablets(context, fs, locator, entry.getKey(), ke));
keListIter.remove();
} catch (Exception ex) {
- log.warn("Exception finding overlapping tablets, will retry tablet " + ke, ex);
+ log.warn("Exception finding overlapping tablets, will retry tablet {}", ke, ex);
}
timer.stop(Timers.QUERY_METADATA);
}
@@ -246,7 +246,7 @@ public class BulkImporter {
for (Entry<Path,Integer> entry : failureIter) {
int retries = context.getConfiguration().getCount(Property.TSERV_BULK_RETRY);
if (entry.getValue() > retries && assignmentFailures.get(entry.getKey()) != null) {
- log.error("Map file " + entry.getKey() + " failed more than " + retries + " times, giving up.");
+ log.error("Map file {} failed more than {} times, giving up.", entry.getKey(), retries);
completeFailures.put(entry.getKey(), assignmentFailures.get(entry.getKey()));
assignmentFailures.remove(entry.getKey());
}
@@ -281,7 +281,7 @@ public class BulkImporter {
Collections.sort(files);
log.debug("BULK IMPORT TIMING STATISTICS");
- log.debug("Files: " + files);
+ log.debug("Files: {}", files);
log.debug(String.format("Examine map files : %,10.2f secs %6.2f%s", timer.getSecs(Timers.EXAMINE_MAP_FILES), 100.0 * timer.get(Timers.EXAMINE_MAP_FILES)
/ timer.get(Timers.TOTAL), "%"));
log.debug(String.format("Query %-14s : %,10.2f secs %6.2f%s", MetadataTable.NAME, timer.getSecs(Timers.QUERY_METADATA),
@@ -310,7 +310,7 @@ public class BulkImporter {
List<KeyExtent> extents = entry.getValue();
for (KeyExtent keyExtent : extents)
- log.debug("\t" + entry.getKey() + " -> " + keyExtent);
+ log.debug("\t{} -> {}", entry.getKey(), keyExtent);
}
return Collections.emptySet();
@@ -345,7 +345,7 @@ public class BulkImporter {
mapFileSizes.put(path, fs.getContentSummary(path).getLength());
}
} catch (IOException e) {
- log.error("Failed to get map files in for {}: {}", paths, e.getMessage(), e);
+ log.error("Failed to get map files in for {}: {}", paths, e.getMessage(), e);
throw new RuntimeException(e);
}
@@ -464,7 +464,7 @@ public class BulkImporter {
}
}
- log.info("Could not assign {} map files to tablet {} because : {} . Will retry ...", mapFiles.size(), ke, message);
+ log.info("Could not assign {} map files to tablet {} because : {}. Will retry ...", mapFiles.size(), ke, message);
}
}
@@ -475,7 +475,7 @@ public class BulkImporter {
for (PathSize ps : mapFiles)
uniqMapFiles.add(ps.path);
- log.debug("Assigning " + uniqMapFiles.size() + " map files to " + assignmentsPerTablet.size() + " tablets at " + location);
+ log.debug("Assigning {} map files to {} tablets at {}", uniqMapFiles.size(), assignmentsPerTablet.size(), location);
try {
List<KeyExtent> failures = assignMapFiles(context, location, assignmentsPerTablet);
@@ -546,7 +546,7 @@ public class BulkImporter {
}
}
- log.warn("Could not assign " + entry.getValue().size() + " map files to tablet " + ke + " because it had no location, will retry ...");
+ log.warn("Could not assign {} map files to tablet {} because it had no location, will retry ...", entry.getValue().size(), ke);
continue;
}
@@ -598,7 +598,7 @@ public class BulkImporter {
}
}
- log.debug("Asking " + location + " to bulk load " + files);
+ log.debug("Asking {} to bulk load {}", location, files);
List<TKeyExtent> failures = client.bulkImport(Tracer.traceInfo(), context.rpcCreds(), tid, Translator.translate(files, Translators.KET), setTime);
return Translator.translate(failures, Translators.TKET);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java b/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
index abfb160..9c6884d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
@@ -309,7 +309,7 @@ public class ClientServiceHandler implements ClientService.Iface {
if (!security.canPerformSystemActions(credentials))
throw new AccumuloSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
bulkImportStatus.updateBulkImportStatus(files, BulkImportState.INITIAL);
- log.debug("Got request to bulk import files to table(" + tableId + "): " + files);
+ log.debug("Got request to bulk import files to table({}): {}", tableId, files);
return transactionWatcher.run(Constants.BULK_ARBITRATOR_TYPE, tid, new Callable<List<String>>() {
@Override
public List<String> call() throws Exception {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooCachePropertyAccessor.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooCachePropertyAccessor.java
index 68b1847..a51552e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooCachePropertyAccessor.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooCachePropertyAccessor.java
@@ -103,7 +103,7 @@ public class ZooCachePropertyAccessor {
if (value == null || !property.getType().isValidFormat(value)) {
if (value != null) {
- log.error("Using default value for " + key + " due to improperly formatted " + property.getType() + ": " + value);
+ log.error("Using default value for {} due to improperly formatted {}: {}", key, property.getType(), value);
}
if (parent != null) {
value = parent.get(property);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
index 51f713c..1648c9b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
@@ -71,7 +71,7 @@ public class ZooConfiguration extends AccumuloConfiguration {
if (value == null || !property.getType().isValidFormat(value)) {
if (value != null)
- log.error("Using parent value for " + key + " due to improperly formatted " + property.getType() + ": " + value);
+ log.error("Using parent value for {} due to improperly formatted {}: {}", key, property.getType(), value);
value = parent.get(property);
}
return value;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
index 4aca493..0143f24 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
@@ -81,8 +81,8 @@ public class PreferredVolumeChooser extends RandomVolumeChooser {
if (log.isTraceEnabled()) {
log.trace("In custom chooser");
- log.trace("Volumes: " + volumes);
- log.trace("TableID: " + env.getTableId());
+ log.trace("Volumes: {}", volumes);
+ log.trace("TableID: {}", env.getTableId());
}
// If the preferred volumes property was specified, split the returned string by the comma and add use it to filter the given options.
Set<String> preferred = parsedPreferredVolumes.get(volumes);
@@ -104,7 +104,7 @@ public class PreferredVolumeChooser extends RandomVolumeChooser {
// Randomly choose the volume from the preferred volumes
String choice = super.choose(env, filteredOptions.toArray(EMPTY_STRING_ARRAY));
if (log.isTraceEnabled()) {
- log.trace("Choice = " + choice);
+ log.trace("Choice = {}", choice);
}
return choice;
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index 83d4b78..4495e79 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -172,7 +172,7 @@ public class VolumeManagerImpl implements VolumeManager {
blockSize = correctBlockSize(fs.getConf(), blockSize);
bufferSize = correctBufferSize(fs.getConf(), bufferSize);
EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
- log.debug("creating " + logPath + " with CreateFlag set: " + set);
+ log.debug("creating {} with CreateFlag set: {}", logPath, set);
try {
return fs.create(logPath, FsPermission.getDefault(), set, bufferSize, replication, blockSize, null);
} catch (Exception ex) {
@@ -215,7 +215,7 @@ public class VolumeManagerImpl implements VolumeManager {
synchronized (WARNED_ABOUT_SYNCONCLOSE) {
if (!WARNED_ABOUT_SYNCONCLOSE.contains(entry.getKey())) {
WARNED_ABOUT_SYNCONCLOSE.add(entry.getKey());
- log.warn(DFS_DATANODE_SYNCONCLOSE + " set to false in hdfs-site.xml: data loss is possible on hard system reset or power loss");
+ log.warn("{} set to false in hdfs-site.xml: data loss is possible on hard system reset or power loss", DFS_DATANODE_SYNCONCLOSE);
}
}
}
@@ -247,7 +247,7 @@ public class VolumeManagerImpl implements VolumeManager {
}
}
} else {
- log.debug("Could not determine volume for Path: " + path);
+ log.debug("Could not determine volume for Path: {}", path);
}
return new NonConfiguredVolume(desiredFs);
@@ -471,8 +471,8 @@ public class VolumeManagerImpl implements VolumeManager {
public String choose(VolumeChooserEnvironment env, String[] options) {
final String choice = chooser.choose(env, options);
if (!(ArrayUtils.contains(options, choice))) {
- log.error("The configured volume chooser, '" + chooser.getClass() + "', or one of its delegates returned a volume not in the set of options provided; "
- + "will continue by relying on a RandomVolumeChooser. You should investigate and correct the named chooser.");
+ log.error("The configured volume chooser, '{}', or one of its delegates returned a volume not in the set of options provided; "
+ + "will continue by relying on a RandomVolumeChooser. You should investigate and correct the named chooser.", chooser.getClass());
return failsafeChooser.choose(env, options);
}
return choice;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index 95146c2..adfd9dd 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -106,12 +106,12 @@ public class VolumeUtil {
if (key.equals(volume)) {
String replacement = new Path(pair.getSecond(), ft.removeVolume(p)).toString();
- log.trace("Replacing " + path + " with " + replacement);
+ log.trace("Replacing {} with {}", path, replacement);
return replacement;
}
}
- log.trace("Could not find replacement for " + ft + " at " + path);
+ log.trace("Could not find replacement for {} at {}", ft, path);
return null;
}
@@ -134,13 +134,13 @@ public class VolumeUtil {
}
if (numSwitched == 0) {
- log.trace("Did not switch " + le);
+ log.trace("Did not switch {}", le);
return null;
}
LogEntry newLogEntry = new LogEntry(le.extent, le.timestamp, le.server, switchedPath);
- log.trace("Switched " + le + " to " + newLogEntry);
+ log.trace("Switched {} to {}", le, newLogEntry);
return newLogEntry;
}
@@ -166,7 +166,7 @@ public class VolumeUtil {
String newLocation = switchVolume(location, FileType.TABLE, ServerConstants.getVolumeReplacements());
if (newLocation != null) {
MetadataTableUtil.setRootTabletDir(newLocation);
- log.info("Volume replaced: " + location + " -> " + newLocation);
+ log.info("Volume replaced: {} -> {}", location, newLocation);
return new Path(newLocation).toString();
}
return location;
@@ -179,7 +179,7 @@ public class VolumeUtil {
public static TabletFiles updateTabletVolumes(AccumuloServerContext context, ZooLock zooLock, VolumeManager vm, KeyExtent extent, TabletFiles tabletFiles,
boolean replicate) throws IOException {
List<Pair<Path,Path>> replacements = ServerConstants.getVolumeReplacements();
- log.trace("Using volume replacements: " + replacements);
+ log.trace("Using volume replacements: {}", replacements);
List<LogEntry> logsToRemove = new ArrayList<>();
List<LogEntry> logsToAdd = new ArrayList<>();
@@ -195,7 +195,7 @@ public class VolumeUtil {
logsToRemove.add(logEntry);
logsToAdd.add(switchedLogEntry);
ret.logEntries.add(switchedLogEntry);
- log.debug("Replacing volume " + extent + " : " + logEntry.filename + " -> " + switchedLogEntry.filename);
+ log.debug("Replacing volume {} : {} -> {}", extent, logEntry.filename, switchedLogEntry.filename);
} else {
ret.logEntries.add(logEntry);
}
@@ -212,7 +212,7 @@ public class VolumeUtil {
FileRef switchedRef = new FileRef(switchedPath, new Path(switchedPath));
filesToAdd.put(switchedRef, entry.getValue());
ret.datafiles.put(switchedRef, entry.getValue());
- log.debug("Replacing volume " + extent + " : " + metaPath + " -> " + switchedPath);
+ log.debug("Replacing volume {} : {} -> {}", extent, metaPath, switchedPath);
} else {
ret.datafiles.put(entry.getKey(), entry.getValue());
}
@@ -223,7 +223,7 @@ public class VolumeUtil {
String switchedDir = switchVolume(tabletDir, FileType.TABLE, replacements);
if (switchedDir != null) {
- log.debug("Replacing volume " + extent + " : " + tabletDir + " -> " + switchedDir);
+ log.debug("Replacing volume {} : {} -> {}", extent, tabletDir, switchedDir);
tabletDir = switchedDir;
}
@@ -231,7 +231,7 @@ public class VolumeUtil {
MetadataTableUtil.updateTabletVolumes(extent, logsToRemove, logsToAdd, filesToRemove, filesToAdd, switchedDir, zooLock, context);
if (replicate) {
Status status = StatusUtil.fileClosed();
- log.debug("Tablet directory switched, need to record old log files " + logsToRemove + " " + ProtobufUtil.toString(status));
+ log.debug("Tablet directory switched, need to record old log files {} {}", logsToRemove, ProtobufUtil.toString(status));
// Before deleting these logs, we need to mark them for replication
for (LogEntry logEntry : logsToRemove) {
ReplicationTableUtil.updateFiles(context, extent, logEntry.filename, status);
@@ -266,7 +266,7 @@ public class VolumeUtil {
Path newDir = new Path(vm.choose(chooserEnv, ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR
+ dir.getParent().getName() + Path.SEPARATOR + dir.getName());
- log.info("Updating directory for " + extent + " from " + dir + " to " + newDir);
+ log.info("Updating directory for {} from {} to {}", extent, dir, newDir);
if (extent.isRootTablet()) {
// the root tablet is special case, its files need to be copied if its dir is changed
@@ -280,30 +280,30 @@ public class VolumeUtil {
Path newDirBackup = getBackupName(fs2, newDir);
// never delete anything because were dealing with the root tablet
// one reason this dir may exist is because this method failed previously
- log.info("renaming " + newDir + " to " + newDirBackup);
+ log.info("renaming {} to {}", newDir, newDirBackup);
if (!fs2.rename(newDir, newDirBackup)) {
throw new IOException("Failed to rename " + newDir + " to " + newDirBackup);
}
}
// do a lot of logging since this is the root tablet
- log.info("copying " + dir + " to " + newDir);
+ log.info("copying {} to {}", dir, newDir);
if (!FileUtil.copy(fs1, dir, fs2, newDir, false, CachedConfiguration.getInstance())) {
throw new IOException("Failed to copy " + dir + " to " + newDir);
}
// only set the new location in zookeeper after a successful copy
- log.info("setting root tablet location to " + newDir);
+ log.info("setting root tablet location to {}", newDir);
MetadataTableUtil.setRootTabletDir(newDir.toString());
// rename the old dir to avoid confusion when someone looks at filesystem... its ok if we fail here and this does not happen because the location in
// zookeeper is the authority
Path dirBackup = getBackupName(fs1, dir);
- log.info("renaming " + dir + " to " + dirBackup);
+ log.info("renaming {} to {}", dir, dirBackup);
fs1.rename(dir, dirBackup);
} else {
- log.info("setting root tablet location to " + newDir);
+ log.info("setting root tablet location to {}", newDir);
MetadataTableUtil.setRootTabletDir(newDir.toString());
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 42117ba..1cf1859 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -224,9 +224,9 @@ public class Initialize implements KeywordExecutable {
String fsUri = sconf.get(Property.INSTANCE_DFS_URI);
if (fsUri.equals(""))
fsUri = FileSystem.getDefaultUri(conf).toString();
- log.info("Hadoop Filesystem is " + fsUri);
- log.info("Accumulo data dirs are " + Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance())));
- log.info("Zookeeper server is " + sconf.get(Property.INSTANCE_ZK_HOST));
+ log.info("Hadoop Filesystem is {}", fsUri);
+ log.info("Accumulo data dirs are {}", Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance())));
+ log.info("Zookeeper server is {}", sconf.get(Property.INSTANCE_ZK_HOST));
log.info("Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
if (!zookeeperAvailable()) {
// ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
@@ -265,7 +265,7 @@ public class Initialize implements KeywordExecutable {
Property INSTANCE_DFS_URI = Property.INSTANCE_DFS_URI;
String instanceDfsDir = sconf.get(INSTANCE_DFS_DIR);
// ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
- log.error("FATAL It appears the directories " + Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance()))
+ log.error("FATAL It appears the directories {} were previously initialized.",
+ Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance())));
String instanceVolumes = sconf.get(Property.INSTANCE_VOLUMES);
String instanceDfsUri = sconf.get(INSTANCE_DFS_URI);
@@ -273,16 +273,16 @@ public class Initialize implements KeywordExecutable {
// ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
if (!instanceVolumes.isEmpty()) {
- log.error("FATAL: Change the property " + Property.INSTANCE_VOLUMES + " to use different filesystems,");
+ log.error("FATAL: Change the property {} to use different filesystems,", Property.INSTANCE_VOLUMES);
} else if (!instanceDfsDir.isEmpty()) {
- log.error("FATAL: Change the property " + INSTANCE_DFS_URI + " to use a different filesystem,");
+ log.error("FATAL: Change the property {} to use a different filesystem,", INSTANCE_DFS_URI);
} else {
- log.error("FATAL: You are using the default URI for the filesystem. Set the property " + Property.INSTANCE_VOLUMES + " to use a different filesystem,");
+ log.error("FATAL: You are using the default URI for the filesystem. Set the property {} to use a different filesystem,", Property.INSTANCE_VOLUMES);
}
- log.error("FATAL: or change the property " + INSTANCE_DFS_DIR + " to use a different directory.");
- log.error("FATAL: The current value of " + INSTANCE_DFS_URI + " is |" + instanceDfsUri + "|");
- log.error("FATAL: The current value of " + INSTANCE_DFS_DIR + " is |" + instanceDfsDir + "|");
- log.error("FATAL: The current value of " + Property.INSTANCE_VOLUMES + " is |" + instanceVolumes + "|");
+ log.error("FATAL: or change the property {} to use a different directory.", INSTANCE_DFS_DIR);
+ log.error("FATAL: The current value of {} is |{}|", INSTANCE_DFS_URI, instanceDfsUri);
+ log.error("FATAL: The current value of {} is |{}|", INSTANCE_DFS_DIR, instanceDfsDir);
+ log.error("FATAL: The current value of {} is |{}|", Property.INSTANCE_VOLUMES, instanceVolumes);
}
public boolean doInit(Opts opts, Configuration conf, VolumeManager fs) throws IOException {
@@ -348,7 +348,7 @@ public class Initialize implements KeywordExecutable {
// Try to determine when we couldn't find an appropriate core-site.xml on the classpath
if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
- log.error("FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '" + defaultFsUri + "' was found in the Hadoop configuration");
+ log.error("FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '{}' was found in the Hadoop configuration", defaultFsUri);
log.error("FATAL: Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
}
}
@@ -375,7 +375,7 @@ public class Initialize implements KeywordExecutable {
return false;
}
- log.info("Logging in as " + accumuloPrincipal + " with " + accumuloKeytab);
+ log.info("Logging in as {} with {}", accumuloPrincipal, accumuloKeytab);
// Login using the keytab as the 'accumulo' user
UserGroupInformation.loginUserFromKeytab(accumuloPrincipal, accumuloKeytab);
@@ -415,7 +415,7 @@ public class Initialize implements KeywordExecutable {
fs.mkdirs(iidLocation);
fs.createNewFile(new Path(iidLocation, uuid.toString()));
if (print)
- log.info("Initialized volume " + baseDir);
+ log.info("Initialized volume {}", baseDir);
}
}
@@ -505,13 +505,13 @@ public class Initialize implements KeywordExecutable {
try {
FileStatus fstat = fs.getFileStatus(dir);
if (!fstat.isDirectory()) {
- log.error("FATAL: location " + dir + " exists but is not a directory");
+ log.error("FATAL: location {} exists but is not a directory", dir);
return;
}
} catch (FileNotFoundException fnfe) {
// attempt to create directory, since it doesn't exist
if (!fs.mkdirs(dir)) {
- log.error("FATAL: unable to create directory " + dir);
+ log.error("FATAL: unable to create directory {}", dir);
return;
}
}
@@ -721,8 +721,8 @@ public class Initialize implements KeywordExecutable {
UUID uuid = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, SiteConfiguration.getInstance()));
for (Pair<Path,Path> replacementVolume : ServerConstants.getVolumeReplacements()) {
if (aBasePath.equals(replacementVolume.getFirst()))
- log.error(aBasePath + " is set to be replaced in " + Property.INSTANCE_VOLUMES_REPLACEMENTS + " and should not appear in " + Property.INSTANCE_VOLUMES
- + ". It is highly recommended that this property be removed as data could still be written to this volume.");
+ log.error("{} is set to be replaced in {} and should not appear in {}"
+ + ". It is highly recommended that this property be removed as data could still be written to this volume.", aBasePath, Property.INSTANCE_VOLUMES_REPLACEMENTS, Property.INSTANCE_VOLUMES);
}
if (ServerConstants.DATA_VERSION != Accumulo.getAccumuloPersistentVersion(versionPath.getFileSystem(CachedConfiguration.getInstance()), versionPath)) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java b/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
index b07551f..b88fcb0 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
@@ -415,7 +415,7 @@ public class LiveTServerSet implements Watcher {
current.remove(zPath);
currentInstances.remove(server);
- log.info("Removing zookeeper lock for " + server);
+ log.info("Removing zookeeper lock for {}", server);
String fullpath = ZooUtil.getRoot(context.getInstance()) + Constants.ZTSERVERS + "/" + zPath;
try {
ZooReaderWriter.getInstance().recursiveDelete(fullpath, SKIP);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
index bbf2b80..b27cb36 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
@@ -86,22 +86,22 @@ public class TableLoadBalancer extends TabletBalancer {
balancer.init(this.context);
}
- log.info("Loaded new class " + clazzName + " for table " + tableId);
+ log.info("Loaded new class {} for table {}", clazzName, tableId);
} catch (Exception e) {
- log.warn("Failed to load table balancer class " + clazzName + " for table " + tableId, e);
+ log.warn("Failed to load table balancer class {} for table {}", clazzName, tableId, e);
}
}
}
if (balancer == null) {
try {
balancer = constructNewBalancerForTable(clazzName, tableId);
- log.info("Loaded class " + clazzName + " for table " + tableId);
+ log.info("Loaded class {} for table {}", clazzName, tableId);
} catch (Exception e) {
- log.warn("Failed to load table balancer class " + clazzName + " for table " + tableId, e);
+ log.warn("Failed to load table balancer class {} for table {}", clazzName, tableId, e);
}
if (balancer == null) {
- log.info("Using balancer " + DefaultLoadBalancer.class.getName() + " for table " + tableId);
+ log.info("Using balancer {} for table {}", DefaultLoadBalancer.class.getName(), tableId);
balancer = new DefaultLoadBalancer(tableId);
}
perTableBalancers.put(tableId, balancer);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
index 00372fa..03e9ea9 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
@@ -162,9 +162,9 @@ public abstract class TabletBalancer {
@Override
public void run() {
- balancerLog.warn("Not balancing due to " + migrations.size() + " outstanding migrations.");
+ balancerLog.warn("Not balancing due to {} outstanding migrations.", migrations.size());
/* TODO ACCUMULO-2938 redact key extents in this output to avoid leaking protected information. */
- balancerLog.debug("Sample up to 10 outstanding migrations: " + Iterables.limit(migrations, 10));
+ balancerLog.debug("Sample up to 10 outstanding migrations: {}", Iterables.limit(migrations, 10));
}
}
@@ -206,12 +206,12 @@ public abstract class TabletBalancer {
* any other problem
*/
public List<TabletStats> getOnlineTabletsForTable(TServerInstance tserver, Table.ID tableId) throws ThriftSecurityException, TException {
- log.debug("Scanning tablet server " + tserver + " for table " + tableId);
+ log.debug("Scanning tablet server {} for table {}", tserver, tableId);
Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), tserver.getLocation(), context);
try {
return client.getTabletStats(Tracer.traceInfo(), context.rpcCreds(), tableId.canonicalID());
} catch (TTransportException e) {
- log.error("Unable to connect to " + tserver + ": " + e);
+ log.error("Unable to connect to {}", tserver, e);
} finally {
ThriftUtil.returnClient(client);
}
@@ -231,23 +231,23 @@ public abstract class TabletBalancer {
List<TabletMigration> result = new ArrayList<>(migrations.size());
for (TabletMigration m : migrations) {
if (m.tablet == null) {
- log.warn("Balancer gave back a null tablet " + m);
+ log.warn("Balancer gave back a null tablet {}", m);
continue;
}
if (m.newServer == null) {
- log.warn("Balancer did not set the destination " + m);
+ log.warn("Balancer did not set the destination {}", m);
continue;
}
if (m.oldServer == null) {
- log.warn("Balancer did not set the source " + m);
+ log.warn("Balancer did not set the source {}", m);
continue;
}
if (!current.contains(m.oldServer)) {
- log.warn("Balancer wants to move a tablet from a server that is not current: " + m);
+ log.warn("Balancer wants to move a tablet from a server that is not current: {}", m);
continue;
}
if (!current.contains(m.newServer)) {
- log.warn("Balancer wants to move a tablet to a server that is not current: " + m);
+ log.warn("Balancer wants to move a tablet to a server that is not current: {}", m);
continue;
}
result.add(m);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
index ba57dcc..5b778bd 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
@@ -53,16 +53,16 @@ public class HadoopLogCloser implements LogCloser {
DistributedFileSystem dfs = (DistributedFileSystem) ns;
try {
if (!dfs.recoverLease(source)) {
- log.info("Waiting for file to be closed " + source.toString());
+ log.info("Waiting for file to be closed {}", source.toString());
return conf.getTimeInMillis(Property.MASTER_LEASE_RECOVERY_WAITING_PERIOD);
}
- log.info("Recovered lease on " + source.toString());
+ log.info("Recovered lease on {}", source.toString());
} catch (FileNotFoundException ex) {
throw ex;
} catch (Exception ex) {
- log.warn("Error recovering lease on " + source.toString(), ex);
+ log.warn("Error recovering lease on {}", source.toString(), ex);
ns.append(source).close();
- log.info("Recovered lease on " + source.toString() + " using append");
+ log.info("Recovered lease on {} using append", source.toString());
}
} else if (ns instanceof LocalFileSystem || ns instanceof RawLocalFileSystem) {
// ignore
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
index 5d6ab48..d291597 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
@@ -32,7 +32,7 @@ public class MapRLogCloser implements LogCloser {
@Override
public long close(AccumuloConfiguration conf, VolumeManager fs, Path path) throws IOException {
- log.info("Recovering file " + path.toString() + " by changing permission to readonly");
+ log.info("Recovering file {} by changing permission to readonly", path.toString());
FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
FsPermission roPerm = new FsPermission((short) 0444);
try {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/DeadServerList.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/DeadServerList.java
index 0be5f45..8abab08 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/DeadServerList.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/DeadServerList.java
@@ -41,7 +41,7 @@ public class DeadServerList {
try {
zoo.mkdirs(path);
} catch (Exception ex) {
- log.error("Unable to make parent directories of " + path, ex);
+ log.error("Unable to make parent directories of {}", path, ex);
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
index 56e2834..223c724 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateChangeIterator.java
@@ -76,7 +76,7 @@ public class TabletStateChangeIterator extends SkippingIterator {
masterState = MasterState.valueOf(options.get(MASTER_STATE_OPTION));
} catch (Exception ex) {
if (options.get(MASTER_STATE_OPTION) != null) {
- log.error("Unable to decode masterState " + options.get(MASTER_STATE_OPTION));
+ log.error("Unable to decode masterState {}", options.get(MASTER_STATE_OPTION));
}
}
Set<TServerInstance> shuttingDown = parseServers(options.get(SHUTTING_DOWN_OPTION));
@@ -183,7 +183,7 @@ public class TabletStateChangeIterator extends SkippingIterator {
boolean shouldBeOnline = onlineTables.contains(tls.extent.getTableId());
if (debug) {
- log.debug(tls.extent + " is " + tls.getState(current) + " and should be " + (shouldBeOnline ? "on" : "off") + "line");
+ log.debug("{} is {} and should be {}line", tls.extent, tls.getState(current), (shouldBeOnline ? "on" : "off"));
}
switch (tls.getState(current)) {
case ASSIGNED:
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
index 5986fab..ee1fd90 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
@@ -77,7 +77,7 @@ public class ZooStore implements DistributedStore {
path = relative(path);
ZooReaderWriter.getInstance().putPersistentData(path, bs, NodeExistsPolicy.OVERWRITE);
cache.clear();
- log.debug("Wrote " + new String(bs, UTF_8) + " to " + path);
+ log.debug("Wrote {} to {}", new String(bs, UTF_8), path);
} catch (Exception ex) {
throw new DistributedStoreException(ex);
}
@@ -86,7 +86,7 @@ public class ZooStore implements DistributedStore {
@Override
public void remove(String path) throws DistributedStoreException {
try {
- log.debug("Removing " + path);
+ log.debug("Removing {}", path);
path = relative(path);
IZooReaderWriter zoo = ZooReaderWriter.getInstance();
if (zoo.exists(path))
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
index 148b6cc..a982a2f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
@@ -90,11 +90,11 @@ public class ZooTabletStateStore extends TabletStateStore {
if (logInfo != null) {
LogEntry logEntry = LogEntry.fromBytes(logInfo);
logs.add(Collections.singleton(logEntry.filename));
- log.debug("root tablet log " + logEntry.filename);
+ log.debug("root tablet log {}", logEntry.filename);
}
}
TabletLocationState result = new TabletLocationState(RootTable.EXTENT, futureSession, currentSession, lastSession, null, logs, false);
- log.debug("Returning root tablet state: " + result);
+ log.debug("Returning root tablet state: {}", result);
return result;
} catch (Exception ex) {
throw new RuntimeException(ex);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java b/server/base/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java
index 24a9750..b1b3e0f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/metrics/AbstractMetricsImpl.java
@@ -175,7 +175,7 @@ public abstract class AbstractMetricsImpl implements Metrics {
File dir = new File(mDir);
if (!dir.isDirectory())
if (!dir.mkdir())
- log.warn("Could not create log directory: " + dir);
+ log.warn("Could not create log directory: {}", dir);
logDir = dir;
// Create new log file
startNewLog();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
index 2a6fcd4..e464cef 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
@@ -182,7 +182,7 @@ public class MetricsConfiguration {
// set the enabled boolean from the configuration
enabled = config.getBoolean(enabledName);
if (log.isDebugEnabled())
- log.debug("Metrics collection enabled=" + enabled);
+ log.debug("Metrics collection enabled={}", enabled);
} else {
enabled = false;
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java b/server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java
index d59267a..f6e11d4 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/monitor/LogService.java
@@ -99,7 +99,7 @@ public class LogService extends org.apache.log4j.AppenderSkeleton {
// getLocalPort will return the actual ephemeral port used when '0' was provided.
String logForwardingAddr = hostAddress + ":" + server.getLocalPort();
- log.debug("Setting monitor log4j log-forwarding address to: " + logForwardingAddr);
+ log.debug("Setting monitor log4j log-forwarding address to: {}", logForwardingAddr);
final String path = ZooUtil.getRoot(instanceId) + Constants.ZMONITOR_LOG4J_ADDR;
final ZooReaderWriter zoo = ZooReaderWriter.getInstance();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
index 70f873d..2d53c49 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
@@ -94,7 +94,7 @@ public class ProblemReports implements Iterable<ProblemReport> {
@Override
public void run() {
- log.debug("Filing problem report " + pr.getTableId() + " " + pr.getProblemType() + " " + pr.getResource());
+ log.debug("Filing problem report {} {} {}", pr.getTableId(), pr.getProblemType(), pr.getResource());
try {
if (isMeta(pr.getTableId())) {
@@ -105,7 +105,7 @@ public class ProblemReports implements Iterable<ProblemReport> {
pr.saveToMetadataTable(context);
}
} catch (Exception e) {
- log.error("Failed to file problem report " + pr.getTableId() + " " + pr.getProblemType() + " " + pr.getResource(), e);
+ log.error("Failed to file problem report {} {} {}", pr.getTableId(), pr.getProblemType(), pr.getResource(), e);
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/StatusCombiner.java b/server/base/src/main/java/org/apache/accumulo/server/replication/StatusCombiner.java
index 84e4742..e29dd3c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/StatusCombiner.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/StatusCombiner.java
@@ -111,7 +111,7 @@ public class StatusCombiner extends TypedValueCombiner<Status> {
if (null == combined) {
if (!iter.hasNext()) {
if (log.isTraceEnabled()) {
- log.trace("Returned single value: " + key.toStringNoTruncate() + " " + ProtobufUtil.toString(status));
+ log.trace("Returned single value: {} {}", key.toStringNoTruncate(), ProtobufUtil.toString(status));
}
return status;
} else {
@@ -124,7 +124,7 @@ public class StatusCombiner extends TypedValueCombiner<Status> {
}
if (log.isTraceEnabled()) {
- log.trace("Combined: " + key.toStringNoTruncate() + " " + ProtobufUtil.toString(combined.build()));
+ log.trace("Combined: {} {}", key.toStringNoTruncate(), ProtobufUtil.toString(combined.build()));
}
return combined.build();
@@ -140,7 +140,7 @@ public class StatusCombiner extends TypedValueCombiner<Status> {
*/
public void combine(Builder combined, Status status) {
if (log.isTraceEnabled()) {
- log.trace("Combining " + status.toString().replace("\n", ", ") + " into " + builderToString(combined));
+ log.trace("Combining {} into {}", status.toString().replace("\n", ", "), builderToString(combined));
}
// offset up to which replication is completed
diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
index 06ca99f..c8f2f42 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
@@ -99,7 +99,7 @@ public class TCredentialsUpdatingInvocationHandler<I> implements InvocationHandl
// If the authentication token isn't a KerberosToken
if (!KerberosToken.class.isAssignableFrom(tokenClass) && !SystemToken.class.isAssignableFrom(tokenClass)) {
// Don't include messages about SystemToken since it's internal
- log.debug("Will not update principal on authentication tokens other than KerberosToken. Received " + tokenClass);
+ log.debug("Will not update principal on authentication tokens other than KerberosToken. Received {}", tokenClass);
throw new ThriftSecurityException("Did not receive a valid token", SecurityErrorCode.BAD_CREDENTIALS);
}
@@ -139,7 +139,7 @@ public class TCredentialsUpdatingInvocationHandler<I> implements InvocationHandl
try {
clz = Class.forName(tokenClassName);
} catch (ClassNotFoundException e) {
- log.debug("Could not create class from token name: " + tokenClassName, e);
+ log.debug("Could not create class from token name: {}", tokenClassName, e);
return null;
}
typedClz = clz.asSubclass(AuthenticationToken.class);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
index b85c4ed..2d6e257 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
@@ -51,8 +51,8 @@ import org.apache.accumulo.server.security.handler.Authenticator;
import org.apache.accumulo.server.security.handler.Authorizor;
import org.apache.accumulo.server.security.handler.PermissionHandler;
import org.apache.hadoop.io.Text;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
/**
*
@@ -60,7 +60,7 @@ import org.apache.log4j.Logger;
public class AuditedSecurityOperation extends SecurityOperation {
public static final String AUDITLOG = "org.apache.accumulo.audit";
- public static final Logger audit = Logger.getLogger(AUDITLOG);
+ public static final Logger audit = LoggerFactory.getLogger(AUDITLOG);
public AuditedSecurityOperation(AccumuloServerContext context, Authorizor author, Authenticator authent, PermissionHandler pm) {
super(context, author, authent, pm);
@@ -96,7 +96,7 @@ public class AuditedSecurityOperation extends SecurityOperation {
}
private boolean shouldAudit(TCredentials credentials, Table.ID tableId) {
- return (audit.isInfoEnabled() || audit.isEnabledFor(Level.WARN)) && !tableId.equals(MetadataTable.ID) && shouldAudit(credentials);
+ return (audit.isInfoEnabled() || audit.isWarnEnabled()) && !tableId.equals(MetadataTable.ID) && shouldAudit(credentials);
}
// Is INFO the right level to check? Do we even need that check?
@@ -111,20 +111,20 @@ public class AuditedSecurityOperation extends SecurityOperation {
* digging through loads of other code to find it.
*/
private void audit(TCredentials credentials, ThriftSecurityException ex, String template, Object... args) {
- audit.warn("operation: failed; user: " + credentials.getPrincipal() + "; " + String.format(template, args) + "; exception: " + ex.toString());
+ audit.warn("operation: failed; user: {}; {}; exception: {}", credentials.getPrincipal(), String.format(template, args), ex.toString());
}
private void audit(TCredentials credentials, String template, Object... args) {
if (shouldAudit(credentials)) {
- audit.info("operation: success; user: " + credentials.getPrincipal() + ": " + String.format(template, args));
+ audit.info("operation: success; user: {}: {}", credentials.getPrincipal(), String.format(template, args));
}
}
private void audit(TCredentials credentials, boolean permitted, String template, Object... args) {
if (shouldAudit(credentials)) {
String prefix = permitted ? "permitted" : "denied";
- audit.info("operation: " + prefix + "; user: " + credentials.getPrincipal() + "; client: " + TServerUtils.clientAddress.get() + "; "
- + String.format(template, args));
+ audit.info("operation: {}; user: {}; client: {}; {}",
+ prefix, credentials.getPrincipal(), TServerUtils.clientAddress.get(), String.format(template, args));
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index d3d98e3..c7cddaf 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@ -170,7 +170,7 @@ public class SecurityOperation {
}
} else {
if (!(context.getCredentials().equals(creds))) {
- log.debug("Provided credentials did not match server's expected credentials. Expected " + context.getCredentials() + " but got " + creds);
+ log.debug("Provided credentials did not match server's expected credentials. Expected {} but got {}", context.getCredentials(), creds);
throw new ThriftSecurityException(creds.getPrincipal(), SecurityErrorCode.BAD_CREDENTIALS);
}
}
@@ -604,7 +604,7 @@ public class SecurityOperation {
try {
authorizor.changeAuthorizations(user, authorizations);
- log.info("Changed authorizations for user " + user + " at the request of user " + credentials.getPrincipal());
+ log.info("Changed authorizations for user {} at the request of user {}", user, credentials.getPrincipal());
} catch (AccumuloSecurityException ase) {
throw ase.asThriftException();
}
@@ -616,7 +616,7 @@ public class SecurityOperation {
try {
AuthenticationToken token = toChange.getToken();
authenticator.changePassword(toChange.getPrincipal(), token);
- log.info("Changed password for user " + toChange.getPrincipal() + " at the request of user " + credentials.getPrincipal());
+ log.info("Changed password for user {} at the request of user {}", toChange.getPrincipal(), credentials.getPrincipal());
} catch (AccumuloSecurityException e) {
throw e.asThriftException();
}
@@ -641,7 +641,7 @@ public class SecurityOperation {
authenticator.createUser(newUser.getPrincipal(), token);
authorizor.initUser(newUser.getPrincipal());
permHandle.initUser(newUser.getPrincipal());
- log.info("Created user " + newUser.getPrincipal() + " at the request of user " + credentials.getPrincipal());
+ log.info("Created user {} at the request of user {}", newUser.getPrincipal(), credentials.getPrincipal());
} catch (AccumuloSecurityException ase) {
throw ase.asThriftException();
}
@@ -654,7 +654,7 @@ public class SecurityOperation {
authorizor.dropUser(user);
authenticator.dropUser(user);
permHandle.cleanUser(user);
- log.info("Deleted user " + user + " at the request of user " + credentials.getPrincipal());
+ log.info("Deleted user {} at the request of user {}", user, credentials.getPrincipal());
} catch (AccumuloSecurityException e) {
throw e.asThriftException();
}
@@ -668,7 +668,7 @@ public class SecurityOperation {
try {
permHandle.grantSystemPermission(user, permissionById);
- log.info("Granted system permission " + permissionById + " for user " + user + " at the request of user " + credentials.getPrincipal());
+ log.info("Granted system permission {} for user {} at the request of user {}", permissionById, user, credentials.getPrincipal());
} catch (AccumuloSecurityException e) {
throw e.asThriftException();
}
@@ -683,7 +683,7 @@ public class SecurityOperation {
try {
permHandle.grantTablePermission(user, tableId.canonicalID(), permission);
- log.info("Granted table permission " + permission + " for user " + user + " on the table " + tableId + " at the request of user " + c.getPrincipal());
+ log.info("Granted table permission {} for user {} on the table {} at the request of user {}", permission, user, tableId, c.getPrincipal());
} catch (AccumuloSecurityException e) {
throw e.asThriftException();
} catch (TableNotFoundException e) {
@@ -699,8 +699,8 @@ public class SecurityOperation {
try {
permHandle.grantNamespacePermission(user, namespace, permission);
- log.info("Granted namespace permission " + permission + " for user " + user + " on the namespace " + namespace + " at the request of user "
- + c.getPrincipal());
+ log.info("Granted namespace permission {} for user {} on the namespace {} at the request of user {}",
+ permission, user, namespace, c.getPrincipal());
} catch (AccumuloSecurityException e) {
throw e.asThriftException();
} catch (NamespaceNotFoundException e) {
@@ -716,7 +716,7 @@ public class SecurityOperation {
try {
permHandle.revokeSystemPermission(user, permission);
- log.info("Revoked system permission " + permission + " for user " + user + " at the request of user " + credentials.getPrincipal());
+ log.info("Revoked system permission {} for user {} at the request of user {}", permission, user, credentials.getPrincipal());
} catch (AccumuloSecurityException e) {
throw e.asThriftException();
@@ -732,7 +732,7 @@ public class SecurityOperation {
try {
permHandle.revokeTablePermission(user, tableId.canonicalID(), permission);
- log.info("Revoked table permission " + permission + " for user " + user + " on the table " + tableId + " at the request of user " + c.getPrincipal());
+ log.info("Revoked table permission {} for user {} on the table {} at the request of user {}", permission, user, tableId, c.getPrincipal());
} catch (AccumuloSecurityException e) {
throw e.asThriftException();
@@ -749,8 +749,8 @@ public class SecurityOperation {
try {
permHandle.revokeNamespacePermission(user, namespace, permission);
- log.info("Revoked namespace permission " + permission + " for user " + user + " on the namespace " + namespace + " at the request of user "
- + c.getPrincipal());
+ log.info("Revoked namespace permission {} for user {} on the namespace {} at the request of user {}",
+ permission, user, namespace, c.getPrincipal());
} catch (AccumuloSecurityException e) {
throw e.asThriftException();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityUtil.java b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityUtil.java
index 38afa31..f502b8c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityUtil.java
@@ -88,13 +88,13 @@ public class SecurityUtil {
try {
String principalName = getServerPrincipal(principalConfig);
if (keyTabPath != null && principalName != null && keyTabPath.length() != 0 && principalName.length() != 0) {
- log.info("Attempting to login with keytab as " + principalName);
+ log.info("Attempting to login with keytab as {}", principalName);
UserGroupInformation.loginUserFromKeytab(principalName, keyTabPath);
- log.info("Succesfully logged in as user " + principalName);
+ log.info("Succesfully logged in as user {}", principalName);
return true;
}
} catch (IOException io) {
- log.error("Error logging in user " + principalConfig + " using keytab at " + keyTabPath, io);
+ log.error("Error logging in user {} using keytab at {}", principalConfig, keyTabPath, io);
}
return false;
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java b/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
index 736fdeb..96d9032 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
@@ -304,7 +304,7 @@ public class UserImpersonation {
hosts.addAll(Arrays.<String> asList(hostValues));
}
} else {
- log.debug("Ignoring key " + aclKey);
+ log.debug("Ignoring key {}", aclKey);
}
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
index fe4407e..6061488 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
@@ -63,7 +63,7 @@ public class ZooAuthenticationKeyWatcher implements Watcher {
}
break;
default:
- log.warn("Unhandled: " + event);
+ log.warn("Unhandled: {}", event);
}
// Nothing more to do for EventType.None
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/handler/KerberosAuthenticator.java b/server/base/src/main/java/org/apache/accumulo/server/security/handler/KerberosAuthenticator.java
index 504f291..a701052 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/handler/KerberosAuthenticator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/handler/KerberosAuthenticator.java
@@ -97,7 +97,7 @@ public class KerberosAuthenticator implements Authenticator {
zooCache.clear();
if (zoo.exists(zkUserPath)) {
zoo.recursiveDelete(zkUserPath, NodeMissingPolicy.SKIP);
- log.info("Removed " + zkUserPath + "/" + " from zookeeper");
+ log.info("Removed {}/ from zookeeper", zkUserPath);
}
// prep parent node of users with root username
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthenticator.java b/server/base/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthenticator.java
index 6623fc6..718f9df 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthenticator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthenticator.java
@@ -70,7 +70,7 @@ public final class ZKAuthenticator implements Authenticator {
zooCache.clear();
if (zoo.exists(ZKUserPath)) {
zoo.recursiveDelete(ZKUserPath, NodeMissingPolicy.SKIP);
- log.info("Removed " + ZKUserPath + "/" + " from zookeeper");
+ log.info("Removed {}/ from zookeeper", ZKUserPath);
}
// prep parent node of users with root username
diff --git a/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java b/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
index bea9487..cfc5c90 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
@@ -63,7 +63,7 @@ public class TableManager {
public static void prepareNewNamespaceState(String instanceId, Namespace.ID namespaceId, String namespace, NodeExistsPolicy existsPolicy)
throws KeeperException, InterruptedException {
- log.debug("Creating ZooKeeper entries for new namespace " + namespace + " (ID: " + namespaceId + ")");
+ log.debug("Creating ZooKeeper entries for new namespace {} (ID: {})", namespace, namespaceId);
String zPath = Constants.ZROOT + "/" + instanceId + Constants.ZNAMESPACES + "/" + namespaceId;
IZooReaderWriter zoo = ZooReaderWriter.getInstance();
@@ -75,7 +75,7 @@ public class TableManager {
public static void prepareNewTableState(String instanceId, Table.ID tableId, Namespace.ID namespaceId, String tableName, TableState state,
NodeExistsPolicy existsPolicy) throws KeeperException, InterruptedException {
// state gets created last
- log.debug("Creating ZooKeeper entries for new table " + tableName + " (ID: " + tableId + ") in namespace (ID: " + namespaceId + ")");
+ log.debug("Creating ZooKeeper entries for new table {} (ID: {}) in namespace (ID: {})", tableName, tableId, namespaceId);
Pair<String,String> qualifiedTableName = Tables.qualify(tableName);
tableName = qualifiedTableName.getSecond();
String zTablePath = Constants.ZROOT + "/" + instanceId + Constants.ZTABLES + "/" + tableId;
@@ -157,13 +157,13 @@ public class TableManager {
}
if (!transition)
throw new IllegalTableTransitionException(oldState, newState);
- log.debug("Transitioning state for table " + tableId + " from " + oldState + " to " + newState);
+ log.debug("Transitioning state for table {} from {} to {}", tableId, oldState, newState);
return newState.name().getBytes(UTF_8);
}
});
} catch (Exception e) {
// ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compability
- log.error("FATAL Failed to transition table to state " + newState);
+ log.error("FATAL Failed to transition table to state {}", newState);
throw new RuntimeException(e);
}
}
@@ -185,7 +185,7 @@ public class TableManager {
try {
tState = TableState.valueOf(sState);
} catch (IllegalArgumentException e) {
- log.error("Unrecognized state for table with tableId=" + tableId + ": " + sState);
+ log.error("Unrecognized state for table with tableId={}: {}", tableId, sState);
}
tableStateCache.put(tableId, tState);
}
@@ -255,7 +255,7 @@ public class TableManager {
tableId = Table.ID.of(sa[0]);
}
if (tableId == null) {
- log.warn("Unknown path in " + event);
+ log.warn("Unknown path in {}", event);
return;
}
}
@@ -265,14 +265,14 @@ public class TableManager {
if (zPath != null && zPath.equals(tablesPrefix)) {
updateTableStateCache();
} else {
- log.warn("Unexpected path " + zPath);
+ log.warn("Unexpected path {}", zPath);
}
break;
case NodeCreated:
case NodeDataChanged:
// state transition
TableState tState = updateTableStateCache(tableId);
- log.debug("State transition to " + tState + " @ " + event);
+ log.debug("State transition to {} @ {}", tState, event);
synchronized (observers) {
for (TableObserver to : observers)
to.stateChanged(tableId, tState);
@@ -289,7 +289,7 @@ public class TableManager {
switch (event.getState()) {
case Expired:
if (log.isTraceEnabled())
- log.trace("Session expired " + event);
+ log.trace("Session expired {}", event);
synchronized (observers) {
for (TableObserver to : observers)
to.sessionExpired();
@@ -298,11 +298,11 @@ public class TableManager {
case SyncConnected:
default:
if (log.isTraceEnabled())
- log.trace("Ignored " + event);
+ log.trace("Ignored {}", event);
}
break;
default:
- log.warn("Unandled " + event);
+ log.warn("Unandled {}", event);
}
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
index b188f41..a0e751b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -375,7 +375,7 @@ public class Admin implements KeywordExecutable {
for (int port : context.getConfiguration().getPort(Property.TSERV_CLIENTPORT)) {
HostAndPort address = AddressUtil.parseAddress(server, port);
final String finalServer = qualifyWithZooKeeperSessionId(zTServerRoot, zc, address.toString());
- log.info("Stopping server " + finalServer);
+ log.info("Stopping server {}", finalServer);
MasterClient.executeVoid(context, new ClientExec<MasterClientService.Client>() {
@Override
public void execute(MasterClientService.Client client) throws Exception {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/CleanZookeeper.java b/server/base/src/main/java/org/apache/accumulo/server/util/CleanZookeeper.java
index b3ca6dd..961991e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/CleanZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/CleanZookeeper.java
@@ -67,7 +67,7 @@ public class CleanZookeeper {
try {
zk.recursiveDelete(instanceNamePath, NodeMissingPolicy.SKIP);
} catch (KeeperException.NoAuthException ex) {
- log.warn("Unable to delete " + instanceNamePath);
+ log.warn("Unable to delete {}", instanceNamePath);
}
}
}
@@ -76,7 +76,7 @@ public class CleanZookeeper {
try {
zk.recursiveDelete(path, NodeMissingPolicy.SKIP);
} catch (KeeperException.NoAuthException ex) {
- log.warn("Unable to delete " + path);
+ log.warn("Unable to delete {}", path);
}
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
index 36c8f29..8fa5292 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
@@ -202,13 +202,13 @@ public class FileUtil {
if (mapFiles.size() > maxToOpen) {
tmpDir = createTmpDir(acuconf, fs, tabletDir);
- log.debug("Too many indexes (" + mapFiles.size() + ") to open at once for " + endRow + " " + prevEndRow + ", reducing in tmpDir = " + tmpDir);
+ log.debug("Too many indexes ({}) to open at once for {} {}, reducing in tmpDir = {}", mapFiles.size(), endRow, prevEndRow, tmpDir);
long t1 = System.currentTimeMillis();
mapFiles = reduceFiles(acuconf, conf, fs, prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0);
long t2 = System.currentTimeMillis();
- log.debug("Finished reducing indexes for " + endRow + " " + prevEndRow + " in " + String.format("%6.2f secs", (t2 - t1) / 1000.0));
+ log.debug("Finished reducing indexes for {} {} in {}", endRow, prevEndRow, String.format("%6.2f secs", (t2 - t1) / 1000.0));
}
if (prevEndRow == null)
@@ -277,13 +277,13 @@ public class FileUtil {
throw new IOException("Cannot find mid point using data files, too many " + mapFiles.size());
tmpDir = createTmpDir(acuConf, fs, tabletDirectory);
- log.debug("Too many indexes (" + mapFiles.size() + ") to open at once for " + endRow + " " + prevEndRow + ", reducing in tmpDir = " + tmpDir);
+ log.debug("Too many indexes ({}) to open at once for {} {}, reducing in tmpDir = {}", mapFiles.size(), endRow, prevEndRow, tmpDir);
long t1 = System.currentTimeMillis();
mapFiles = reduceFiles(acuConf, conf, fs, prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0);
long t2 = System.currentTimeMillis();
- log.debug("Finished reducing indexes for " + endRow + " " + prevEndRow + " in " + String.format("%6.2f secs", (t2 - t1) / 1000.0));
+ log.debug("Finished reducing indexes for {} {} in {}", endRow, prevEndRow, String.format("%6.2f secs", (t2 - t1) / 1000.0));
}
if (prevEndRow == null)
@@ -297,8 +297,8 @@ public class FileUtil {
if (numKeys == 0) {
if (useIndex) {
- log.warn("Failed to find mid point using indexes, falling back to data files which is slower. No entries between " + prevEndRow + " and " + endRow
- + " for " + mapFiles);
+ log.warn("Failed to find mid point using indexes, falling back to data files which is slower. No entries between {} and {} for {}",
+ prevEndRow, endRow, mapFiles);
// need to pass original map files, not possibly reduced indexes
return findMidPoint(fs, tabletDirectory, acuConf, prevEndRow, endRow, origMapFiles, minSplit, false);
}
@@ -379,7 +379,7 @@ public class FileUtil {
return;
}
- log.error("Did not delete tmp dir because it wasn't a tmp dir " + tmpDir);
+ log.error("Did not delete tmp dir because it wasn't a tmp dir {}", tmpDir);
}
}
@@ -447,13 +447,13 @@ public class FileUtil {
}
} catch (IOException ioe) {
- log.warn("Failed to read map file to determine first and last key : " + mapfile, ioe);
+ log.warn("Failed to read map file to determine first and last key : {}", mapfile, ioe);
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException ioe) {
- log.warn("failed to close " + mapfile, ioe);
+ log.warn("failed to close {}", mapfile, ioe);
}
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java b/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
index 3e6d812..31b5221 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
@@ -66,9 +66,9 @@ public class FindOfflineTablets {
@Override
public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
if (!deleted.isEmpty() && scanning.get())
- log.warn("Tablet servers deleted while scanning: " + deleted);
+ log.warn("Tablet servers deleted while scanning: {}", deleted);
if (!added.isEmpty() && scanning.get())
- log.warn("Tablet servers added while scanning: " + added);
+ log.warn("Tablet servers added while scanning: {}", added);
}
});
tservers.startListeningForTabletServerChanges();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java b/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java
index 0674bea..46a89b0 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java
@@ -209,7 +209,7 @@ public class ListInstances {
try {
ts.add(UUID.fromString(iid));
} catch (Exception e) {
- log.error("Exception: " + e);
+ log.error("Exception: ", e);
}
}
} catch (Exception e) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
index b3de628..b72b4e3 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
@@ -102,7 +102,7 @@ public class MasterMetadataUtil {
public static KeyExtent fixSplit(ClientContext context, Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, ZooLock lock)
throws AccumuloException, IOException {
- log.info("Incomplete split " + metadataEntry + " attempting to fix");
+ log.info("Incomplete split {} attempting to fix", metadataEntry);
Value oper = columns.get(TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN);
@@ -156,11 +156,11 @@ public class MasterMetadataUtil {
VolumeManager fs = VolumeManagerImpl.get();
if (!scanner2.iterator().hasNext()) {
- log.info("Rolling back incomplete split " + metadataEntry + " " + metadataPrevEndRow);
+ log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow);
MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock);
return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
} else {
- log.info("Finishing incomplete split " + metadataEntry + " " + metadataPrevEndRow);
+ log.info("Finishing incomplete split {} {}", metadataEntry, metadataPrevEndRow);
List<FileRef> highDatafilesToRemove = new ArrayList<>();
@@ -274,7 +274,7 @@ public class MasterMetadataUtil {
while (true) {
try {
if (zk.exists(zpath)) {
- log.debug("Removing WAL reference for root table " + zpath);
+ log.debug("Removing WAL reference for root table {}", zpath);
zk.recursiveDelete(zpath, NodeMissingPolicy.SKIP);
}
break;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index 01d036f..a9c3287 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -550,13 +550,13 @@ public class MetadataTableUtil {
}
public static List<LogEntry> getLogEntries(ClientContext context, KeyExtent extent) throws IOException, KeeperException, InterruptedException {
- log.info("Scanning logging entries for " + extent);
+ log.info("Scanning logging entries for {}", extent);
ArrayList<LogEntry> result = new ArrayList<>();
if (extent.equals(RootTable.EXTENT)) {
log.info("Getting logs for root tablet from zookeeper");
getRootLogEntries(result);
} else {
- log.info("Scanning metadata for logs used for tablet " + extent);
+ log.info("Scanning metadata for logs used for tablet {}", extent);
Scanner scanner = getTabletLogScanner(context, extent);
Text pattern = extent.getMetadataEntry();
for (Entry<Key,Value> entry : scanner) {
@@ -569,7 +569,7 @@ public class MetadataTableUtil {
}
}
- log.info("Returning logs " + result + " for extent " + extent);
+ log.info("Returning logs {} for extent {}", result, extent);
return result;
}
@@ -618,7 +618,7 @@ public class MetadataTableUtil {
rootTableEntries = getLogEntries(context, new KeyExtent(MetadataTable.ID, null, null)).iterator();
try {
Scanner scanner = context.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- log.info("Setting range to " + MetadataSchema.TabletsSection.getRange());
+ log.info("Setting range to {}", MetadataSchema.TabletsSection.getRange());
scanner.setRange(MetadataSchema.TabletsSection.getRange());
scanner.fetchColumnFamily(LogColumnFamily.NAME);
metadataEntries = scanner.iterator();
@@ -860,7 +860,7 @@ public class MetadataTableUtil {
// delete what we have cloned and try again
deleteTable(tableId, false, context, null);
- log.debug("Tablets merged in table " + srcTableId + " while attempting to clone, trying again");
+ log.debug("Tablets merged in table {} while attempting to clone, trying again", srcTableId);
sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}
@@ -1030,7 +1030,7 @@ public class MetadataTableUtil {
String filename = rowID.substring(prefix.length());
// add the new entry first
- log.info("Moving " + filename + " marker in " + RootTable.NAME);
+ log.info("Moving {} marker in {}", filename, RootTable.NAME);
Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + filename);
m.put(EMPTY_BYTES, EMPTY_BYTES, EMPTY_BYTES);
update(context, m, RootTable.EXTENT);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java
index 8d4b67e..f8f4615 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java
@@ -65,7 +65,7 @@ public class RandomWriter {
}
mutations_so_far++;
if (mutations_so_far % 1000000 == 0) {
- log.info("Created " + mutations_so_far + " mutations so far");
+ log.info("Created {} mutations so far", mutations_so_far);
}
return m;
}
@@ -96,11 +96,11 @@ public class RandomWriter {
opts.parseArgs(RandomWriter.class.getName(), args, bwOpts);
long start = System.currentTimeMillis();
- log.info("starting at " + start + " for user " + opts.getPrincipal());
+ log.info("starting at {} for user {}", start, opts.getPrincipal());
try {
Connector connector = opts.getConnector();
BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
- log.info("Writing " + opts.count + " mutations...");
+ log.info("Writing {} mutations...", opts.count);
bw.addMutations(new RandomMutationGenerator(opts.count));
bw.close();
} catch (Exception e) {
@@ -109,8 +109,8 @@ public class RandomWriter {
}
long stop = System.currentTimeMillis();
- log.info("stopping at " + stop);
- log.info("elapsed: " + (((double) stop - (double) start) / 1000.0));
+ log.info("stopping at {}", stop);
+ log.info("elapsed: {}", (((double) stop - (double) start) / 1000.0));
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
index 5230bd6..1ee74b4 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
@@ -83,18 +83,18 @@ public class RandomizeVolumes {
}
String tblStr = c.tableOperations().tableIdMap().get(tableName);
if (null == tblStr) {
- log.error("Could not determine the table ID for table " + tableName);
+ log.error("Could not determine the table ID for table {}", tableName);
return 2;
}
Table.ID tableId = Table.ID.of(tblStr);
TableState tableState = TableManager.getInstance().getTableState(tableId);
if (TableState.OFFLINE != tableState) {
- log.info("Taking " + tableName + " offline");
+ log.info("Taking {} offline", tableName);
c.tableOperations().offline(tableName, true);
- log.info(tableName + " offline");
+ log.info("{} offline", tableName);
}
SimpleThreadPool pool = new SimpleThreadPool(50, "directory maker");
- log.info("Rewriting entries for " + tableName);
+ log.info("Rewriting entries for {}", tableName);
Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
DIRECTORY_COLUMN.fetch(scanner);
scanner.setRange(TabletsSection.getRange(tableId));
@@ -107,7 +107,7 @@ public class RandomizeVolumes {
String[] parts = oldLocation.split(Path.SEPARATOR);
Table.ID tableIdEntry = Table.ID.of(parts[parts.length - 2]);
if (!tableIdEntry.equals(tableId)) {
- log.error("Unexpected table id found: " + tableIdEntry + ", expected " + tableId + "; skipping");
+ log.error("Unexpected table id found: {}, expected {}; skipping", tableIdEntry, tableId);
continue;
}
directory = parts[parts.length - 1];
@@ -122,7 +122,7 @@ public class RandomizeVolumes {
+ Path.SEPARATOR + directory;
m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(UTF_8)));
if (log.isTraceEnabled()) {
- log.trace("Replacing " + oldLocation + " with " + newLocation);
+ log.trace("Replacing {} with {}", oldLocation, newLocation);
}
writer.addMutation(m);
pool.submit(new Runnable() {
@@ -148,10 +148,10 @@ public class RandomizeVolumes {
break;
}
}
- log.info("Updated " + count + " entries for table " + tableName);
+ log.info("Updated {} entries for table {}", count, tableName);
if (TableState.OFFLINE != tableState) {
c.tableOperations().online(tableName, true);
- log.info("table " + tableName + " back online");
+ log.info("table {} back online", tableName);
}
return 0;
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
index 9f5abe6..a6ba4c5 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
@@ -179,7 +179,7 @@ public class ReplicationTableUtil {
*/
public static void updateFiles(ClientContext context, KeyExtent extent, String file, Status stat) {
if (log.isDebugEnabled()) {
- log.debug("Updating replication status for " + extent + " with " + file + " using " + ProtobufUtil.toString(stat));
+ log.debug("Updating replication status for {} with {} using {}", extent, file, ProtobufUtil.toString(stat));
}
// TODO could use batch writer, would need to handle failure and retry like update does - ACCUMULO-1294
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java b/server/base/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
index f36de51..5f58bdc 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
@@ -102,12 +102,12 @@ public class TableDiskUsage {
Map<List<Integer>,Long> usage = new HashMap<>();
if (log.isTraceEnabled()) {
- log.trace("fileSizes " + fileSizes);
+ log.trace("fileSizes {}", fileSizes);
}
// For each file w/ referenced-table bitset
for (Entry<String,Integer[]> entry : tableFiles.entrySet()) {
if (log.isTraceEnabled()) {
- log.trace("file " + entry.getKey() + " table bitset " + Arrays.toString(entry.getValue()));
+ log.trace("file {} table bitset {}", entry.getKey(), Arrays.toString(entry.getValue()));
}
List<Integer> key = Arrays.asList(entry.getValue());
Long size = fileSizes.get(entry.getKey());
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
index 50e9e58..d6d97f2 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
@@ -124,7 +124,7 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
Table.ID currentTable = new KeyExtent(prevEndRowKey.getRow(), (Text) null).getTableId();
if (!lastTable.equals(currentTable) && (per != null || lastEndRow != null)) {
- log.info("Metadata inconsistency on table transition : " + lastTable + " " + currentTable + " " + per + " " + lastEndRow);
+ log.info("Metadata inconsistency on table transition : {} {} {} {}", lastTable, currentTable, per, lastEndRow);
currentTabletKeys = null;
resetScanner();
@@ -139,7 +139,7 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
if (!perEqual) {
- log.info("Metadata inconsistency : " + per + " != " + lastEndRow + " metadataKey = " + prevEndRowKey);
+ log.info("Metadata inconsistency : {} != {} metadataKey = {}", per, lastEndRow, prevEndRowKey);
currentTabletKeys = null;
resetScanner();
@@ -217,7 +217,7 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
}
if (!sawPrevEndRow && tm.size() > 0) {
- log.warn("Metadata problem : tablet " + curMetaDataRow + " has no prev end row");
+ log.warn("Metadata problem : tablet {} has no prev end row", curMetaDataRow);
resetScanner();
curMetaDataRow = null;
tm.clear();
@@ -249,7 +249,7 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
range = new Range(new Key(lastTablet).followingKey(PartialKey.ROW), true, this.range.getEndKey(), this.range.isEndKeyInclusive());
}
- log.info("Resetting " + MetadataTable.NAME + " scanner to " + range);
+ log.info("Resetting {} scanner to {}", MetadataTable.NAME, range);
scanner.setRange(range);
iter = scanner.iterator();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java b/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
index da33888..3ee7f69 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
@@ -127,7 +127,7 @@ public class VerifyTabletAssignments {
try {
checkTabletServer(context, entry, failures);
} catch (Exception e) {
- log.error("Failure on tablet server '" + entry.getKey() + ".", e);
+ log.error("Failure on tablet server '{}.", entry.getKey(), e);
failures.addAll(entry.getValue());
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/time/SimpleTimer.java b/server/base/src/main/java/org/apache/accumulo/server/util/time/SimpleTimer.java
index 0a40432..cd8a4de 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/time/SimpleTimer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/time/SimpleTimer.java
@@ -63,7 +63,7 @@ public class SimpleTimer {
SimpleTimer.instanceThreadPoolSize = threadPoolSize;
} else {
if (SimpleTimer.instanceThreadPoolSize != threadPoolSize) {
- log.warn("Asked to create SimpleTimer with thread pool size " + threadPoolSize + ", existing instance has " + instanceThreadPoolSize);
+ log.warn("Asked to create SimpleTimer with thread pool size {}, existing instance has {}", threadPoolSize, instanceThreadPoolSize);
}
}
return instance;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/zookeeper/DistributedWorkQueue.java b/server/base/src/main/java/org/apache/accumulo/server/zookeeper/DistributedWorkQueue.java
index 4faa7ad..5ac3e96 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/zookeeper/DistributedWorkQueue.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/zookeeper/DistributedWorkQueue.java
@@ -96,7 +96,7 @@ public class DistributedWorkQueue {
break;
}
- log.debug("got lock for " + child);
+ log.debug("got lock for {}", child);
Runnable task = new Runnable() {
@@ -110,17 +110,17 @@ public class DistributedWorkQueue {
try {
zoo.recursiveDelete(childPath, NodeMissingPolicy.SKIP);
} catch (Exception e) {
- log.error("Error received when trying to delete entry in zookeeper " + childPath, e);
+ log.error("Error received when trying to delete entry in zookeeper {}", childPath, e);
}
} catch (Exception e) {
- log.warn("Failed to process work " + child, e);
+ log.warn("Failed to process work {}", child, e);
}
try {
zoo.recursiveDelete(lockPath, NodeMissingPolicy.SKIP);
} catch (Exception e) {
- log.error("Error received when trying to delete entry in zookeeper " + childPath, e);
+ log.error("Error received when trying to delete entry in zookeeper {}", childPath, e);
}
} finally {
@@ -186,13 +186,13 @@ public class DistributedWorkQueue {
log.info("Interrupted looking for work", e);
}
else
- log.info("Unexpected path for NodeChildrenChanged event " + event.getPath());
+ log.info("Unexpected path for NodeChildrenChanged event {}", event.getPath());
break;
case NodeCreated:
case NodeDataChanged:
case NodeDeleted:
case None:
- log.info("Got unexpected zookeeper event: " + event.getType() + " for " + path);
+ log.info("Got unexpected zookeeper event: {} for {}", event.getType(), path);
break;
}
@@ -205,7 +205,7 @@ public class DistributedWorkQueue {
SimpleTimer.getInstance(config).schedule(new Runnable() {
@Override
public void run() {
- log.debug("Looking for work in " + path);
+ log.debug("Looking for work in {}", path);
try {
lookForWork(processor, zoo.getChildren(path));
} catch (KeeperException e) {
@@ -255,7 +255,7 @@ public class DistributedWorkQueue {
case NodeDataChanged:
case NodeDeleted:
case None:
- log.info("Got unexpected zookeeper event: " + event.getType() + " for " + path);
+ log.info("Got unexpected zookeeper event: {} for {}", event.getType(), path);
break;
}
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
index 8803a40..439287b 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
@@ -95,8 +95,8 @@ public class GarbageCollectWriteAheadLogs {
this.liveServers = new LiveTServerSet(context, new Listener() {
@Override
public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
- log.debug("New tablet servers noticed: " + added);
- log.debug("Tablet servers removed: " + deleted);
+ log.debug("New tablet servers noticed: {}", added);
+ log.debug("Tablet servers removed: {}", deleted);
}
});
liveServers.startListeningForTabletServerChanges();
@@ -237,7 +237,7 @@ public class GarbageCollectWriteAheadLogs {
private long removeFiles(Collection<Pair<WalState,Path>> collection, final GCStatus status) {
for (Pair<WalState,Path> stateFile : collection) {
Path path = stateFile.getSecond();
- log.debug("Removing " + stateFile.getFirst() + " WAL " + path);
+ log.debug("Removing {} WAL {}", stateFile.getFirst(), path);
try {
if (!useTrash || !fs.moveToTrash(path)) {
fs.deleteRecursively(path);
@@ -246,7 +246,7 @@ public class GarbageCollectWriteAheadLogs {
} catch (FileNotFoundException ex) {
// ignored
} catch (IOException ex) {
- log.error("Unable to delete wal " + path + ": " + ex);
+ log.error("Unable to delete wal {}", path, ex);
}
}
return status.currentLog.deleted;
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
index 17cc70e..4f5b8da 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
@@ -120,7 +120,7 @@ public class GarbageCollectionAlgorithm {
try {
relPath = makeRelative(candidate, 0);
} catch (IllegalArgumentException iae) {
- log.warn("Ignoring invalid deletion candidate " + candidate);
+ log.warn("Ignoring invalid deletion candidate {}", candidate);
continue;
}
ret.put(relPath, candidate);
@@ -161,7 +161,7 @@ public class GarbageCollectionAlgorithm {
}
if (count > 0)
- log.debug("Folder has bulk processing flag: " + blipPath);
+ log.debug("Folder has bulk processing flag: {}", blipPath);
}
}
@@ -188,11 +188,11 @@ public class GarbageCollectionAlgorithm {
// WARNING: This line is EXTREMELY IMPORTANT.
// You MUST REMOVE candidates that are still in use
if (candidateMap.remove(reference) != null)
- log.debug("Candidate was still in use: " + reference);
+ log.debug("Candidate was still in use: {}", reference);
String dir = reference.substring(0, reference.lastIndexOf('/'));
if (candidateMap.remove(dir) != null)
- log.debug("Candidate was still in use: " + reference);
+ log.debug("Candidate was still in use: {}", reference);
} else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
String tableID = new String(KeyExtent.tableOfMetadataRow(key.getRow()));
@@ -206,7 +206,7 @@ public class GarbageCollectionAlgorithm {
dir = makeRelative(dir, 2);
if (candidateMap.remove(dir) != null)
- log.debug("Candidate was still in use: " + dir);
+ log.debug("Candidate was still in use: {}", dir);
} else
throw new RuntimeException("Scanner over metadata table returned unexpected column : " + entry.getKey());
}
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 1edfa86..d47c85c 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -176,12 +176,12 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
this.fs = fs;
long gcDelay = getConfiguration().getTimeInMillis(Property.GC_CYCLE_DELAY);
- log.info("start delay: " + getStartDelay() + " milliseconds");
- log.info("time delay: " + gcDelay + " milliseconds");
- log.info("safemode: " + opts.safeMode);
- log.info("verbose: " + opts.verbose);
- log.info("memory threshold: " + CANDIDATE_MEMORY_PERCENTAGE + " of " + Runtime.getRuntime().maxMemory() + " bytes");
- log.info("delete threads: " + getNumDeleteThreads());
+ log.info("start delay: {} milliseconds", getStartDelay());
+ log.info("time delay: {} milliseconds", gcDelay);
+ log.info("safemode: {}", opts.safeMode);
+ log.info("verbose: {}", opts.verbose);
+ log.info("memory threshold: {} of {} bytes", CANDIDATE_MEMORY_PERCENTAGE, Runtime.getRuntime().maxMemory());
+ log.info("delete threads: {}", getNumDeleteThreads());
}
/**
@@ -305,7 +305,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
+ " Examine the log files to identify them.%n");
log.info("SAFEMODE: Listing all data file candidates for deletion");
for (String s : confirmedDeletes.values())
- log.info("SAFEMODE: " + s);
+ log.info("SAFEMODE: {}", s);
log.info("SAFEMODE: End candidates for deletion");
return;
}
@@ -328,7 +328,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
lastDir = absPath;
} else if (lastDir != null) {
if (absPath.startsWith(lastDir)) {
- log.debug("Ignoring " + entry.getValue() + " because " + lastDir + " exist");
+ log.debug("Ignoring {} because {} exist", entry.getValue(), lastDir);
try {
putMarkerDeleteMutation(entry.getValue(), writer);
} catch (MutationsRejectedException e) {
@@ -362,13 +362,13 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
// atomically in one mutation and extreme care would need to be taken that delete entry was not lost. Instead of doing that, just deal with
// volume switching when something needs to be deleted. Since the rest of the code uses suffixes to compare delete entries, there is no danger
// of deleting something that should not be deleted. Must not change value of delete variable because thats whats stored in metadata table.
- log.debug("Volume replaced " + delete + " -> " + switchedDelete);
- log.debug("Volume replaced " + delete + " -> " + switchedDelete);
+ log.debug("Volume replaced {} -> {}", delete, switchedDelete);
fullPath = fs.getFullPath(FileType.TABLE, switchedDelete);
} else {
fullPath = fs.getFullPath(FileType.TABLE, delete);
}
- log.debug("Deleting " + fullPath);
+ log.debug("Deleting {}", fullPath);
if (archiveOrMoveToTrash(fullPath) || fs.deleteRecursively(fullPath)) {
// delete succeeded, still want to delete
@@ -382,7 +382,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
synchronized (SimpleGarbageCollector.this) {
++status.current.errors;
}
- log.warn("File exists, but was not deleted for an unknown reason: " + fullPath);
+ log.warn("File exists, but was not deleted for an unknown reason: {}", fullPath);
} else {
// this failure, we still want to remove the metadata entry
removeFlag = true;
@@ -398,10 +398,10 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
if (tableState != null && tableState != TableState.DELETING) {
// clone directories don't always exist
if (!tabletDir.startsWith(Constants.CLONE_PREFIX))
- log.debug("File doesn't exist: " + fullPath);
+ log.debug("File doesn't exist: {}", fullPath);
}
} else {
- log.warn("Very strange path name: " + delete);
+ log.warn("Very strange path name: {}", delete);
}
}
@@ -411,7 +411,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
putMarkerDeleteMutation(delete, finalWriter);
}
} catch (Exception e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
}
}
@@ -426,7 +426,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
try {
while (!deleteThreadPool.awaitTermination(1000, TimeUnit.MILLISECONDS)) {}
} catch (InterruptedException e1) {
- log.error("{}", e1.getMessage(), e1);
+ log.error(e1.getMessage(), e1);
}
if (writer != null) {
@@ -452,7 +452,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
if (tabletDirs.length == 0) {
Path p = new Path(dir + "/" + tableID);
- log.debug("Removing table dir " + p);
+ log.debug("Removing table dir {}", p);
if (!archiveOrMoveToTrash(p))
fs.delete(p);
}
@@ -481,7 +481,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
try {
stat = Status.parseFrom(input.getValue().get());
} catch (InvalidProtocolBufferException e) {
- log.warn("Could not deserialize protobuf for: " + input.getKey());
+ log.warn("Could not deserialize protobuf for: {}", input.getKey());
stat = null;
}
return Maps.immutableEntry(file, stat);
@@ -504,16 +504,16 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
try {
getZooLock(startStatsService());
} catch (Exception ex) {
- log.error("{}", ex.getMessage(), ex);
+ log.error(ex.getMessage(), ex);
System.exit(1);
}
try {
long delay = getStartDelay();
- log.debug("Sleeping for " + delay + " milliseconds before beginning garbage collection cycles");
+ log.debug("Sleeping for {} milliseconds before beginning garbage collection cycles", delay);
Thread.sleep(delay);
} catch (InterruptedException e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
return;
}
@@ -532,17 +532,17 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
new GarbageCollectionAlgorithm().collect(new GCEnv(RootTable.NAME));
new GarbageCollectionAlgorithm().collect(new GCEnv(MetadataTable.NAME));
- log.info("Number of data file candidates for deletion: " + status.current.candidates);
- log.info("Number of data file candidates still in use: " + status.current.inUse);
- log.info("Number of successfully deleted data files: " + status.current.deleted);
- log.info("Number of data files delete failures: " + status.current.errors);
+ log.info("Number of data file candidates for deletion: {}", status.current.candidates);
+ log.info("Number of data file candidates still in use: {}", status.current.inUse);
+ log.info("Number of successfully deleted data files: {}", status.current.deleted);
+ log.info("Number of data files delete failures: {}", status.current.errors);
status.current.finished = System.currentTimeMillis();
status.last = status.current;
status.current = new GcCycleStats();
} catch (Exception e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
}
tStop = System.currentTimeMillis();
@@ -560,13 +560,14 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
replSpan.stop();
}
+ // Clean up any unused write-ahead logs
Span waLogs = Trace.start("walogs");
try {
GarbageCollectWriteAheadLogs walogCollector = new GarbageCollectWriteAheadLogs(this, fs, isUsingTrash());
log.info("Beginning garbage collection of write-ahead logs");
walogCollector.collect(status);
} catch (Exception e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
} finally {
waLogs.stop();
}
@@ -578,16 +579,16 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
connector.tableOperations().compact(MetadataTable.NAME, null, null, true, true);
connector.tableOperations().compact(RootTable.NAME, null, null, true, true);
} catch (Exception e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
}
Trace.off();
try {
long gcDelay = getConfiguration().getTimeInMillis(Property.GC_CYCLE_DELAY);
- log.debug("Sleeping for " + gcDelay + " milliseconds");
+ log.debug("Sleeping for {} milliseconds", gcDelay);
Thread.sleep(gcDelay);
} catch (InterruptedException e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
return;
}
}
@@ -627,7 +628,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
Volume sourceVolume = fs.getVolumeByPath(fileToArchive);
String sourceVolumeBasePath = sourceVolume.getBasePath();
- log.debug("Base path for volume: " + sourceVolumeBasePath);
+ log.debug("Base path for volume: {}", sourceVolumeBasePath);
// Get the path for the file we want to archive
String sourcePathBasePath = fileToArchive.toUri().getPath();
@@ -642,27 +643,27 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
}
}
- log.debug("Computed relative path for file to archive: " + relativeVolumePath);
+ log.debug("Computed relative path for file to archive: {}", relativeVolumePath);
// The file archive path on this volume (we can't archive this file to a different volume)
Path archivePath = new Path(sourceVolumeBasePath, ServerConstants.FILE_ARCHIVE_DIR);
- log.debug("File archive path: " + archivePath);
+ log.debug("File archive path: {}", archivePath);
fs.mkdirs(archivePath);
// Preserve the path beneath the Volume's base directory (e.g. tables/1/A_0000001.rf)
Path fileArchivePath = new Path(archivePath, relativeVolumePath);
- log.debug("Create full path of " + fileArchivePath + " from " + archivePath + " and " + relativeVolumePath);
+ log.debug("Create full path of {} from {} and {}", fileArchivePath, archivePath, relativeVolumePath);
// Make sure that it doesn't already exist, something is wrong.
if (fs.exists(fileArchivePath)) {
- log.warn("Tried to archive file, but it already exists: " + fileArchivePath);
+ log.warn("Tried to archive file, but it already exists: {}", fileArchivePath);
return false;
}
- log.debug("Moving " + fileToArchive + " to " + fileArchivePath);
+ log.debug("Moving {} to {}", fileToArchive, fileArchivePath);
return fs.rename(fileToArchive, fileArchivePath);
}
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
index 0c09396..7075702 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
@@ -109,7 +109,7 @@ public class CloseWriteAheadLogReferences implements Runnable {
findWalsSpan.stop();
}
- log.info("Found " + closed.size() + " WALs referenced in metadata in " + sw.toString());
+ log.info("Found {} WALs referenced in metadata in {}", closed.size(), sw.toString());
sw.reset();
Span updateReplicationSpan = Trace.start("updateReplicationTable");
@@ -122,7 +122,7 @@ public class CloseWriteAheadLogReferences implements Runnable {
updateReplicationSpan.stop();
}
- log.info("Closed " + recordsClosed + " WAL replication references in replication table in " + sw.toString());
+ log.info("Closed {} WAL replication references in replication table in {}", recordsClosed, sw.toString());
}
/**
@@ -190,7 +190,7 @@ public class CloseWriteAheadLogReferences implements Runnable {
closeWal(bw, entry.getKey());
recordsClosed++;
} catch (MutationsRejectedException e) {
- log.error("Failed to submit delete mutation for " + entry.getKey());
+ log.error("Failed to submit delete mutation for {}", entry.getKey());
continue;
}
}
@@ -236,7 +236,7 @@ public class CloseWriteAheadLogReferences implements Runnable {
return null;
return HostAndPort.fromString(locations.get(0));
} catch (Exception e) {
- log.warn("Failed to obtain master host " + e);
+ log.warn("Failed to obtain master host", e);
}
return null;
@@ -251,7 +251,7 @@ public class CloseWriteAheadLogReferences implements Runnable {
}
return ThriftUtil.getClient(new MasterClientService.Client.Factory(), address, context);
} catch (Exception e) {
- log.warn("Issue with masterConnection (" + address + ") " + e, e);
+ log.warn("Issue with masterConnection ({})", address, e);
}
return null;
}
@@ -290,10 +290,10 @@ public class CloseWriteAheadLogReferences implements Runnable {
tserverClient = ThriftUtil.getClient(new TabletClientService.Client.Factory(), server, context);
return tserverClient.getActiveLogs(tinfo, context.rpcCreds());
} catch (TTransportException e) {
- log.warn("Failed to fetch active write-ahead logs from " + server, e);
+ log.warn("Failed to fetch active write-ahead logs from {}", server, e);
return null;
} catch (TException e) {
- log.warn("Failed to fetch active write-ahead logs from " + server, e);
+ log.warn("Failed to fetch active write-ahead logs from {}", server, e);
return null;
} finally {
ThriftUtil.returnClient(tserverClient);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
index 0c378d4..fb9f315 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
@@ -318,7 +318,7 @@ class FateServiceHandler implements FateService.Iface {
if (!canMerge)
throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
- Master.log.debug("Creating merge op: " + tableId + " " + startRow + " " + endRow);
+ Master.log.debug("Creating merge op: {} {} {}", tableId, startRow, endRow);
master.fate.seedTransaction(opid, new TraceRepo<>(new TableRangeOp(MergeInfo.Operation.MERGE, namespaceId, tableId, startRow, endRow)), autoCleanup);
break;
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index 0027918..8ccbf69 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -249,7 +249,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
if (state.equals(newState))
return;
if (!transitionOK[state.ordinal()][newState.ordinal()]) {
- log.error("Programmer error: master should not transition from " + state + " to " + newState);
+ log.error("Programmer error: master should not transition from {} to {}", state, newState);
}
MasterState oldState = state;
state = newState;
@@ -290,7 +290,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
throw new IOException("Failed to move root tablet from " + oldPath + " to " + newPath);
}
- log.info("Upgrade renamed " + oldPath + " to " + newPath);
+ log.info("Upgrade renamed {} to {}", oldPath, newPath);
}
Path location = null;
@@ -309,7 +309,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
if (location == null)
throw new IllegalStateException("Failed to find root tablet");
- log.info("Upgrade setting root table location in zookeeper " + location);
+ log.info("Upgrade setting root table location in zookeeper {}", location);
zoo.putPersistentData(dirZPath, location.toString().getBytes(), NodeExistsPolicy.FAIL);
}
}
@@ -337,7 +337,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
IZooReaderWriter zoo = ZooReaderWriter.getInstance();
final String zooRoot = ZooUtil.getRoot(getInstance());
- log.debug("Handling updates for version " + accumuloPersistentVersion);
+ log.debug("Handling updates for version {}", accumuloPersistentVersion);
log.debug("Cleaning out remnants of logger role.");
zoo.recursiveDelete(zooRoot + "/loggers", NodeMissingPolicy.SKIP);
@@ -348,7 +348,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
zoo.putPersistentData(zooRoot + Constants.ZRECOVERY, zero, NodeExistsPolicy.SKIP);
for (String id : zoo.getChildren(zooRoot + Constants.ZTABLES)) {
- log.debug("Prepping table " + id + " for compaction cancellations.");
+ log.debug("Prepping table {} for compaction cancellations.", id);
zoo.putPersistentData(zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_COMPACT_CANCEL_ID, zero, NodeExistsPolicy.SKIP);
}
@@ -365,7 +365,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
// skip
}
for (String id : zoo.getChildren(zooRoot + Constants.ZTABLES)) {
- log.debug("Converting table " + id + " WALog setting to Durability");
+ log.debug("Converting table {} WALog setting to Durability", id);
try {
@SuppressWarnings("deprecation")
String path = zooRoot + Constants.ZTABLES + "/" + id + Constants.ZTABLE_CONF + "/" + Property.TABLE_WALOG_ENABLED.getKey();
@@ -394,18 +394,18 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
Collections.singleton(new Pair<>(Namespace.DEFAULT, Namespace.ID.DEFAULT)))) {
String ns = namespace.getFirst();
Namespace.ID id = namespace.getSecond();
- log.debug("Upgrade creating namespace \"" + ns + "\" (ID: " + id + ")");
+ log.debug("Upgrade creating namespace \"{}\" (ID: {})", ns, id);
if (!Namespaces.exists(getInstance(), id))
TableManager.prepareNewNamespaceState(getInstance().getInstanceID(), id, ns, NodeExistsPolicy.SKIP);
}
// create replication table in zk
- log.debug("Upgrade creating table " + ReplicationTable.NAME + " (ID: " + ReplicationTable.ID + ")");
+ log.debug("Upgrade creating table {} (ID: {})", ReplicationTable.NAME, ReplicationTable.ID);
TableManager.prepareNewTableState(getInstance().getInstanceID(), ReplicationTable.ID, Namespace.ID.ACCUMULO, ReplicationTable.NAME, TableState.OFFLINE,
NodeExistsPolicy.SKIP);
// create root table
- log.debug("Upgrade creating table " + RootTable.NAME + " (ID: " + RootTable.ID + ")");
+ log.debug("Upgrade creating table {} (ID: {})", RootTable.NAME, RootTable.ID);
TableManager.prepareNewTableState(getInstance().getInstanceID(), RootTable.ID, Namespace.ID.ACCUMULO, RootTable.NAME, TableState.ONLINE,
NodeExistsPolicy.SKIP);
Initialize.initSystemTablesConfig();
@@ -417,13 +417,12 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
for (String tableId : zoo.getChildren(tables)) {
Namespace.ID targetNamespace = (MetadataTable.ID.canonicalID().equals(tableId) || RootTable.ID.canonicalID().equals(tableId)) ? Namespace.ID.ACCUMULO
: Namespace.ID.DEFAULT;
- log.debug("Upgrade moving table " + new String(zoo.getData(tables + "/" + tableId + Constants.ZTABLE_NAME, null), UTF_8) + " (ID: " + tableId
- + ") into namespace with ID " + targetNamespace);
+ log.debug("Upgrade moving table {} (ID: {}) into namespace with ID {}", new String(zoo.getData(tables + "/" + tableId + Constants.ZTABLE_NAME, null), UTF_8), tableId, targetNamespace);
zoo.putPersistentData(tables + "/" + tableId + Constants.ZTABLE_NAMESPACE, targetNamespace.getUtf8(), NodeExistsPolicy.SKIP);
}
// rename metadata table
- log.debug("Upgrade renaming table " + MetadataTable.OLD_NAME + " (ID: " + MetadataTable.ID + ") to " + MetadataTable.NAME);
+ log.debug("Upgrade renaming table {} (ID: {}) to {}", MetadataTable.OLD_NAME, MetadataTable.ID, MetadataTable.NAME);
zoo.putPersistentData(tables + "/" + MetadataTable.ID + Constants.ZTABLE_NAME, Tables.qualify(MetadataTable.NAME).getSecond().getBytes(UTF_8),
NodeExistsPolicy.OVERWRITE);
@@ -605,10 +604,9 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
AccumuloConfiguration aconf = serverConfig.getSystemConfiguration();
- log.info("Version " + Constants.VERSION);
- log.info("Instance " + getInstance().getInstanceID());
+ log.info("Version {}", Constants.VERSION);
+ log.info("Instance {}", getInstance().getInstanceID());
timeKeeper = new MasterTime(this);
-
ThriftTransportPool.getInstance().setIdleTime(aconf.getTimeInMillis(Property.GENERAL_RPC_TIMEOUT));
tserverSet = new LiveTServerSet(this, this);
this.tabletBalancer = aconf.instantiateClassProperty(Property.MASTER_TABLET_BALANCER, TabletBalancer.class, new DefaultLoadBalancer());
@@ -718,7 +716,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
byte[] data = ZooReaderWriter.getInstance().getData(ZooUtil.getRoot(getInstance()) + Constants.ZMASTER_GOAL_STATE, null);
return MasterGoalState.valueOf(new String(data));
} catch (Exception e) {
- log.error("Problem getting real goal state from zookeeper: " + e);
+ log.error("Problem getting real goal state from zookeeper: ", e);
sleepUninterruptibly(1, TimeUnit.SECONDS);
}
}
@@ -807,7 +805,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
}
// Handle merge transitions
if (mergeInfo.getExtent() != null) {
- log.debug("mergeInfo overlaps: " + extent + " " + mergeInfo.overlaps(extent));
+ log.debug("mergeInfo overlaps: {} {}", extent, mergeInfo.overlaps(extent));
if (mergeInfo.overlaps(extent)) {
switch (mergeInfo.getState()) {
case NONE:
@@ -913,7 +911,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
for (int i = start; i < watchers.size(); i++) {
TabletGroupWatcher watcher = watchers.get(i);
if (watcher.stats.getLastMasterState() != getMasterState()) {
- log.debug(watcher.getName() + ": " + watcher.stats.getLastMasterState() + " != " + getMasterState());
+ log.debug("{}: {} != {}", watcher.getName(), watcher.stats.getLastMasterState(), getMasterState());
return false;
}
}
@@ -969,7 +967,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
log.debug("The root tablet is still assigned or hosted");
if (count + root_count == 0 && goodStats()) {
Set<TServerInstance> currentServers = tserverSet.getCurrentServers();
- log.debug("stopping " + currentServers.size() + " tablet servers");
+ log.debug("stopping {} tablet servers", currentServers.size());
for (TServerInstance server : currentServers) {
try {
serversToShutdown.add(server);
@@ -997,7 +995,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
wait = updateStatus();
eventListener.waitForEvents(wait);
} catch (Throwable t) {
- log.error("Error balancing tablets, will wait for " + WAIT_BETWEEN_ERRORS / ONE_SECOND + " (seconds) and then retry", t);
+ log.error("Error balancing tablets, will wait for {} (seconds) and then retry", WAIT_BETWEEN_ERRORS / ONE_SECOND, t);
sleepUninterruptibly(WAIT_BETWEEN_ERRORS, TimeUnit.MILLISECONDS);
}
}
@@ -1009,13 +1007,13 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
checkForHeldServer(tserverStatus);
if (!badServers.isEmpty()) {
- log.debug("not balancing because the balance information is out-of-date " + badServers.keySet());
+ log.debug("not balancing because the balance information is out-of-date {}", badServers.keySet());
} else if (notHosted() > 0) {
- log.debug("not balancing because there are unhosted tablets: " + notHosted());
+ log.debug("not balancing because there are unhosted tablets: {}", notHosted());
} else if (getMasterGoalState() == MasterGoalState.CLEAN_STOP) {
log.debug("not balancing because the master is attempting to stop cleanly");
} else if (!serversToShutdown.isEmpty()) {
- log.debug("not balancing while shutting down servers " + serversToShutdown);
+ log.debug("not balancing while shutting down servers {}", serversToShutdown);
} else {
for (TabletGroupWatcher tgw : watchers) {
if (!tgw.isSameTserversAsLastScan(currentServers)) {
@@ -1043,13 +1041,13 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
}
}
if (crazyHoldTime == 1 && someHoldTime == 1 && tserverStatus.size() > 1) {
- log.warn("Tablet server " + instance + " exceeded maximum hold time: attempting to kill it");
+ log.warn("Tablet server {} exceeded maximum hold time: attempting to kill it", instance);
try {
TServerConnection connection = tserverSet.getConnection(instance);
if (connection != null)
connection.fastHalt(masterLock);
} catch (TException e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
}
tserverSet.remove(instance);
}
@@ -1061,11 +1059,11 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
for (TabletMigration m : TabletBalancer.checkMigrationSanity(tserverStatus.keySet(), migrationsOut)) {
if (migrations.containsKey(m.tablet)) {
- log.warn("balancer requested migration more than once, skipping " + m);
+ log.warn("balancer requested migration more than once, skipping {}", m);
continue;
}
migrations.put(m.tablet, m.newServer);
- log.debug("migration " + m);
+ log.debug("migration {}", m);
}
if (migrationsOut.size() > 0) {
nextEvent.event("Migrating %d more tablets, %d total", migrationsOut.size(), migrations.size());
@@ -1103,10 +1101,10 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
t.setName(oldName);
}
} catch (Exception ex) {
- log.error("unable to get tablet server status " + server + " " + ex.toString());
- log.debug("unable to get tablet server status " + server, ex);
+ log.error("unable to get tablet server status {} {}", server, ex.toString());
+ log.debug("unable to get tablet server status {}", server, ex);
if (badServers.get(server).incrementAndGet() > MAX_BAD_STATUS_COUNT) {
- log.warn("attempting to stop " + server);
+ log.warn("attempting to stop {}", server);
try {
TServerConnection connection = tserverSet.getConnection(server);
if (connection != null) {
@@ -1115,7 +1113,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
} catch (TTransportException e) {
// ignore: it's probably down
} catch (Exception e) {
- log.info("error talking to troublesome tablet server ", e);
+ log.info("error talking to troublesome tablet server", e);
}
badServers.remove(server);
}
@@ -1275,7 +1273,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
}
String address = sa.address.toString();
- log.info("Setting master lock data to " + address);
+ log.info("Setting master lock data to {}", address);
masterLock.replaceLockData(address.getBytes());
while (!clientService.isServing()) {
@@ -1383,11 +1381,11 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
@Override
public synchronized void failedToAcquireLock(Exception e) {
- log.warn("Failed to get master lock " + e);
+ log.warn("Failed to get master lock", e);
if (e instanceof NoAuthException) {
String msg = "Failed to acquire master lock due to incorrect ZooKeeper authentication.";
- log.error(msg + " Ensure instance.secret is consistent across Accumulo configuration", e);
+ log.error("{} Ensure instance.secret is consistent across Accumulo configuration", msg, e);
Halt.halt(msg, -1);
}
@@ -1464,7 +1462,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
DeadServerList obit = new DeadServerList(ZooUtil.getRoot(getInstance()) + Constants.ZDEADTSERVERS);
if (added.size() > 0) {
- log.info("New servers: " + added);
+ log.info("New servers: {}", added);
for (TServerInstance up : added)
obit.delete(up.hostPort());
}
@@ -1480,7 +1478,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
unexpected.removeAll(this.serversToShutdown);
if (unexpected.size() > 0) {
if (stillMaster() && !getMasterGoalState().equals(MasterGoalState.CLEAN_STOP)) {
- log.warn("Lost servers " + unexpected);
+ log.warn("Lost servers {}", unexpected);
}
}
serversToShutdown.removeAll(deleted);
@@ -1498,7 +1496,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
while (iter.hasNext()) {
Entry<KeyExtent,TServerInstance> entry = iter.next();
if (deleted.contains(entry.getValue())) {
- log.info("Canceling migration of " + entry.getKey() + " to " + entry.getValue());
+ log.info("Canceling migration of {} to {}", entry.getKey(), entry.getValue());
iter.remove();
}
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
index 94e6c86..dafd8f3 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
@@ -134,7 +134,7 @@ public class MasterClientServiceHandler extends FateServiceHandler implements Ma
} catch (NoNodeException nne) {
throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.NOTFOUND, null);
} catch (Exception e) {
- Master.log.warn("{}", e.getMessage(), e);
+ Master.log.warn(e.getMessage(), e);
throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.OTHER, null);
}
return Long.parseLong(new String(fid));
@@ -245,14 +245,14 @@ public class MasterClientServiceHandler extends FateServiceHandler implements Ma
throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.NOTFOUND, null);
} catch (AccumuloException e) {
- Master.log.debug("Failed to scan " + MetadataTable.NAME + " table to wait for flush " + tableId, e);
+ Master.log.debug("Failed to scan {} table to wait for flush {}", MetadataTable.NAME, tableId, e);
} catch (TabletDeletedException tde) {
- Master.log.debug("Failed to scan " + MetadataTable.NAME + " table to wait for flush " + tableId, tde);
+ Master.log.debug("Failed to scan {} table to wait for flush {}", MetadataTable.NAME, tableId, tde);
} catch (AccumuloSecurityException e) {
- Master.log.warn("{}", e.getMessage(), e);
+ Master.log.warn(e.getMessage(), e);
throw new ThriftSecurityException();
} catch (TableNotFoundException e) {
- Master.log.error("{}", e.getMessage(), e);
+ Master.log.error(e.getMessage(), e);
throw new ThriftTableOperationException();
}
}
@@ -307,7 +307,7 @@ public class MasterClientServiceHandler extends FateServiceHandler implements Ma
if (!force) {
final TServerConnection server = master.tserverSet.getConnection(doomed);
if (server == null) {
- Master.log.warn("No server found for name " + tabletServer);
+ Master.log.warn("No server found for name {}", tabletServer);
return;
}
}
@@ -327,7 +327,7 @@ public class MasterClientServiceHandler extends FateServiceHandler implements Ma
public void reportSplitExtent(TInfo info, TCredentials credentials, String serverName, TabletSplit split) {
KeyExtent oldTablet = new KeyExtent(split.oldTablet);
if (master.migrations.remove(oldTablet) != null) {
- Master.log.info("Canceled migration of " + split.oldTablet);
+ Master.log.info("Canceled migration of {}", split.oldTablet);
}
for (TServerInstance instance : master.tserverSet.getCurrentServers()) {
if (serverName.equals(instance.hostPort())) {
@@ -335,7 +335,7 @@ public class MasterClientServiceHandler extends FateServiceHandler implements Ma
return;
}
}
- Master.log.warn("Got a split from a server we don't recognize: " + serverName);
+ Master.log.warn("Got a split from a server we don't recognize: {}", serverName);
}
@Override
@@ -344,7 +344,7 @@ public class MasterClientServiceHandler extends FateServiceHandler implements Ma
switch (status) {
case LOAD_FAILURE:
- Master.log.error(serverName + " reports assignment failed for tablet " + tablet);
+ Master.log.error("{} reports assignment failed for tablet {}", serverName, tablet);
break;
case LOADED:
master.nextEvent.event("tablet %s was loaded on %s", tablet, serverName);
@@ -353,11 +353,11 @@ public class MasterClientServiceHandler extends FateServiceHandler implements Ma
master.nextEvent.event("tablet %s was unloaded from %s", tablet, serverName);
break;
case UNLOAD_ERROR:
- Master.log.error(serverName + " reports unload failed for tablet " + tablet);
+ Master.log.error("{} reports unload failed for tablet {}", serverName, tablet);
break;
case UNLOAD_FAILURE_NOT_SERVING:
if (Master.log.isTraceEnabled()) {
- Master.log.trace(serverName + " reports unload failed: not serving tablet, could be a split: " + tablet);
+ Master.log.trace("{} reports unload failed: not serving tablet, could be a split: {}", serverName, tablet);
}
break;
case CHOPPED:
@@ -471,7 +471,7 @@ public class MasterClientServiceHandler extends FateServiceHandler implements Ma
new DefaultLoadBalancer());
balancer.init(master);
master.tabletBalancer = balancer;
- log.info("tablet balancer changed to " + master.tabletBalancer.getClass().getName());
+ log.info("tablet balancer changed to {}", master.tabletBalancer.getClass().getName());
}
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index b356241..86cf00e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -201,13 +201,13 @@ abstract class TabletGroupWatcher extends Daemon {
if (tls == null) {
continue;
}
- Master.log.debug(store.name() + " location State: " + tls);
+ Master.log.debug("{} location State: {}", store.name(), tls);
// ignore entries for tables that do not exist in zookeeper
if (TableManager.getInstance().getTableState(tls.extent.getTableId()) == null)
continue;
if (Master.log.isTraceEnabled())
- Master.log.trace(tls + " walogs " + tls.walogs.size());
+ Master.log.trace("{} walogs {}", tls, tls.walogs.size());
// Don't overwhelm the tablet servers with work
if (unassigned.size() + unloaded > Master.MAX_TSERVER_WORK_CHUNK * currentTServers.size()) {
@@ -235,7 +235,7 @@ abstract class TabletGroupWatcher extends Daemon {
TServerInstance server = tls.getServer();
TabletState state = tls.getState(currentTServers.keySet());
if (Master.log.isTraceEnabled()) {
- Master.log.trace("Goal state " + goal + " current " + state + " for " + tls.extent);
+ Master.log.trace("Goal state {} current {} for {}", goal, state, tls.extent);
}
stats.update(tableId, state);
mergeStats.update(tls.extent, state, tls.chopped, !tls.walogs.isEmpty());
@@ -342,7 +342,7 @@ abstract class TabletGroupWatcher extends Daemon {
unloaded++;
totalUnloaded++;
} else {
- Master.log.warn("Could not connect to server " + server);
+ Master.log.warn("Could not connect to server {}", server);
}
break;
case ASSIGNED:
@@ -382,7 +382,7 @@ abstract class TabletGroupWatcher extends Daemon {
Master.log.info("Detected change in current tserver set, re-running state machine.");
}
} catch (Exception ex) {
- Master.log.error("Error processing table state for store " + store.name(), ex);
+ Master.log.error("Error processing table state for store {}", store.name(), ex);
if (ex.getCause() != null && ex.getCause() instanceof BadLocationStateException) {
repairMetadata(((BadLocationStateException) ex.getCause()).getEncodedEndRow());
} else {
@@ -393,7 +393,7 @@ abstract class TabletGroupWatcher extends Daemon {
try {
iter.close();
} catch (IOException ex) {
- Master.log.warn("Error closing TabletLocationState iterator: " + ex, ex);
+ Master.log.warn("Error closing TabletLocationState iterator:", ex);
}
}
}
@@ -409,7 +409,7 @@ abstract class TabletGroupWatcher extends Daemon {
}
private void repairMetadata(Text row) {
- Master.log.debug("Attempting repair on " + row);
+ Master.log.debug("Attempting repair on {}", row);
// ACCUMULO-2261 if a dying tserver writes a location before its lock information propagates, it may cause duplicate assignment.
// Attempt to find the dead server entry and remove it.
try {
@@ -437,7 +437,7 @@ abstract class TabletGroupWatcher extends Daemon {
} else if (future.size() == 0 && assigned.size() > 1) {
Master.log.warn("Found a tablet hosted on multiple servers, attempting to repair");
} else {
- Master.log.info("Attempted a repair, but nothing seems to be obviously wrong. " + assigned + " " + future);
+ Master.log.info("Attempted a repair, but nothing seems to be obviously wrong. {} {}", assigned, future);
return;
}
Iterator<Entry<Key,Value>> iter = Iterators.concat(future.entrySet().iterator(), assigned.entrySet().iterator());
@@ -445,7 +445,7 @@ abstract class TabletGroupWatcher extends Daemon {
Entry<Key,Value> entry = iter.next();
TServerInstance alive = master.tserverSet.find(entry.getValue().toString());
if (alive == null) {
- Master.log.info("Removing entry " + entry);
+ Master.log.info("Removing entry {}", entry);
BatchWriter bw = this.master.getConnector().createBatchWriter(table, new BatchWriterConfig());
Mutation m = new Mutation(entry.getKey().getRow());
m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
@@ -454,9 +454,9 @@ abstract class TabletGroupWatcher extends Daemon {
return;
}
}
- Master.log.error("Metadata table is inconsistent at " + row + " and all assigned/future tservers are still online.");
+ Master.log.error("Metadata table is inconsistent at {} and all assigned/future tservers are still online.", row);
} catch (Throwable e) {
- Master.log.error("Error attempting repair of metadata " + row + ": " + e, e);
+ Master.log.error("Error attempting repair of metadata {}: ", row, e);
}
}
@@ -494,15 +494,15 @@ abstract class TabletGroupWatcher extends Daemon {
TServerConnection conn;
conn = this.master.tserverSet.getConnection(tls.current);
if (conn != null) {
- Master.log.info("Asking " + tls.current + " to split " + tls.extent + " at " + splitPoint);
+ Master.log.info("Asking {} to split {} at {}", tls.current, tls.extent, splitPoint);
conn.splitTablet(this.master.masterLock, tls.extent, splitPoint);
} else {
- Master.log.warn("Not connected to server " + tls.current);
+ Master.log.warn("Not connected to server {}", tls.current);
}
} catch (NotServingTabletException e) {
- Master.log.debug("Error asking tablet server to split a tablet: " + e);
+ Master.log.debug("Error asking tablet server to split a tablet", e);
} catch (Exception e) {
- Master.log.warn("Error asking tablet server to split a tablet: " + e);
+ Master.log.warn("Error asking tablet server to split a tablet", e);
}
}
}
@@ -524,10 +524,10 @@ abstract class TabletGroupWatcher extends Daemon {
try {
conn = this.master.tserverSet.getConnection(tls.current);
if (conn != null) {
- Master.log.info("Asking " + tls.current + " to chop " + tls.extent);
+ Master.log.info("Asking {} to chop {}", tls.current, tls.extent);
conn.chop(this.master.masterLock, tls.extent);
} else {
- Master.log.warn("Could not connect to server " + tls.current);
+ Master.log.warn("Could not connect to server {}", tls.current);
}
} catch (TException e) {
Master.log.warn("Communications error asking tablet server to chop a tablet");
@@ -562,7 +562,7 @@ abstract class TabletGroupWatcher extends Daemon {
}
}
} catch (Exception ex) {
- Master.log.error("Unable to update merge state for merge " + stats.getMergeInfo().getExtent(), ex);
+ Master.log.error("Unable to update merge state for merge {} ", stats.getMergeInfo().getExtent(), ex);
}
}
}
@@ -570,13 +570,13 @@ abstract class TabletGroupWatcher extends Daemon {
private void deleteTablets(MergeInfo info) throws AccumuloException {
KeyExtent extent = info.getExtent();
String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
- Master.log.debug("Deleting tablets for " + extent);
+ Master.log.debug("Deleting tablets for {}", extent);
char timeType = '\0';
KeyExtent followingTablet = null;
if (extent.getEndRow() != null) {
Key nextExtent = new Key(extent.getEndRow()).followingKey(PartialKey.ROW);
followingTablet = getHighTablet(new KeyExtent(extent.getTableId(), nextExtent.getRow(), extent.getEndRow()));
- Master.log.debug("Found following tablet " + followingTablet);
+ Master.log.debug("Found following tablet {}", followingTablet);
}
try {
Connector conn = this.master.getConnector();
@@ -584,7 +584,7 @@ abstract class TabletGroupWatcher extends Daemon {
if (start == null) {
start = new Text();
}
- Master.log.debug("Making file deletion entries for " + extent);
+ Master.log.debug("Making file deletion entries for {}", extent);
Range deleteRange = new Range(KeyExtent.getMetadataEntry(extent.getTableId(), start), false, KeyExtent.getMetadataEntry(extent.getTableId(),
extent.getEndRow()), true);
Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
@@ -630,7 +630,7 @@ abstract class TabletGroupWatcher extends Daemon {
}
if (followingTablet != null) {
- Master.log.debug("Updating prevRow of " + followingTablet + " to " + extent.getPrevEndRow());
+ Master.log.debug("Updating prevRow of {} to {}", followingTablet, extent.getPrevEndRow());
bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
try {
Mutation m = new Mutation(followingTablet.getMetadataEntry());
@@ -643,7 +643,7 @@ abstract class TabletGroupWatcher extends Daemon {
}
} else {
// Recreate the default tablet to hold the end of the table
- Master.log.debug("Recreating the last tablet to point to " + extent.getPrevEndRow());
+ Master.log.debug("Recreating the last tablet to point to {}", extent.getPrevEndRow());
VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(Optional.of(extent.getTableId()));
String tdir = master.getFileSystem().choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+ extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION;
@@ -656,9 +656,9 @@ abstract class TabletGroupWatcher extends Daemon {
private void mergeMetadataRecords(MergeInfo info) throws AccumuloException {
KeyExtent range = info.getExtent();
- Master.log.debug("Merging metadata for " + range);
+ Master.log.debug("Merging metadata for {}", range);
KeyExtent stop = getHighTablet(range);
- Master.log.debug("Highest tablet is " + stop);
+ Master.log.debug("Highest tablet is {}", stop);
Value firstPrevRowValue = null;
Text stopRow = stop.getMetadataEntry();
Text start = range.getPrevEndRow();
@@ -692,7 +692,7 @@ abstract class TabletGroupWatcher extends Daemon {
m.put(key.getColumnFamily(), key.getColumnQualifier(), value);
fileCount++;
} else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
- Master.log.debug("prevRow entry for lowest tablet is " + value);
+ Master.log.debug("prevRow entry for lowest tablet is {}", value);
firstPrevRowValue = new Value(value);
} else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, value.toString());
@@ -721,7 +721,7 @@ abstract class TabletGroupWatcher extends Daemon {
bw.flush();
- Master.log.debug("Moved " + fileCount + " files to " + stop);
+ Master.log.debug("Moved {} files to {}", fileCount, stop);
if (firstPrevRowValue == null) {
Master.log.debug("tablet already merged");
@@ -730,7 +730,7 @@ abstract class TabletGroupWatcher extends Daemon {
stop.setPrevEndRow(KeyExtent.decodePrevEndRow(firstPrevRowValue));
Mutation updatePrevRow = stop.getPrevRowUpdateMutation();
- Master.log.debug("Setting the prevRow for last tablet: " + stop);
+ Master.log.debug("Setting the prevRow for last tablet: {}", stop);
bw.addMutation(updatePrevRow);
bw.flush();
@@ -762,7 +762,7 @@ abstract class TabletGroupWatcher extends Daemon {
// either disappear entirely or not all.. this is important for the case
// where the process terminates in the loop below...
scanner = conn.createScanner(info.getExtent().isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY);
- Master.log.debug("Deleting range " + scanRange);
+ Master.log.debug("Deleting range {}", scanRange);
scanner.setRange(scanRange);
RowIterator rowIter = new RowIterator(scanner);
while (rowIter.hasNext()) {
@@ -776,7 +776,7 @@ abstract class TabletGroupWatcher extends Daemon {
m = new Mutation(key.getRow());
m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
- Master.log.debug("deleting entry " + key);
+ Master.log.debug("deleting entry {}", key);
}
bw.addMutation(m);
}
@@ -812,8 +812,8 @@ abstract class TabletGroupWatcher extends Daemon {
boolean tabletsSuspendable = canSuspendTablets();
if (!assignedToDeadServers.isEmpty()) {
int maxServersToShow = min(assignedToDeadServers.size(), 100);
- Master.log.debug(assignedToDeadServers.size() + " assigned to dead servers: " + assignedToDeadServers.subList(0, maxServersToShow) + "...");
- Master.log.debug("logs for dead servers: " + logsForDeadServers);
+ Master.log.debug("{} assigned to dead servers: {}...", assignedToDeadServers.size(), assignedToDeadServers.subList(0, maxServersToShow));
+ Master.log.debug("logs for dead servers: {}", logsForDeadServers);
if (tabletsSuspendable) {
store.suspend(assignedToDeadServers, logsForDeadServers, master.getSteadyTime());
} else {
@@ -836,7 +836,7 @@ abstract class TabletGroupWatcher extends Daemon {
if (unassigned.containsKey(assignment.getKey())) {
if (assignment.getValue() != null) {
if (!currentTServers.containsKey(assignment.getValue())) {
- Master.log.warn("balancer assigned " + assignment.getKey() + " to a tablet server that is not current " + assignment.getValue() + " ignoring");
+ Master.log.warn("balancer assigned {} to a tablet server that is not current {} ignoring", assignment.getKey(), assignment.getValue());
continue;
}
@@ -849,21 +849,21 @@ abstract class TabletGroupWatcher extends Daemon {
// Don't let the log message get too gigantic
if (builder.length() > ASSINGMENT_BUFFER_MAX_LENGTH) {
builder.append("]");
- Master.log.debug(store.name() + " assigning tablets: [" + builder.toString());
+ Master.log.debug("{} assigning tablets: [{}", store.name(), builder.toString());
builder.setLength(0);
}
assignments.add(new Assignment(assignment.getKey(), assignment.getValue()));
}
} else {
- Master.log.warn(store.name() + " load balancer assigning tablet that was not nominated for assignment " + assignment.getKey());
+ Master.log.warn("{} load balancer assigning tablet that was not nominated for assignment {}", store.name(), assignment.getKey());
}
}
if (builder.length() > 0) {
// Make sure to log any leftover assignments
builder.append("]");
- Master.log.debug(store.name() + " assigning tablets: [" + builder.toString());
+ Master.log.debug("{} assigning tablets: [{}", store.name(), builder.toString());
}
if (!unassigned.isEmpty() && assignedOut.isEmpty())
@@ -880,7 +880,7 @@ abstract class TabletGroupWatcher extends Daemon {
if (conn != null) {
conn.assignTablet(this.master.masterLock, a.tablet);
} else {
- Master.log.warn("Could not connect to server " + a.server);
+ Master.log.warn("Could not connect to server {}", a.server);
}
master.assignedTablet(a.tablet);
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java b/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
index bd49a7d..7880432 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
@@ -71,7 +71,7 @@ public class RecoveryManager {
List<String> workIDs = new DistributedWorkQueue(ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY, aconf).getWorkQueued();
sortsQueued.addAll(workIDs);
} catch (Exception e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
}
}
@@ -102,9 +102,9 @@ public class RecoveryManager {
initiateSort(sortId, source, destination, aconf);
}
} catch (FileNotFoundException e) {
- log.debug("Unable to initate log sort for " + source + ": " + e);
+ log.debug("Unable to initiate log sort for {}: ", source, e);
} catch (Exception e) {
- log.warn("Failed to initiate log sort " + source, e);
+ log.warn("Failed to initiate log sort {}", source, e);
} finally {
if (!rescheduled) {
synchronized (RecoveryManager.this) {
@@ -126,7 +126,7 @@ public class RecoveryManager {
}
final String path = ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY + "/" + sortId;
- log.info("Created zookeeper entry " + path + " with data " + work);
+ log.info("Created zookeeper entry {} with data {}", path, work);
}
public boolean recoverLogs(KeyExtent extent, Collection<Collection<String>> walogs) throws IOException {
@@ -139,7 +139,7 @@ public class RecoveryManager {
if (switchedWalog != null) {
// replaces the volume used for sorting, but do not change entry in metadata table. When the tablet loads it will change the metadata table entry. If
// the tablet has the same replacement config, then it will find the sorted log.
- log.info("Volume replaced " + walog + " -> " + switchedWalog);
+ log.info("Volume replaced {} -> {}", walog, switchedWalog);
walog = switchedWalog;
}
@@ -147,7 +147,7 @@ public class RecoveryManager {
String sortId = parts[parts.length - 1];
String filename = master.getFileSystem().getFullPath(FileType.WAL, walog).toString();
String dest = RecoveryPath.getRecoveryPath(master.getFileSystem(), new Path(filename)).toString();
- log.debug("Recovering " + filename + " to " + dest);
+ log.debug("Recovering {} to {}", filename, dest);
boolean sortQueued;
synchronized (this) {
@@ -181,7 +181,7 @@ public class RecoveryManager {
delay = Math.min(2 * delay, 1000 * 60 * 5l);
}
- log.info("Starting recovery of " + filename + " (in : " + (delay / 1000) + "s), tablet " + extent + " holds a reference");
+ log.info("Starting recovery of {} (in : {}s), tablet {} holds a reference", filename, (delay / 1000), extent);
executor.schedule(new LogSortTask(closer, filename, dest, sortId), delay, TimeUnit.MILLISECONDS);
closeTasksQueued.add(sortId);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
index 72b3bbd..48a9591 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
@@ -56,7 +56,7 @@ public class ReplicationDriver extends Daemon {
ProbabilitySampler sampler = new ProbabilitySampler(conf.getFraction(Property.REPLICATION_TRACE_PERCENT));
long millisToWait = conf.getTimeInMillis(Property.REPLICATION_DRIVER_DELAY);
- log.debug("Waiting " + millisToWait + "ms before starting main replication loop");
+ log.debug("Waiting {}ms before starting main replication loop", millisToWait);
UtilWaitThread.sleep(millisToWait);
log.debug("Starting replication loop");
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
index 3e2dc1c..e83cec5 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
@@ -95,7 +95,7 @@ public class WorkMaker {
// Extract the useful bits from the status key
ReplicationSchema.StatusSection.getFile(entry.getKey(), file);
Table.ID tableId = ReplicationSchema.StatusSection.getTableId(entry.getKey());
- log.debug("Processing replication status record for " + file + " on table " + tableId);
+ log.debug("Processing replication status record for {} on table {}", file, tableId);
Status status;
try {
@@ -108,7 +108,7 @@ public class WorkMaker {
// Don't create the record if we have nothing to do.
// TODO put this into a filter on serverside
if (!shouldCreateWork(status)) {
- log.debug("Not creating work: " + status.toString());
+ log.debug("Not creating work: {}", status.toString());
continue;
}
@@ -170,7 +170,7 @@ public class WorkMaker {
}
protected void addWorkRecord(Text file, Value v, Map<String,String> targets, Table.ID sourceTableId) {
- log.info("Adding work records for " + file + " to targets " + targets);
+ log.info("Adding work records for {} to targets {}", file, targets);
try {
Mutation m = new Mutation(file);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
index 0f40698..315c8c5 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
@@ -109,70 +109,70 @@ public class MergeStats {
if (state == MergeState.NONE)
return state;
if (total == 0) {
- log.trace("failed to see any tablets for this range, ignoring " + info.getExtent());
+ log.trace("failed to see any tablets for this range, ignoring {}", info.getExtent());
return state;
}
- log.info("Computing next merge state for " + info.getExtent() + " which is presently " + state + " isDelete : " + info.isDelete());
+ log.info("Computing next merge state for {} which is presently {} isDelete : {}", info.getExtent(), state, info.isDelete());
if (state == MergeState.STARTED) {
state = MergeState.SPLITTING;
}
if (state == MergeState.SPLITTING) {
- log.info(hosted + " are hosted, total " + total);
+ log.info("{} are hosted, total {}", hosted, total);
if (!info.isDelete() && total == 1) {
- log.info("Merge range is already contained in a single tablet " + info.getExtent());
+ log.info("Merge range is already contained in a single tablet {}", info.getExtent());
state = MergeState.COMPLETE;
} else if (hosted == total) {
if (info.isDelete()) {
if (!lowerSplit)
- log.info("Waiting for " + info + " lower split to occur " + info.getExtent());
+ log.info("Waiting for {} lower split to occur {}", info, info.getExtent());
else if (!upperSplit)
- log.info("Waiting for " + info + " upper split to occur " + info.getExtent());
+ log.info("Waiting for {} upper split to occur {}", info, info.getExtent());
else
state = MergeState.WAITING_FOR_CHOPPED;
} else {
state = MergeState.WAITING_FOR_CHOPPED;
}
} else {
- log.info("Waiting for " + hosted + " hosted tablets to be " + total + " " + info.getExtent());
+ log.info("Waiting for {} hosted tablets to be {} {}", hosted, total, info.getExtent());
}
}
if (state == MergeState.WAITING_FOR_CHOPPED) {
- log.info(chopped + " tablets are chopped " + info.getExtent());
+ log.info("{} tablets are chopped {}", chopped, info.getExtent());
if (chopped == needsToBeChopped) {
state = MergeState.WAITING_FOR_OFFLINE;
} else {
- log.info("Waiting for " + chopped + " chopped tablets to be " + needsToBeChopped + " " + info.getExtent());
+ log.info("Waiting for {} chopped tablets to be {} {}", chopped, needsToBeChopped, info.getExtent());
}
}
if (state == MergeState.WAITING_FOR_OFFLINE) {
if (chopped != needsToBeChopped) {
- log.warn("Unexpected state: chopped tablets should be " + needsToBeChopped + " was " + chopped + " merge " + info.getExtent());
+ log.warn("Unexpected state: chopped tablets should be {} was {} merge {}", needsToBeChopped, chopped, info.getExtent());
// Perhaps a split occurred after we chopped, but before we went offline: start over
state = MergeState.WAITING_FOR_CHOPPED;
} else {
- log.info(chopped + " tablets are chopped, " + unassigned + " are offline " + info.getExtent());
+ log.info("{} tablets are chopped, {} are offline {}", chopped, unassigned, info.getExtent());
if (unassigned == total && chopped == needsToBeChopped) {
if (verifyMergeConsistency(connector, master))
state = MergeState.MERGING;
else
- log.info("Merge consistency check failed " + info.getExtent());
+ log.info("Merge consistency check failed {}", info.getExtent());
} else {
- log.info("Waiting for " + unassigned + " unassigned tablets to be " + total + " " + info.getExtent());
+ log.info("Waiting for {} unassigned tablets to be {} {}", unassigned, total, info.getExtent());
}
}
}
if (state == MergeState.MERGING) {
if (hosted != 0) {
// Shouldn't happen
- log.error("Unexpected state: hosted tablets should be zero " + hosted + " merge " + info.getExtent());
+ log.error("Unexpected state: hosted tablets should be zero {} merge {}", hosted, info.getExtent());
state = MergeState.WAITING_FOR_OFFLINE;
}
if (unassigned != total) {
// Shouldn't happen
- log.error("Unexpected state: unassigned tablets should be " + total + " was " + unassigned + " merge " + info.getExtent());
+ log.error("Unexpected state: unassigned tablets should be {} was {} merge {}", total, unassigned, info.getExtent());
state = MergeState.WAITING_FOR_CHOPPED;
}
- log.info(unassigned + " tablets are unassigned " + info.getExtent());
+ log.info("{} tablets are unassigned {}", unassigned, info.getExtent());
}
return state;
}
@@ -192,39 +192,39 @@ public class MergeStats {
scanner.setRange(range.clip(MetadataSchema.TabletsSection.getRange()));
KeyExtent prevExtent = null;
- log.debug("Scanning range " + range);
+ log.debug("Scanning range {}", range);
for (Entry<Key,Value> entry : scanner) {
TabletLocationState tls;
try {
tls = MetaDataTableScanner.createTabletLocationState(entry.getKey(), entry.getValue());
} catch (BadLocationStateException e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
return false;
}
- log.debug("consistency check: " + tls + " walogs " + tls.walogs.size());
+ log.debug("consistency check: {} walogs {}", tls, tls.walogs.size());
if (!tls.extent.getTableId().equals(tableId)) {
break;
}
if (!tls.walogs.isEmpty() && verify.getMergeInfo().needsToBeChopped(tls.extent)) {
- log.debug("failing consistency: needs to be chopped" + tls.extent);
+ log.debug("failing consistency: needs to be chopped {}", tls.extent);
return false;
}
if (prevExtent == null) {
// this is the first tablet observed, it must be offline and its prev row must be less than the start of the merge range
if (tls.extent.getPrevEndRow() != null && tls.extent.getPrevEndRow().compareTo(start) > 0) {
- log.debug("failing consistency: prev row is too high " + start);
+ log.debug("failing consistency: prev row is too high {}", start);
return false;
}
if (tls.getState(master.onlineTabletServers()) != TabletState.UNASSIGNED && tls.getState(master.onlineTabletServers()) != TabletState.SUSPENDED) {
- log.debug("failing consistency: assigned or hosted " + tls);
+ log.debug("failing consistency: assigned or hosted {}", tls);
return false;
}
} else if (!tls.extent.isPreviousExtent(prevExtent)) {
- log.debug("hole in " + MetadataTable.NAME);
+ log.debug("hole in {}", MetadataTable.NAME);
return false;
}
@@ -236,8 +236,9 @@ public class MergeStats {
break;
}
}
- log.debug("chopped " + chopped + " v.chopped " + verify.chopped + " unassigned " + unassigned + " v.unassigned " + verify.unassigned + " verify.total "
- + verify.total);
+ log.debug("chopped {} v.chopped {} unassigned {} v.unassigned {} verify.total {}",
+ chopped, verify.chopped, unassigned, verify.unassigned, verify.total);
+
return chopped == verify.chopped && unassigned == verify.unassigned && unassigned == verify.total;
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
index 60d9c16..ff3b057 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
@@ -108,7 +108,7 @@ public class BulkImport extends MasterRepo {
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
- log.debug(" tid " + tid + " sourceDir " + sourceDir);
+ log.debug(" tid {} sourceDir {}", tid, sourceDir);
Utils.getReadLock(tableId, tid).lock();
@@ -137,10 +137,10 @@ public class BulkImport extends MasterRepo {
// move the files into the directory
try {
String bulkDir = prepareBulkImport(master, fs, sourceDir, tableId);
- log.debug(" tid " + tid + " bulkDir " + bulkDir);
+ log.debug(" tid {} bulkDir {}", tid, bulkDir);
return new LoadFiles(tableId, sourceDir, bulkDir, errorDir, setTime);
} catch (IOException ex) {
- log.error("error preparing the bulk import directory", ex);
+ log.error("error preparing the bulk import directory", ex);
throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT,
TableOperationExceptionType.BULK_BAD_INPUT_DIRECTORY, sourceDir + ": " + ex);
}
@@ -171,7 +171,7 @@ public class BulkImport extends MasterRepo {
throw new IOException("Dir exist when it should not " + newBulkDir);
if (fs.mkdirs(newBulkDir))
return newBulkDir;
- log.warn("Failed to create " + newBulkDir + " for unknown reason");
+ log.warn("Failed to create {} for unknown reason", newBulkDir);
sleepUninterruptibly(3, TimeUnit.SECONDS);
}
@@ -203,7 +203,7 @@ public class BulkImport extends MasterRepo {
extension = sa[sa.length - 1];
if (!FileOperations.getValidExtensions().contains(extension)) {
- log.warn(fileStatus.getPath() + " does not have a valid extension, ignoring");
+ log.warn("{} does not have a valid extension, ignoring", fileStatus.getPath());
return null;
}
} else {
@@ -213,22 +213,22 @@ public class BulkImport extends MasterRepo {
if (extension.equals(Constants.MAPFILE_EXTENSION)) {
if (!fileStatus.isDirectory()) {
- log.warn(fileStatus.getPath() + " is not a map file, ignoring");
+ log.warn("{} is not a map file, ignoring", fileStatus.getPath());
return null;
}
if (fileStatus.getPath().getName().equals("_logs")) {
- log.info(fileStatus.getPath() + " is probably a log directory from a map/reduce task, skipping");
+ log.info("{} is probably a log directory from a map/reduce task, skipping", fileStatus.getPath());
return null;
}
try {
FileStatus dataStatus = fs.getFileStatus(new Path(fileStatus.getPath(), MapFile.DATA_FILE_NAME));
if (dataStatus.isDirectory()) {
- log.warn(fileStatus.getPath() + " is not a map file, ignoring");
+ log.warn("{} is not a map file, ignoring", fileStatus.getPath());
return null;
}
} catch (FileNotFoundException fnfe) {
- log.warn(fileStatus.getPath() + " is not a map file, ignoring");
+ log.warn("{} is not a map file, ignoring", fileStatus.getPath());
return null;
}
}
@@ -237,7 +237,7 @@ public class BulkImport extends MasterRepo {
Path newPath = new Path(bulkDir, newName);
try {
fs.rename(fileStatus.getPath(), newPath);
- log.debug("Moved " + fileStatus.getPath() + " to " + newPath);
+ log.debug("Moved {} to {}", fileStatus.getPath(), newPath);
} catch (IOException E1) {
log.error("Could not move: {} {}", fileStatus.getPath().toString(), E1.getMessage());
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
index 94a7d4e..6535af6 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
@@ -56,7 +56,7 @@ public class ChangeTableState extends MasterRepo {
TableManager.getInstance().transitionTableState(tableId, ts);
Utils.unreserveNamespace(namespaceId, tid, false);
Utils.unreserveTable(tableId, tid, true);
- LoggerFactory.getLogger(ChangeTableState.class).debug("Changed table state " + tableId + " " + ts);
+ LoggerFactory.getLogger(ChangeTableState.class).debug("Changed table state {} {}", tableId, ts);
env.getEventCoordinator().event("Set table state of %s to %s", tableId, ts);
return null;
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
index 4375ee7..12e83eb 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
@@ -104,8 +104,8 @@ public class CompactRange extends MasterRepo {
if (tokens[i].startsWith(txidString))
continue; // skip self
- log.debug("txidString : " + txidString);
- log.debug("tokens[" + i + "] : " + tokens[i]);
+ log.debug("txidString : {}", txidString);
+ log.debug("tokens[{}] : {}", i, tokens[i]);
throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT, TableOperationExceptionType.OTHER,
"Another compaction with iterators and/or a compaction strategy is running");
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
index 8637406..8d90337 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
@@ -81,7 +81,7 @@ public class RenameNamespace extends MasterRepo {
Utils.unreserveNamespace(namespaceId, id, true);
}
- LoggerFactory.getLogger(RenameNamespace.class).debug("Renamed namespace " + namespaceId + " " + oldName + " " + newName);
+ LoggerFactory.getLogger(RenameNamespace.class).debug("Renamed namespace {} {} {}", namespaceId, oldName, newName);
return null;
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
index 1a6c696..33c2d9e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
@@ -99,7 +99,7 @@ public class RenameTable extends MasterRepo {
Utils.unreserveNamespace(namespaceId, tid, false);
}
- LoggerFactory.getLogger(RenameTable.class).debug("Renamed table " + tableId + " " + oldTableName + " " + newTableName);
+ LoggerFactory.getLogger(RenameTable.class).debug("Renamed table {} {} {}", tableId, oldTableName, newTableName);
return null;
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
index 3470761..74e7fc4 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
@@ -62,7 +62,7 @@ public class TableRangeOp extends MasterRepo {
public Repo<Master> call(long tid, Master env) throws Exception {
if (RootTable.ID.equals(tableId) && Operation.MERGE.equals(op)) {
- log.warn("Attempt to merge tablets for " + RootTable.NAME + " does nothing. It is not splittable.");
+ log.warn("Attempt to merge tablets for {} does nothing. It is not splittable.", RootTable.NAME);
}
Text start = startRow.length == 0 ? null : new Text(startRow);
@@ -90,7 +90,7 @@ public class TableRangeOp extends MasterRepo {
// Not sure this is a good thing to do. The Master state engine should be the one to remove it.
MergeInfo mergeInfo = env.getMergeInfo(tableId);
if (mergeInfo.getState() != MergeState.NONE)
- log.info("removing merge information " + mergeInfo);
+ log.info("removing merge information {}", mergeInfo);
env.clearMergeState(tableId);
Utils.unreserveNamespace(namespaceId, tid, false);
Utils.unreserveTable(tableId, tid, true);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
index faa5efe..96e954e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
@@ -85,7 +85,7 @@ public class Utils {
if (!zk.exists(ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId))
throw new AcceptableThriftTableOperationException(tableId.canonicalID(), "", op, TableOperationExceptionType.NOTFOUND, "Table does not exist");
}
- log.info("table " + tableId + " (" + Long.toHexString(tid) + ") locked for " + (writeLock ? "write" : "read") + " operation: " + op);
+ log.info("table {} ({}) locked for {} operation: {}", tableId, Long.toHexString(tid), (writeLock ? "write" : "read"), op);
return 0;
} else
return 100;
@@ -93,12 +93,12 @@ public class Utils {
public static void unreserveTable(Table.ID tableId, long tid, boolean writeLock) throws Exception {
getLock(tableId, tid, writeLock).unlock();
- log.info("table " + tableId + " (" + Long.toHexString(tid) + ") unlocked for " + (writeLock ? "write" : "read"));
+ log.info("table {} ({}) unlocked for {}", tableId, Long.toHexString(tid), (writeLock ? "write" : "read"));
}
public static void unreserveNamespace(Namespace.ID namespaceId, long id, boolean writeLock) throws Exception {
getLock(namespaceId, id, writeLock).unlock();
- log.info("namespace " + namespaceId + " (" + Long.toHexString(id) + ") unlocked for " + (writeLock ? "write" : "read"));
+ log.info("namespace {} ({}) unlocked for {}", namespaceId, Long.toHexString(id), (writeLock ? "write" : "read"));
}
public static long reserveNamespace(Namespace.ID namespaceId, long id, boolean writeLock, boolean mustExist, TableOperation op) throws Exception {
@@ -110,7 +110,7 @@ public class Utils {
throw new AcceptableThriftTableOperationException(namespaceId.canonicalID(), "", op, TableOperationExceptionType.NAMESPACE_NOTFOUND,
"Namespace does not exist");
}
- log.info("namespace " + namespaceId + " (" + Long.toHexString(id) + ") locked for " + (writeLock ? "write" : "read") + " operation: " + op);
+ log.info("namespace {} ({}) locked for {} operation: {}", namespaceId, Long.toHexString(id), (writeLock ? "write" : "read"), op);
return 0;
} else
return 100;
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
index 171e312..11403cb 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
@@ -67,15 +67,15 @@ public class ShutdownTServer extends MasterRepo {
try {
TabletServerStatus status = connection.getTableMap(false);
if (status.tableMap != null && status.tableMap.isEmpty()) {
- log.info("tablet server hosts no tablets " + server);
+ log.info("tablet server hosts no tablets {}", server);
connection.halt(master.getMasterLock());
- log.info("tablet server asked to halt " + server);
+ log.info("tablet server asked to halt {}", server);
return 0;
}
} catch (TTransportException ex) {
// expected
} catch (Exception ex) {
- log.error("Error talking to tablet server " + server + ": " + ex);
+ log.error("Error talking to tablet server {}: ", server, ex);
}
// If the connection was non-null and we could communicate with it
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
index 5cdc80a..972da46 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
@@ -285,7 +285,7 @@ public class Monitor implements HighlyAvailableService {
Monitor.gcStatus = fetchGcStatus();
} catch (Exception e) {
mmi = null;
- log.info("Error fetching stats: " + e);
+ log.info("Error fetching stats: ", e);
} finally {
if (client != null) {
MasterClient.close(client);
@@ -412,7 +412,7 @@ public class Monitor implements HighlyAvailableService {
}
}
} catch (Exception ex) {
- log.warn("Unable to contact the garbage collector at " + address, ex);
+ log.warn("Unable to contact the garbage collector at {}", address, ex);
}
return result;
}
@@ -451,7 +451,7 @@ public class Monitor implements HighlyAvailableService {
int ports[] = config.getSystemConfiguration().getPort(Property.MONITOR_PORT);
for (int port : ports) {
try {
- log.debug("Creating monitor on port " + port);
+ log.debug("Creating monitor on port {}", port);
server = new EmbeddedWebServer(hostname, port);
server.addServlet(getDefaultServlet(), "/resources/*");
server.addServlet(getRestServlet(), "/rest/*");
@@ -481,13 +481,13 @@ public class Monitor implements HighlyAvailableService {
log.error("Unable to get hostname", e);
}
}
- log.debug("Using " + advertiseHost + " to advertise monitor location");
+ log.debug("Using {} to advertise monitor location in ZooKeeper", advertiseHost);
try {
String monitorAddress = HostAndPort.fromParts(advertiseHost, server.getPort()).toString();
ZooReaderWriter.getInstance().putPersistentData(ZooUtil.getRoot(instance) + Constants.ZMONITOR_HTTP_ADDR, monitorAddress.getBytes(UTF_8),
NodeExistsPolicy.OVERWRITE);
- log.info("Set monitor address in zookeeper to " + monitorAddress);
+ log.info("Set monitor address in zookeeper to {}", monitorAddress);
} catch (Exception ex) {
log.error("Unable to set monitor HTTP address in zookeeper", ex);
}
@@ -509,7 +509,7 @@ public class Monitor implements HighlyAvailableService {
try {
Monitor.fetchData();
} catch (Exception e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
}
sleepUninterruptibly(333, TimeUnit.MILLISECONDS);
@@ -525,7 +525,7 @@ public class Monitor implements HighlyAvailableService {
try {
Monitor.fetchScans();
} catch (Exception e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
}
sleepUninterruptibly(5, TimeUnit.SECONDS);
}
@@ -706,7 +706,7 @@ public class Monitor implements HighlyAvailableService {
@Override
public synchronized void failedToAcquireLock(Exception e) {
- log.warn("Failed to get monitor lock " + e);
+ log.warn("Failed to get monitor lock", e);
if (acquiredLock) {
Halt.halt("Zoolock in unexpected state FAL " + acquiredLock + " " + failedToAcquireLock, -1);
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/ZooKeeperStatus.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/ZooKeeperStatus.java
index 62f872f..704e737 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/ZooKeeperStatus.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/ZooKeeperStatus.java
@@ -130,7 +130,7 @@ public class ZooKeeperStatus implements Runnable {
}
update.add(new ZooKeeperState(keeper, mode, clients));
} catch (Exception ex) {
- log.info("Exception talking to zookeeper " + keeper, ex);
+ log.info("Exception talking to zookeeper {}", keeper, ex);
update.add(new ZooKeeperState(keeper, "Down", -1));
} finally {
if (transport != null) {
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
index be924ba..c167b6c 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
@@ -128,7 +128,7 @@ public abstract class AsyncSpanReceiver<SpanKey,Destination> implements SpanRece
}
sent = true;
} catch (Exception ex) {
- log.warn("Got error sending to " + dest + ", refreshing client", ex);
+ log.warn("Got error sending to {}, refreshing client", dest, ex);
clients.remove(dest);
}
}
@@ -171,8 +171,8 @@ public abstract class AsyncSpanReceiver<SpanKey,Destination> implements SpanRece
if (sendQueueSize.get() > maxQueueSize) {
long now = System.currentTimeMillis();
if (now - lastNotificationOfDroppedSpans > 60 * 1000) {
- log.warn("Tracing spans are being dropped because there are already " + maxQueueSize + " spans queued for delivery.\n"
- + "This does not affect performance, security or data integrity, but distributed tracing information is being lost.");
+ log.warn("Tracing spans are being dropped because there are already {} spans queued for delivery.\n" +
+ "This does not affect performance, security or data integrity, but distributed tracing information is being lost.", maxQueueSize);
lastNotificationOfDroppedSpans = now;
}
return;
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/SendSpansViaThrift.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/SendSpansViaThrift.java
index 0f2fbaa..18ad748 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/SendSpansViaThrift.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/SendSpansViaThrift.java
@@ -53,7 +53,7 @@ public class SendSpansViaThrift extends AsyncSpanReceiver<String,Client> {
int portSeparatorIndex = destination.lastIndexOf(':');
String host = destination.substring(0, portSeparatorIndex);
int port = Integer.parseInt(destination.substring(portSeparatorIndex + 1));
- log.debug("Connecting to " + host + ":" + port);
+ log.debug("Connecting to {}:{}", host, port);
InetSocketAddress addr = new InetSocketAddress(host, port);
Socket sock = new Socket();
sock.connect(addr);
@@ -64,7 +64,7 @@ public class SendSpansViaThrift extends AsyncSpanReceiver<String,Client> {
log.trace("{}", ex, ex);
return null;
} catch (Exception ex) {
- log.error("{}", ex.getMessage(), ex);
+ log.error(ex.getMessage(), ex);
return null;
}
}
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
index 12c7123..6137759 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
@@ -169,13 +169,13 @@ public class TraceServer implements Watcher {
if (timeMutation != null)
writer.addMutation(timeMutation);
} catch (MutationsRejectedException exception) {
- log.warn("Unable to write mutation to table; discarding span. set log level to DEBUG for span information and stacktrace. cause: " + exception);
+ log.warn("Unable to write mutation to table; discarding span. set log level to DEBUG for span information and stacktrace. cause:", exception);
if (log.isDebugEnabled()) {
- log.debug("discarded span due to rejection of mutation: " + spanMutation, exception);
+ log.debug("discarded span due to rejection of mutation: {}", spanMutation, exception);
}
/* XXX this could be e.g. an IllegalArgumentExceptoion if we're trying to write this mutation to a writer that has been closed since we retrieved it */
} catch (RuntimeException exception) {
- log.warn("Unable to write mutation to table; discarding span. set log level to DEBUG for stacktrace. cause: " + exception);
+ log.warn("Unable to write mutation to table; discarding span. set log level to DEBUG for stacktrace. cause:", exception);
log.debug("unable to write mutation to table due to exception.", exception);
}
}
@@ -300,12 +300,12 @@ public class TraceServer implements Watcher {
}
}
} catch (MutationsRejectedException exception) {
- log.warn("Problem flushing traces, resetting writer. Set log level to DEBUG to see stacktrace. cause: " + exception);
+ log.warn("Problem flushing traces, resetting writer. Set log level to DEBUG to see stacktrace. cause:", exception);
log.debug("flushing traces failed due to exception", exception);
resetWriter();
/* XXX e.g. if the writer was closed between when we grabbed it and when we called flush. */
} catch (RuntimeException exception) {
- log.warn("Problem flushing traces, resetting writer. Set log level to DEBUG to see stacktrace. cause: " + exception);
+ log.warn("Problem flushing traces, resetting writer. Set log level to DEBUG to see stacktrace. cause:", exception);
log.debug("flushing traces failed due to exception", exception);
resetWriter();
}
@@ -316,7 +316,7 @@ public class TraceServer implements Watcher {
try {
writer = connector.createBatchWriter(tableName, new BatchWriterConfig().setMaxLatency(BATCH_WRITER_MAX_LATENCY, TimeUnit.SECONDS));
} catch (Exception ex) {
- log.warn("Unable to create a batch writer, will retry. Set log level to DEBUG to see stacktrace. cause: " + ex);
+ log.warn("Unable to create a batch writer, will retry. Set log level to DEBUG to see stacktrace. cause:", ex);
log.debug("batch writer creation failed with exception.", ex);
} finally {
/* Trade in the new writer (even if null) for the one we need to close. */
@@ -326,7 +326,7 @@ public class TraceServer implements Watcher {
writer.close();
}
} catch (Exception ex) {
- log.warn("Problem closing batch writer. Set log level to DEBUG to see stacktrace. cause: " + ex);
+ log.warn("Problem closing batch writer. Set log level to DEBUG to see stacktrace. cause:", ex);
log.debug("batch writer close failed with exception", ex);
}
}
@@ -335,7 +335,7 @@ public class TraceServer implements Watcher {
private void registerInZooKeeper(String name, String root) throws Exception {
IZooReaderWriter zoo = ZooReaderWriter.getInstance();
zoo.putPersistentData(root, new byte[0], NodeExistsPolicy.SKIP);
- log.info("Registering tracer " + name + " at " + root);
+ log.info("Registering tracer {} at {}", name, root);
String path = zoo.putEphemeralSequential(root + "/trace-", name.getBytes(UTF_8));
zoo.exists(path, this);
}
@@ -398,12 +398,12 @@ public class TraceServer implements Watcher {
@Override
public void process(WatchedEvent event) {
- log.debug("event " + event.getPath() + " " + event.getType() + " " + event.getState());
+ log.debug("event {} {} {}", event.getPath(), event.getType(), event.getState());
if (event.getState() == KeeperState.Expired) {
- log.warn("Trace server lost zookeeper registration at " + event.getPath());
+ log.warn("Trace server lost zookeeper registration at {} ", event.getPath());
server.stop();
} else if (event.getType() == EventType.NodeDeleted) {
- log.warn("Trace server zookeeper entry lost " + event.getPath());
+ log.warn("Trace server zookeeper entry lost {}", event.getPath());
server.stop();
}
if (event.getPath() != null) {
@@ -411,7 +411,7 @@ public class TraceServer implements Watcher {
if (ZooReaderWriter.getInstance().exists(event.getPath(), this))
return;
} catch (Exception ex) {
- log.error("{}", ex.getMessage(), ex);
+ log.error(ex.getMessage(), ex);
}
log.warn("Trace server unable to reset watch on zookeeper registration");
server.stop();
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/ZooTraceClient.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/ZooTraceClient.java
index 6954983..68f2247 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/ZooTraceClient.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/ZooTraceClient.java
@@ -105,7 +105,7 @@ public class ZooTraceClient extends SendSpansViaThrift implements Watcher {
// Once this passes, we can issue a shutdown of the pool
svc.shutdown();
} catch (Exception e) {
- log.error("Unabled to get destination tracer hosts in ZooKeeper, will retry in " + retryPause + " milliseconds", e);
+ log.error("Unabled to get destination tracer hosts in ZooKeeper, will retry in {} milliseconds", retryPause, e);
// We failed to connect to ZK, try again in `retryPause` milliseconds
svc.schedule(this, retryPause, TimeUnit.MILLISECONDS);
}
@@ -142,7 +142,7 @@ public class ZooTraceClient extends SendSpansViaThrift implements Watcher {
}
synchronized private void updateHosts(String path, List<String> children) {
- log.debug("Scanning trace hosts in zookeeper: " + path);
+ log.debug("Scanning trace hosts in zookeeper: {}", path);
try {
List<String> hosts = new ArrayList<>();
for (String child : children) {
@@ -151,7 +151,7 @@ public class ZooTraceClient extends SendSpansViaThrift implements Watcher {
}
this.hosts.clear();
this.hosts.addAll(hosts);
- log.debug("Trace hosts: " + this.hosts);
+ log.debug("Trace hosts: {}", this.hosts);
} catch (Exception ex) {
log.error("unable to get destination hosts in zookeeper", ex);
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/BulkFailedCopyProcessor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/BulkFailedCopyProcessor.java
index 26fe8ba..2d09732 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/BulkFailedCopyProcessor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/BulkFailedCopyProcessor.java
@@ -59,15 +59,15 @@ public class BulkFailedCopyProcessor implements Processor {
FileUtil.copy(origFs, orig, destFs, tmp, false, true, CachedConfiguration.getInstance());
destFs.rename(tmp, dest);
- log.debug("copied " + orig + " to " + dest);
+ log.debug("copied {} to {}", orig, dest);
} catch (IOException ex) {
try {
VolumeManager vm = VolumeManagerImpl.get(SiteConfiguration.getInstance());
FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
destFs.create(dest).close();
- log.warn(" marked " + dest + " failed", ex);
+ log.warn(" marked {} failed", dest, ex);
} catch (IOException e) {
- log.error("Unable to create failure flag file " + dest, e);
+ log.error("Unable to create failure flag file {}", dest, e);
}
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java
index 790de81..eb69490 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java
@@ -330,7 +330,7 @@ public class FileManager {
if (!tablet.isMeta()) {
filePermits.release(1);
}
- log.warn("Failed to open file {} {} continuing...", file, e.getMessage());
+ log.warn("Failed to open file {} {} continuing...", file, e.getMessage());
} else {
// close whatever files were opened
closeReaders(reservedFiles);
@@ -368,7 +368,7 @@ public class FileManager {
try {
reader.closeDeepCopies();
} catch (IOException e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
sawIOException = true;
}
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
index 293b274..63edb03 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
@@ -702,7 +702,7 @@ public class InMemoryMap {
if (mds.reader != null)
mds.reader.close();
} catch (IOException e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
}
}
}
@@ -814,7 +814,7 @@ public class InMemoryMap {
out.close();
- log.debug("Created mem dump file " + tmpFile);
+ log.debug("Created mem dump file {}", tmpFile);
memDumpFile = tmpFile;
@@ -829,7 +829,7 @@ public class InMemoryMap {
fs.delete(new Path(memDumpFile), true);
} catch (IOException ioe) {
- log.error("Failed to create mem dump file ", ioe);
+ log.error("Failed to create mem dump file", ioe);
while (activeIters.size() > 0) {
sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/NativeMap.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/NativeMap.java
index a447937..361d66a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/NativeMap.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/NativeMap.java
@@ -85,7 +85,7 @@ public class NativeMap implements Iterable<Map.Entry<Key,Value>> {
try {
System.loadLibrary("accumulo");
loadedNativeLibraries.set(true);
- log.info("Loaded native map shared library from " + ldLibraryPath);
+ log.info("Loaded native map shared library from {}", ldLibraryPath);
} catch (Exception | UnsatisfiedLinkError e) {
log.error("Tried and failed to load Accumulo native library from {}", ldLibraryPath, e);
}
@@ -154,18 +154,18 @@ public class NativeMap implements Iterable<Map.Entry<Key,Value>> {
}
private static boolean loadNativeLib(File libFile) {
- log.debug("Trying to load native map library " + libFile);
+ log.debug("Trying to load native map library {}", libFile);
if (libFile.exists() && libFile.isFile()) {
try {
System.load(libFile.getAbsolutePath());
loadedNativeLibraries.set(true);
- log.info("Loaded native map shared library " + libFile);
+ log.info("Loaded native map shared library {}", libFile);
return true;
} catch (Exception | UnsatisfiedLinkError e) {
log.error("Tried and failed to load native map library " + libFile, e);
}
} else {
- log.debug("Native map library " + libFile + " not found or is not a file.");
+ log.debug("Native map library {} not found or is not a file.", libFile);
}
return false;
}
@@ -205,9 +205,9 @@ public class NativeMap implements Iterable<Map.Entry<Key,Value>> {
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
if (allocatedNativeMaps.size() > 0) {
- log.info("There are " + allocatedNativeMaps.size() + " allocated native maps");
+ log.info("There are {} allocated native maps", allocatedNativeMaps.size());
}
- log.debug(totalAllocations + " native maps were allocated");
+ log.debug("{} native maps were allocated", totalAllocations);
}));
init = true;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TLevel.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TLevel.java
index b57e2da..53cfb3a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TLevel.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TLevel.java
@@ -17,6 +17,8 @@
package org.apache.accumulo.tserver;
import org.apache.log4j.Level;
+import org.apache.log4j.Priority;
+import org.slf4j.Logger;
public class TLevel extends Level {
@@ -24,7 +26,28 @@ public class TLevel extends Level {
public final static Level TABLET_HIST = new TLevel();
protected TLevel() {
- super(Level.DEBUG_INT + 100, "TABLET_HIST", Level.DEBUG_INT + 100);
+ super(Priority.DEBUG_INT + 100, "TABLET_HIST", Priority.DEBUG_INT + 100);
}
+
+ static public void logAtLevel(Logger log, Level level, String msg, Object...objects) {
+ switch(level.toInt()) {
+ case Priority.DEBUG_INT:
+ log.debug(msg, objects);
+ break;
+ case Priority.ERROR_INT:
+ case Priority.FATAL_INT:
+ log.error(msg, objects);
+ break;
+ case Priority.INFO_INT:
+ log.info(msg, objects);
+ break;
+ case Level.TRACE_INT:
+ log.trace(msg, objects);
+ break;
+ case Priority.WARN_INT:
+ log.warn(msg, objects);
+ break;
+ }
+ }
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 1506b0e..d176f55 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -445,7 +445,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
ThriftClientHandler() {
super(TabletServer.this, watcher, fs);
- log.debug(ThriftClientHandler.class.getName() + " created");
+ log.debug("{} created", ThriftClientHandler.class.getName());
}
@Override
@@ -811,7 +811,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
updateMetrics.add(TabletServerUpdateMetrics.UNKNOWN_TABLET_ERRORS, 0);
}
} else {
- log.warn("Denying access to table " + keyExtent.getTableId() + " for user " + us.getUser());
+ log.warn("Denying access to table {} for user {}", keyExtent.getTableId(), us.getUser());
long t2 = System.currentTimeMillis();
us.authTimes.addStat(t2 - t1);
us.currentTablet = null;
@@ -830,7 +830,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
updateMetrics.add(TabletServerUpdateMetrics.UNKNOWN_TABLET_ERRORS, 0);
return;
} catch (ThriftSecurityException e) {
- log.error("Denying permission to check user " + us.getUser() + " with user " + e.getUser(), e);
+ log.error("Denying permission to check user {} for user {}", us.getUser(), e.getUser(), e);
long t2 = System.currentTimeMillis();
us.authTimes.addStat(t2 - t1);
us.currentTablet = null;
@@ -1441,7 +1441,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
throw new NotServingTabletException(tkeyExtent);
}
} catch (IOException e) {
- log.warn("Failed to split " + keyExtent, e);
+ log.warn("Failed to split {}", keyExtent, e);
throw new RuntimeException(e);
}
}
@@ -1479,13 +1479,13 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
private void checkPermission(TCredentials credentials, String lock, final String request) throws ThriftSecurityException {
try {
- log.trace("Got " + request + " message from user: " + credentials.getPrincipal());
+ log.trace("Got {} message from user: {}", request, credentials.getPrincipal());
if (!security.canPerformSystemActions(credentials)) {
- log.warn("Got " + request + " message from user: " + credentials.getPrincipal());
+ log.warn("Got {} message from user: {}", request, credentials.getPrincipal());
throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
}
} catch (ThriftSecurityException e) {
- log.warn("Got " + request + " message from unauthenticatable user: " + e.getUser());
+ log.warn("Got {} message from unauthenticatable user: {}", request, e.getUser());
if (getCredentials().getToken().getClass().getName().equals(credentials.getTokenClassName())) {
log.error("Got message from a service with a mismatched configuration. Please ensure a compatible configuration.", e);
}
@@ -1493,7 +1493,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
}
if (tabletServerLock == null || !tabletServerLock.wasLockAcquired()) {
- log.debug("Got " + request + " message before my lock was acquired, ignoring...");
+ log.debug("Got {} message before my lock was acquired, ignoring...", request);
throw new RuntimeException("Lock not acquired");
}
@@ -1501,7 +1501,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
Halt.halt(1, new Runnable() {
@Override
public void run() {
- log.info("Tablet server no longer holds lock during checkPermission() : " + request + ", exiting");
+ log.info("Tablet server no longer holds lock during checkPermission() : {}, exiting", request);
gcLogger.logGCInfo(TabletServer.this.getConfiguration());
}
});
@@ -1516,7 +1516,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
// lock?
masterLockCache.clear();
if (!ZooLock.isLockHeld(masterLockCache, lid)) {
- log.warn("Got " + request + " message from a master that does not hold the current lock " + lock);
+ log.warn("Got {} message from a master that does not hold the current lock {}", request, lock);
throw new RuntimeException("bad master lock");
}
}
@@ -1569,7 +1569,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
all.remove(extent);
if (all.size() > 0) {
- log.error("Tablet " + extent + " overlaps previously assigned " + unopenedOverlapping + " " + openingOverlapping + " " + onlineOverlapping
+ log.error("Tablet {} overlaps previously assigned {} {} {} {}", extent, unopenedOverlapping, openingOverlapping, onlineOverlapping, all);
}
return;
@@ -1581,7 +1581,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
}
// add the assignment job to the appropriate queue
- log.info("Loading tablet " + extent);
+ log.info("Loading tablet {}", extent);
final AssignmentHandler ah = new AssignmentHandler(extent);
// final Runnable ah = new LoggingRunnable(log, );
@@ -1593,7 +1593,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
public void run() {
ah.run();
if (onlineTablets.containsKey(extent)) {
- log.info("Root tablet loaded: " + extent);
+ log.info("Root tablet loaded: {}", extent);
} else {
log.info("Root tablet failed to load");
}
@@ -1671,7 +1671,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
Tablet tablet = onlineTablets.get(new KeyExtent(textent));
if (tablet != null) {
- log.info("Flushing " + tablet.getExtent());
+ log.info("Flushing {}", tablet.getExtent());
try {
tablet.flush(tablet.getFlushID());
} catch (NoNodeException nne) {
@@ -1998,7 +1998,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
int maxLogEntriesPerTablet = getTableConfiguration(tablet.getExtent()).getCount(Property.TABLE_MINC_LOGS_MAX);
if (tablet.getLogCount() >= maxLogEntriesPerTablet) {
- log.debug("Initiating minor compaction for " + tablet.getExtent() + " because it has " + tablet.getLogCount() + " write ahead logs");
+ log.debug("Initiating minor compaction for {} because it has {} write ahead logs", tablet.getExtent(), tablet.getLogCount());
tablet.initiateMinorCompaction(MinorCompactionReason.SYSTEM);
}
@@ -2027,7 +2027,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
}
}
} catch (Throwable t) {
- log.error("Unexpected exception in " + Thread.currentThread().getName(), t);
+ log.error("Unexpected exception in {}", Thread.currentThread().getName(), t);
sleepUninterruptibly(1, TimeUnit.SECONDS);
}
}
@@ -2049,7 +2049,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
log.error("split failed: {} for tablet {}", e.getMessage(), tablet.getExtent(), e);
} catch (Exception e) {
statsKeeper.updateTime(Operation.SPLIT, 0, 0, true);
- log.error("Unknown error on split: {}", e, e);
+ log.error("Unknown error on split:", e);
}
}
@@ -2061,7 +2061,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
return null;
}
- log.info("Starting split: " + tablet.getExtent());
+ log.info("Starting split: {}", tablet.getExtent());
statsKeeper.incrementStatusSplit();
long start = System.currentTimeMillis();
@@ -2091,8 +2091,8 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
statsKeeper.updateTime(Operation.SPLIT, start, 0, false);
long t2 = System.currentTimeMillis();
- log.info("Tablet split: " + tablet.getExtent() + " size0 " + newTablets[0].estimateTabletSize() + " size1 " + newTablets[1].estimateTabletSize() + " time "
- + (t2 - t1) + "ms");
+ log.info("Tablet split: {} size0 {} size1 {} time {}ms",
+ tablet.getExtent(), newTablets[0].estimateTabletSize(), newTablets[1].estimateTabletSize(), (t2 - t1));
return tabletInfo;
}
@@ -2142,7 +2142,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
// Tablet has probably been recently unloaded: repeated master
// unload request is crossing the successful unloaded message
if (!recentlyUnloadedCache.containsKey(extent)) {
- log.info("told to unload tablet that was not being served " + extent);
+ log.info("told to unload tablet that was not being served {}", extent);
enqueueMasterMessage(new TabletStatusMessage(TabletLoadState.UNLOAD_FAILURE_NOT_SERVING, extent));
}
return;
@@ -2153,7 +2153,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
} catch (Throwable e) {
if ((t.isClosing() || t.isClosed()) && e instanceof IllegalStateException) {
- log.debug("Failed to unload tablet {} ... it was alread closing or closed : {}", extent, e.getMessage());
+ log.debug("Failed to unload tablet {}... it was alread closing or closed : {}", extent, e.getMessage());
} else {
log.error("Failed to close tablet {}... Aborting migration", extent, e);
enqueueMasterMessage(new TabletStatusMessage(TabletLoadState.UNLOAD_ERROR, extent));
@@ -2172,11 +2172,11 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
try {
tls = new TabletLocationState(extent, null, instance, null, null, null, false);
} catch (BadLocationStateException e) {
- log.error("Unexpected error ", e);
+ log.error("Unexpected error", e);
}
if (!goalState.equals(TUnloadTabletGoal.SUSPENDED) || extent.isRootTablet()
|| (extent.isMeta() && !getConfiguration().getBoolean(Property.MASTER_METADATA_SUSPENDABLE))) {
- log.debug("Unassigning " + tls);
+ log.debug("Unassigning {}", tls);
TabletStateStore.unassign(TabletServer.this, tls, null);
} else {
log.debug("Suspending " + tls);
@@ -2196,7 +2196,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
// roll tablet stats over into tablet server's statsKeeper object as
// historical data
statsKeeper.saveMajorMinorTimes(t.getTabletStats());
- log.info("unloaded " + extent);
+ log.info("unloaded {}", extent);
}
}
@@ -2216,7 +2216,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
@Override
public void run() {
- log.info(clientAddress + ": got assignment from master: " + extent);
+ log.info("{}: got assignment from master: {}", clientAddress, extent);
synchronized (unopenedTablets) {
synchronized (openingTablets) {
@@ -2231,7 +2231,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
return;
if (!unopenedOverlapping.contains(extent)) {
- log.info("assignment " + extent + " no longer in the unopened set");
+ log.info("assignment {} no longer in the unopened set", extent);
return;
}
@@ -2246,7 +2246,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
}
}
- log.debug("Loading extent: " + extent);
+ log.debug("Loading extent: {}", extent);
// check Metadata table before accepting assignment
Text locationToOpen = null;
@@ -2276,13 +2276,13 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
openingTablets.remove(extent);
openingTablets.notifyAll();
}
- log.warn("Failed to verify tablet " + extent, e);
+ log.warn("Failed to verify tablet {}", extent, e);
enqueueMasterMessage(new TabletStatusMessage(TabletLoadState.LOAD_FAILURE, extent));
throw new RuntimeException(e);
}
if (locationToOpen == null) {
- log.debug("Reporting tablet " + extent + " assignment failure: unable to verify Tablet Information");
+ log.debug("Reporting tablet {} assignment failure: unable to verify Tablet Information", extent);
synchronized (openingTablets) {
openingTablets.remove(extent);
openingTablets.notifyAll();
@@ -2350,14 +2350,14 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
openingTablets.notifyAll();
}
}
- log.warn("failed to open tablet " + extent + " reporting failure to master");
+ log.warn("failed to open tablet {} reporting failure to master", extent);
enqueueMasterMessage(new TabletStatusMessage(TabletLoadState.LOAD_FAILURE, extent));
long reschedule = Math.min((1l << Math.min(32, retryAttempt)) * 1000, 10 * 60 * 1000l);
log.warn(String.format("rescheduling tablet load in %.2f seconds", reschedule / 1000.));
SimpleTimer.getInstance(getConfiguration()).schedule(new TimerTask() {
@Override
public void run() {
- log.info("adding tablet " + extent + " back to the assignment pool (retry " + retryAttempt + ")");
+ log.info("adding tablet {} back to the assignment pool (retry {})", extent, retryAttempt);
AssignmentHandler handler = new AssignmentHandler(extent, retryAttempt + 1);
if (extent.isMeta()) {
if (extent.isRootTablet()) {
@@ -2404,7 +2404,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
return null;
return HostAndPort.fromString(locations.get(0));
} catch (Exception e) {
- log.warn("Failed to obtain master host " + e);
+ log.warn("Failed to obtain master host", e);
}
return null;
@@ -2420,7 +2420,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
// log.info("Listener API to master has been opened");
return client;
} catch (Exception e) {
- log.warn("Issue with masterConnection (" + address + ") " + e, e);
+ log.warn("Issue with masterConnection ({}) ", address, e);
}
return null;
}
@@ -2442,7 +2442,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
}
HostAndPort address = startServer(getServerConfigurationFactory().getSystemConfiguration(), clientAddress.getHostText(), Property.TSERV_CLIENTPORT,
processor, "Thrift Client Server");
- log.info("address = " + address);
+ log.info("address = {}", address);
return address;
}
@@ -2456,7 +2456,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
ServerAddress sp = TServerUtils.startServer(this, clientAddress.getHostText(), Property.REPLICATION_RECEIPT_SERVICE_PORT, processor,
"ReplicationServicerHandler", "Replication Servicer", null, Property.REPLICATION_MIN_THREADS, Property.REPLICATION_THREADCHECK, maxMessageSizeProperty);
this.replServer = sp.server;
- log.info("Started replication service on " + sp.address);
+ log.info("Started replication service on {}", sp.address);
try {
// The replication service is unique to the thrift service for a tserver, not just a host.
@@ -2499,7 +2499,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
@Override
public void run() {
if (!serverStopRequested)
- log.error("Lost tablet server lock (reason = " + reason + "), exiting.");
+ log.error("Lost tablet server lock (reason = {}), exiting.", reason);
gcLogger.logGCInfo(getConfiguration());
}
});
@@ -2522,7 +2522,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.SKIP);
if (tabletServerLock.tryLock(lw, lockContent)) {
- log.debug("Obtained tablet server lock " + tabletServerLock.getLockPath());
+ log.debug("Obtained tablet server lock {}", tabletServerLock.getLockPath());
lockID = tabletServerLock.getLockID().serialize(ZooUtil.getRoot(getInstance()) + Constants.ZTSERVERS + "/");
return;
}
@@ -2571,7 +2571,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
} catch (KeeperException | InterruptedException e) {
// TODO Does there need to be a better check? What are the error conditions that we'd fall out here? AUTH_FAILURE?
// If we get the error, do we just put it on a timer and retry the exists(String, Watcher) call?
- log.error("Failed to perform initial check for authentication tokens in ZooKeeper. Delegation token authentication will be unavailable.", e);
+ log.error("Failed to perform initial check for authentication tokens in ZooKeeper. Delegation token authentication will be unavailable.", e);
}
}
@@ -2623,7 +2623,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
public void run() {
int maxPoolSize = aconf.getCount(Property.REPLICATION_WORKER_THREADS);
if (replicationThreadPool.getMaximumPoolSize() != maxPoolSize) {
- log.info("Resizing thread pool for sending replication work from " + replicationThreadPool.getMaximumPoolSize() + " to " + maxPoolSize);
+ log.info("Resizing thread pool for sending replication work from {} to {}", replicationThreadPool.getMaximumPoolSize(), maxPoolSize);
replicationThreadPool.setMaximumPoolSize(maxPoolSize);
}
}
@@ -2689,7 +2689,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
// may have lost connection with master
// loop back to the beginning and wait for a new one
// this way we survive master failures
- log.error(getClientAddressString() + ": TServerInfo: Exception. Master down?", e);
+ log.error("{}: TServerInfo: Exception. Master down?", getClientAddressString(), e);
}
}
@@ -2755,7 +2755,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
public static Pair<Text,KeyExtent> verifyTabletInformation(AccumuloServerContext context, KeyExtent extent, TServerInstance instance,
SortedMap<Key,Value> tabletsKeyValues, String clientAddress, ZooLock lock) throws AccumuloSecurityException, DistributedStoreException, AccumuloException {
- log.debug("verifying extent " + extent);
+ log.debug("verifying extent {}", extent);
if (extent.isRootTablet()) {
return verifyRootTablet(extent, instance);
}
@@ -2803,7 +2803,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
try {
fke = MasterMetadataUtil.fixSplit(context, metadataEntry, tabletEntries.get(metadataEntry), instance, lock);
} catch (IOException e) {
- log.error("Error fixing split " + metadataEntry);
+ log.error("Error fixing split {}", metadataEntry);
throw new AccumuloException(e.toString());
}
@@ -2829,7 +2829,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
Key key = entry.getKey();
if (!metadataEntry.equals(key.getRow())) {
- log.info("Unexpected row in tablet metadata " + metadataEntry + " " + key.getRow());
+ log.info("Unexpected row in tablet metadata {} {}", metadataEntry, key.getRow());
return null;
}
Text cf = key.getColumnFamily();
@@ -2839,7 +2839,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
}
future = new TServerInstance(entry.getValue(), key.getColumnQualifier());
} else if (cf.equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
- log.info("Tablet seems to be already assigned to " + new TServerInstance(entry.getValue(), key.getColumnQualifier()));
+ log.info("Tablet seems to be already assigned to {}", new TServerInstance(entry.getValue(), key.getColumnQualifier()));
return null;
} else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
prevEndRow = entry.getValue();
@@ -2855,7 +2855,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
} else {
KeyExtent ke2 = new KeyExtent(metadataEntry, prevEndRow);
if (!extent.equals(ke2)) {
- log.info("Tablet prev end row mismatch " + extent + " " + ke2.getPrevEndRow());
+ log.info("Tablet prev end row mismatch {} {}", extent, ke2.getPrevEndRow());
return null;
}
}
@@ -2869,12 +2869,12 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
}
if (future == null) {
- log.info("The master has not assigned " + extent + " to " + instance);
+ log.info("The master has not assigned {} to {}", extent, instance);
return null;
}
if (!instance.equals(future)) {
- log.info("Table " + extent + " has been assigned to " + future + " which is not " + instance);
+ log.info("Table {} has been assigned to {} which is not {}", extent, future, instance);
return null;
}
@@ -2902,13 +2902,13 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
try {
return new TServerInstance(address, tabletServerLock.getSessionId());
} catch (Exception ex) {
- log.warn("Unable to read session from tablet server lock" + ex);
+ log.warn("Unable to read session from tablet server lock", ex);
return null;
}
}
public void config(String hostname) {
- log.info("Tablet server starting on " + hostname);
+ log.info("Tablet server starting on {}", hostname);
majorCompactorThread = new Daemon(new LoggingRunnable(log, new MajorCompactor(getConfiguration())));
majorCompactorThread.setName("Split/MajC initiator");
majorCompactorThread.start();
@@ -2939,7 +2939,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
try {
AccumuloVFSClassLoader.getContextManager().removeUnusedContexts(configuredContexts);
} catch (IOException e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
}
}
};
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
index cc498fb..1cdc8bf 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
@@ -125,7 +125,7 @@ public class TabletServerResourceManager {
try {
int max = tserver.getConfiguration().getCount(maxThreads);
if (tp.getMaximumPoolSize() != max) {
- log.info("Changing " + maxThreads.getKey() + " to " + max);
+ log.info("Changing {} to {}", maxThreads.getKey(), max);
tp.setCorePoolSize(max);
tp.setMaximumPoolSize(max);
}
@@ -287,9 +287,9 @@ public class TabletServerResourceManager {
// Print a warning if an assignment has been running for over the configured time length
if (duration > millisBeforeWarning) {
- log.warn("Assignment for " + extent + " has been running for at least " + duration + "ms", runnable.getTask().getException());
+ log.warn("Assignment for {} has been running for at least {}ms", extent, duration, runnable.getTask().getException());
} else if (log.isTraceEnabled()) {
- log.trace("Assignment for " + extent + " only running for " + duration + "ms");
+ log.trace("Assignment for {} only running for {}ms", extent, duration);
}
}
} catch (Exception e) {
@@ -298,7 +298,7 @@ public class TabletServerResourceManager {
// Don't run more often than every 5s
long delay = Math.max((long) (millisBeforeWarning * 0.5), 5000l);
if (log.isTraceEnabled()) {
- log.trace("Rescheduling assignment watcher to run in " + delay + "ms");
+ log.trace("Rescheduling assignment watcher to run in {}ms", delay);
}
timer.schedule(this, delay);
}
@@ -451,7 +451,7 @@ public class TabletServerResourceManager {
TabletStateImpl tabletReport = tabletReportsCopy.get(keyExtent);
if (tabletReport == null) {
- log.warn("Memory manager asked to compact nonexistent tablet " + keyExtent + "; manager implementation might be misbehaving");
+ log.warn("Memory manager asked to compact nonexistent tablet {}; manager implementation might be misbehaving", keyExtent);
continue;
}
Tablet tablet = tabletReport.getTablet();
@@ -465,13 +465,13 @@ public class TabletServerResourceManager {
// different tablet instance => put it back
tabletReports.put(keyExtent, latestReport);
} else {
- log.debug("Cleaned up report for closed tablet " + keyExtent);
+ log.debug("Cleaned up report for closed tablet {}", keyExtent);
}
}
}
- log.debug("Ignoring memory manager recommendation: not minor compacting closed tablet " + keyExtent);
+ log.debug("Ignoring memory manager recommendation: not minor compacting closed tablet {}", keyExtent);
} else {
- log.info("Ignoring memory manager recommendation: not minor compacting " + keyExtent);
+ log.info("Ignoring memory manager recommendation: not minor compacting {}", keyExtent);
}
}
}
@@ -558,7 +558,7 @@ public class TabletServerResourceManager {
try {
if (entry.getValue().awaitTermination(60, TimeUnit.SECONDS))
break;
- log.info("Waiting for thread pool " + entry.getKey() + " to shutdown");
+ log.info("Waiting for thread pool {} to shutdown", entry.getKey());
} catch (InterruptedException e) {
log.warn("Interrupted waiting for executor to terminate", e);
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/constraints/ConstraintChecker.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/constraints/ConstraintChecker.java
index 9d065c7..62cee69 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/constraints/ConstraintChecker.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/constraints/ConstraintChecker.java
@@ -64,7 +64,7 @@ public class ConstraintChecker {
if (entry.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey())) {
String className = entry.getValue();
Class<? extends Constraint> clazz = loader.loadClass(className).asSubclass(Constraint.class);
- log.debug("Loaded constraint " + clazz.getName() + " for " + conf.getTableId());
+ log.debug("Loaded constraint {} for {}", clazz.getName(), conf.getTableId());
constrains.add(clazz.newInstance());
}
}
@@ -75,7 +75,7 @@ public class ConstraintChecker {
constrains.clear();
loader = null;
constrains.add(new UnsatisfiableConstraint((short) -1, "Failed to load constraints, not accepting mutations."));
- log.error("Failed to load constraints " + conf.getTableId() + " " + e.toString(), e);
+ log.error("Failed to load constraints {} {}", conf.getTableId(), e.toString(), e);
}
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index 9f0c7f7..1e20ed1 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@ -415,7 +415,7 @@ public class DfsLogger implements Comparable<DfsLogger> {
}
} catch (EOFException e) {
- log.warn("Got EOFException trying to read WAL header information, assuming the rest of the file (" + path + ") has no data.");
+ log.warn("Got EOFException trying to read WAL header information, assuming the rest of the file ({}) has no data.", path);
// A TabletServer might have died before the (complete) header was written
throw new LogHeaderIncompleteException(e);
}
@@ -433,7 +433,7 @@ public class DfsLogger implements Comparable<DfsLogger> {
*/
public synchronized void open(String address) throws IOException {
String filename = UUID.randomUUID().toString();
- log.debug("Address is " + address);
+ log.debug("Address is {}", address);
String logger = Joiner.on("+").join(address.split(":"));
log.debug("DfsLogger.open() begin");
@@ -507,7 +507,7 @@ public class DfsLogger implements Comparable<DfsLogger> {
syncThread.setName("Accumulo WALog thread " + toString());
syncThread.start();
op.await();
- log.debug("Got new write-ahead log: " + this);
+ log.debug("Got new write-ahead log: {}", this);
}
@Override
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/LogSorter.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/LogSorter.java
index 7c89230..8aa5486 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/LogSorter.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/LogSorter.java
@@ -83,7 +83,7 @@ public class LogSorter {
String src = parts[0];
String dest = parts[1];
String sortId = new Path(src).getName();
- log.debug("Sorting " + src + " to " + dest + " using sortId " + sortId);
+ log.debug("Sorting {} to {} using sortId {}", src, dest, sortId);
synchronized (currentWork) {
if (currentWork.containsKey(sortId))
@@ -92,7 +92,7 @@ public class LogSorter {
}
try {
- log.info("Copying " + src + " to " + dest);
+ log.info("Copying {} to {}", src, dest);
sort(sortId, new Path(src), dest);
} finally {
currentWork.remove(sortId);
@@ -117,7 +117,7 @@ public class LogSorter {
try {
inputStreams = DfsLogger.readHeaderAndReturnStream(fs, srcPath, conf);
} catch (LogHeaderIncompleteException e) {
- log.warn("Could not read header from write-ahead log " + srcPath + ". Not sorting.");
+ log.warn("Could not read header from write-ahead log {}. Not sorting.", srcPath);
// Creating a 'finished' marker will cause recovery to proceed normally and the
// empty file will be correctly ignored downstream.
fs.mkdirs(new Path(destPath));
@@ -150,14 +150,14 @@ public class LogSorter {
}
}
fs.create(new Path(destPath, "finished")).close();
- log.info("Finished log sort " + name + " " + getBytesCopied() + " bytes " + part + " parts in " + getSortTime() + "ms");
+ log.info("Finished log sort {} {} bytes {} parts in {}ms", name, getBytesCopied(), part, getSortTime());
} catch (Throwable t) {
try {
// parent dir may not exist
fs.mkdirs(new Path(destPath));
fs.create(SortedLogState.getFailedMarkerPath(destPath)).close();
} catch (IOException e) {
- log.error("Error creating failed flag file " + name, e);
+ log.error("Error creating failed flag file {}", name, e);
}
log.error("Caught throwable", t);
} finally {
@@ -165,7 +165,7 @@ public class LogSorter {
try {
close();
} catch (Exception e) {
- log.error("Error during cleanup sort/copy " + name, e);
+ log.error("Error during cleanup sort/copy {}", name, e);
}
synchronized (this) {
sortStop = System.currentTimeMillis();
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/SortedLogRecovery.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/SortedLogRecovery.java
index 3d403e0..8438869 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/SortedLogRecovery.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/SortedLogRecovery.java
@@ -101,16 +101,16 @@ public class SortedLogRecovery {
LastStartToFinish lastStartToFinish = new LastStartToFinish();
for (int i = 0; i < recoveryLogs.size(); i++) {
Path logfile = recoveryLogs.get(i);
- log.info("Looking at mutations from " + logfile + " for " + extent);
+ log.info("Looking at mutations from {} for {}", logfile, extent);
MultiReader reader = new MultiReader(fs, logfile);
try {
try {
tids[i] = findLastStartToFinish(reader, i, extent, tabletFiles, lastStartToFinish);
} catch (EmptyMapFileException ex) {
- log.info("Ignoring empty map file " + logfile);
+ log.info("Ignoring empty map file {}", logfile);
tids[i] = -1;
} catch (UnusedException ex) {
- log.info("Ignoring log file " + logfile + " appears to be unused by " + extent);
+ log.info("Ignoring log file {} appears to be unused by {}", logfile, extent);
tids[i] = -1;
}
} finally {
@@ -138,7 +138,7 @@ public class SortedLogRecovery {
log.warn("Ignoring error closing file");
}
}
- log.info("Recovery complete for " + extent + " using " + logfile);
+ log.info("Recovery complete for {} using {}", extent, logfile);
}
}
@@ -180,7 +180,7 @@ public class SortedLogRecovery {
// find the maximum tablet id... because a tablet may leave a tserver and then come back, in which case it would have a different tablet id
// for the maximum tablet id, find the minimum sequence #... may be ok to find the max seq, but just want to make the code behave like it used to
while (reader.next(key, value)) {
- // log.debug("Event " + key.event + " tablet " + key.tablet);
+
if (key.event != DEFINE_TABLET)
break;
if (key.tablet.equals(extent) || key.tablet.equals(alternative)) {
@@ -195,7 +195,7 @@ public class SortedLogRecovery {
throw new UnusedException();
}
- log.debug("Found tid, seq " + tid + " " + defineKey.seq);
+ log.debug("Found tid, seq {} {}", tid, defineKey.seq);
// Scan start/stop events for this tablet
key = defineKey;
@@ -213,7 +213,7 @@ public class SortedLogRecovery {
lastStartToFinish.update(fileno, key.seq);
// Tablet server finished the minor compaction, but didn't remove the entry from the METADATA table.
- log.debug("minor compaction into " + key.filename + " finished, but was still in the METADATA");
+ log.debug("minor compaction into {} finished, but was still in the METADATA", key.filename);
if (suffixes.contains(getPathSuffix(key.filename)))
lastStartToFinish.update(-1);
} else if (key.event == COMPACTION_FINISH) {
@@ -237,7 +237,7 @@ public class SortedLogRecovery {
LogFileValue value = new LogFileValue();
// Playback mutations after the last stop to finish
- log.info("Scanning for mutations starting at sequence number " + lastStartToFinish.seq + " for tid " + tid);
+ log.info("Scanning for mutations starting at sequence number {} for tid {}", lastStartToFinish.seq, tid);
key.event = MUTATION;
key.tid = tid;
// the seq number for the minor compaction start is now the same as the
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
index a4cd6b0..2b71e50 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
@@ -221,7 +221,7 @@ public class TabletServerLogger {
if (next instanceof DfsLogger) {
currentLog = (DfsLogger) next;
logId.incrementAndGet();
- log.info("Using next log " + currentLog.getFileName());
+ log.info("Using next log {}", currentLog.getFileName());
// When we successfully create a WAL, make sure to reset the Retry.
if (null != retry) {
@@ -281,7 +281,7 @@ public class TabletServerLogger {
log.debug("Created next WAL " + fileName);
tserver.addNewLogMarker(alog);
while (!nextLog.offer(alog, 12, TimeUnit.HOURS)) {
- log.info("Our WAL was not used for 12 hours: " + fileName);
+ log.info("Our WAL was not used for 12 hours: {}", fileName);
}
} catch (Exception t) {
log.error("Failed to open WAL", t);
@@ -335,7 +335,7 @@ public class TabletServerLogger {
} catch (DfsLogger.LogClosedException ex) {
// ignore
} catch (Throwable ex) {
- log.error("Unable to cleanly close log " + currentLog.getFileName() + ": " + ex, ex);
+ log.error("Unable to cleanly close log {}: {}", currentLog.getFileName(), ex, ex);
} finally {
this.tserver.walogClosed(currentLog);
}
@@ -410,10 +410,10 @@ public class TabletServerLogger {
success = (currentLogId == logId.get());
}
} catch (DfsLogger.LogClosedException ex) {
- log.debug("Logs closed while writing, retrying " + attempt);
+ log.debug("Logs closed while writing, retrying {}", attempt);
} catch (Exception t) {
if (attempt != 1) {
- log.error("Unexpected error writing to log, retrying attempt " + attempt, t);
+ log.error("Unexpected error writing to log, retrying attempt {}", attempt, t);
}
sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
} finally {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/logger/LogReader.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/logger/LogReader.java
index f53a5e8..479550f 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/logger/LogReader.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/logger/LogReader.java
@@ -106,7 +106,7 @@ public class LogReader {
try {
streams = DfsLogger.readHeaderAndReturnStream(fs, path, SiteConfiguration.getInstance());
} catch (LogHeaderIncompleteException e) {
- log.warn("Could not read header for " + path + ". Ignoring...");
+ log.warn("Could not read header for {}. Ignoring...", path);
continue;
}
DataInputStream input = streams.getDecryptingInputStream();
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/LookupTask.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/LookupTask.java
index 1bdf428..8ad62b4 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/LookupTask.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/LookupTask.java
@@ -121,7 +121,7 @@ public class LookupTask extends ScanTask<MultiScanResult> {
interruptFlag.set(false);
} catch (IOException e) {
- log.warn("lookup failed for tablet " + entry.getKey(), e);
+ log.warn("lookup failed for tablet {}", entry.getKey(), e);
throw new RuntimeException(e);
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/NextBatchTask.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/NextBatchTask.java
index 110eda3..c0c0d8c 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/NextBatchTask.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/NextBatchTask.java
@@ -91,7 +91,7 @@ public class NextBatchTask extends ScanTask<ScanBatch> {
Halt.halt("Ran out of memory scanning " + scanSession.extent + " for " + scanSession.client, 1);
addResult(ome);
} catch (Throwable e) {
- log.warn("exception while scanning tablet " + (scanSession == null ? "(unknown)" : scanSession.extent), e);
+ log.warn("exception while scanning tablet {}", (scanSession == null ? "(unknown)" : scanSession.extent), e);
addResult(e);
} finally {
runState.set(ScanRunState.FINISHED);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/session/SessionManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/session/SessionManager.java
index d4785e8..5f0a962 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/session/SessionManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/session/SessionManager.java
@@ -181,7 +181,7 @@ public class SessionManager {
}
long idleTime = System.currentTimeMillis() - session.lastAccessTime;
if (idleTime > configuredIdle && !session.reserved) {
- log.info("Closing idle session from user=" + session.getUser() + ", client=" + session.client + ", idle=" + idleTime + "ms");
+ log.info("Closing idle session from user={}, client={}, idle={}ms", session.getUser(), session.client, idleTime);
iter.remove();
sessionsToCleanup.add(session);
}
@@ -215,7 +215,7 @@ public class SessionManager {
synchronized (SessionManager.this) {
Session session2 = sessions.get(sessionId);
if (session2 != null && session2.lastAccessTime == removeTime && !session2.reserved) {
- log.info("Closing not accessed session from user=" + session2.getUser() + ", client=" + session2.client + ", duration=" + delay + "ms");
+ log.info("Closing not accessed session from user={}, client={}, duration={}ms", session2.getUser(), session2.client, delay);
sessions.remove(sessionId);
sessionToCleanup = session2;
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionWatcher.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionWatcher.java
index 64345c2..28a575e 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionWatcher.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionWatcher.java
@@ -74,7 +74,7 @@ public class CompactionWatcher implements Runnable {
for (ObservedCompactionInfo oci : copy.values()) {
if (oci.loggedWarning) {
- LoggerFactory.getLogger(CompactionWatcher.class).info("Compaction of " + oci.compactionInfo.getExtent() + " is no longer stuck");
+ LoggerFactory.getLogger(CompactionWatcher.class).info("Compaction of {} is no longer stuck", oci.compactionInfo.getExtent());
}
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
index 6c83f19..5e7924c 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
@@ -231,7 +231,7 @@ public class Compactor implements Callable<CompactionStats> {
} catch (IOException ex) {
if (!fs.deleteRecursively(outputFile.path())) {
if (fs.exists(outputFile.path())) {
- log.error("Unable to delete " + outputFile);
+ log.error("Unable to delete {}", outputFile);
}
}
throw ex;
@@ -244,10 +244,10 @@ public class Compactor implements Callable<CompactionStats> {
majCStats.setFileSize(mfwTmp.getLength());
return majCStats;
} catch (IOException e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
throw e;
} catch (RuntimeException e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
throw e;
} finally {
Thread.currentThread().setName(oldThreadName);
@@ -264,13 +264,13 @@ public class Compactor implements Callable<CompactionStats> {
} finally {
if (!fs.deleteRecursively(outputFile.path()))
if (fs.exists(outputFile.path()))
- log.error("Unable to delete " + outputFile);
+ log.error("Unable to delete {}", outputFile);
}
}
} catch (IOException e) {
- log.warn("{}", e.getMessage(), e);
+ log.warn(e.getMessage(), e);
} catch (RuntimeException exception) {
- log.warn("{}", exception.getMessage(), exception);
+ log.warn(exception.getMessage(), exception);
}
}
}
@@ -380,11 +380,11 @@ public class Compactor implements Callable<CompactionStats> {
try {
mfw.close();
} catch (IOException e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
}
fs.deleteRecursively(outputFile.path());
} catch (Exception e) {
- log.warn("Failed to delete Canceled compaction output file " + outputFile, e);
+ log.warn("Failed to delete Canceled compaction output file {}", outputFile, e);
}
throw new CompactionCanceledException();
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index 3867120..aad2b3f 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@ -52,10 +52,12 @@ import org.apache.accumulo.server.util.ReplicationTableUtil;
import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
import org.apache.accumulo.tserver.TLevel;
import org.apache.hadoop.fs.Path;
-import org.apache.log4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
+
class DatafileManager {
- private final Logger log = Logger.getLogger(DatafileManager.class);
+ private final Logger log = LoggerFactory.getLogger(DatafileManager.class);
// access to datafilesizes needs to be synchronized: see CompactionRunner#getNumFiles
private final Map<FileRef,DataFileValue> datafileSizes = Collections.synchronizedMap(new TreeMap<FileRef,DataFileValue>());
private final Tablet tablet;
@@ -93,7 +95,7 @@ class DatafileManager {
try {
tablet.wait(50);
} catch (InterruptedException e) {
- log.warn(e, e);
+ log.warn("", e);
}
}
@@ -140,7 +142,7 @@ class DatafileManager {
}
if (filesToDelete.size() > 0) {
- log.debug("Removing scan refs from metadata " + tablet.getExtent() + " " + filesToDelete);
+ log.debug("Removing scan refs from metadata {} {}", tablet.getExtent(), filesToDelete);
MetadataTableUtil.removeScanFiles(tablet.getExtent(), filesToDelete, tablet.getTabletServer(), tablet.getTabletServer().getLock());
}
}
@@ -161,7 +163,7 @@ class DatafileManager {
}
if (filesToDelete.size() > 0) {
- log.debug("Removing scan refs from metadata " + tablet.getExtent() + " " + filesToDelete);
+ log.debug("Removing scan refs from metadata {} {}", tablet.getExtent(), filesToDelete);
MetadataTableUtil.removeScanFiles(tablet.getExtent(), filesToDelete, tablet.getTabletServer(), tablet.getTabletServer().getLock());
}
}
@@ -185,7 +187,7 @@ class DatafileManager {
try {
tablet.wait(100);
} catch (InterruptedException e) {
- log.warn(e, e);
+ log.warn("", e);
}
}
}
@@ -261,7 +263,7 @@ class DatafileManager {
synchronized (tablet) {
for (Entry<FileRef,DataFileValue> tpath : paths.entrySet()) {
if (datafileSizes.containsKey(tpath.getKey())) {
- log.error("Adding file that is already in set " + tpath.getKey());
+ log.error("Adding file that is already in set {}", tpath.getKey());
}
datafileSizes.put(tpath.getKey(), tpath.getValue());
@@ -273,7 +275,7 @@ class DatafileManager {
}
for (Entry<FileRef,DataFileValue> entry : paths.entrySet()) {
- log.log(TLevel.TABLET_HIST, tablet.getExtent() + " import " + entry.getKey() + " " + entry.getValue());
+ TLevel.logAtLevel(log, TLevel.TABLET_HIST, "{} import {} {}", tablet.getExtent(), entry.getKey(), entry.getValue());
}
}
@@ -354,7 +356,7 @@ class DatafileManager {
tablet.getTabletServer().getFileSystem().deleteRecursively(tmpDatafile.path());
} else {
if (tablet.getTabletServer().getFileSystem().exists(newDatafile.path())) {
- log.warn("Target map file already exist " + newDatafile);
+ log.warn("Target map file already exist {}", newDatafile);
tablet.getTabletServer().getFileSystem().deleteRecursively(newDatafile.path());
}
@@ -362,7 +364,7 @@ class DatafileManager {
}
break;
} catch (IOException ioe) {
- log.warn("Tablet " + tablet.getExtent() + " failed to rename " + newDatafile + " after MinC, will retry in 60 secs...", ioe);
+ log.warn("Tablet {} failed to rename {} after MinC, will retry in 60 secs...", tablet.getExtent(), newDatafile, ioe);
sleepUninterruptibly(1, TimeUnit.MINUTES);
}
} while (true);
@@ -417,7 +419,7 @@ class DatafileManager {
// tablet is online and thus these WALs are referenced by that tablet. Therefore, the WAL replication status cannot be 'closed'.
if (replicate) {
if (log.isDebugEnabled()) {
- log.debug("Recording that data has been ingested into " + tablet.getExtent() + " using " + logFileOnly);
+ log.debug("Recording that data has been ingested into {} using {}", tablet.getExtent(), logFileOnly);
}
for (String logFile : logFileOnly) {
ReplicationTableUtil.updateFiles(tablet.getTabletServer(), tablet.getExtent(), logFile, StatusUtil.openWithUnknownLength());
@@ -435,7 +437,7 @@ class DatafileManager {
tablet.getTabletServer().minorCompactionFinished(tablet.getTabletMemory().getCommitSession(), newDatafile.toString(), commitSession.getWALogSeq() + 2);
break;
} catch (IOException e) {
- log.error("Failed to write to write-ahead log " + e.getMessage() + " will retry", e);
+ log.error("Failed to write to write-ahead log {} will retry", e.getMessage(), e);
sleepUninterruptibly(1, TimeUnit.SECONDS);
}
} while (true);
@@ -444,7 +446,7 @@ class DatafileManager {
t1 = System.currentTimeMillis();
if (datafileSizes.containsKey(newDatafile)) {
- log.error("Adding file that is already in set " + newDatafile);
+ log.error("Adding file that is already in set {}", newDatafile);
}
if (dfv.getNumEntries() > 0) {
@@ -466,9 +468,9 @@ class DatafileManager {
removeFilesAfterScan(filesInUseByScans);
if (absMergeFile != null)
- log.log(TLevel.TABLET_HIST, tablet.getExtent() + " MinC [" + absMergeFile + ",memory] -> " + newDatafile);
+ TLevel.logAtLevel(log, TLevel.TABLET_HIST, "{} MinC [{},memory] -> {}", tablet.getExtent(), absMergeFile, newDatafile);
else
- log.log(TLevel.TABLET_HIST, tablet.getExtent() + " MinC [memory] -> " + newDatafile);
+ TLevel.logAtLevel(log, TLevel.TABLET_HIST, "{} MinC [memory] -> {}", tablet.getExtent(), newDatafile);
log.debug(String.format("MinC finish lock %.2f secs %s", (t2 - t1) / 1000.0, tablet.getExtent().toString()));
long splitSize = tablet.getTableConfiguration().getAsBytes(Property.TABLE_SPLIT_THRESHOLD);
if (dfv.getSize() > splitSize) {
@@ -497,7 +499,7 @@ class DatafileManager {
if (!extent.isRootTablet()) {
if (tablet.getTabletServer().getFileSystem().exists(newDatafile.path())) {
- log.error("Target map file already exist " + newDatafile, new Exception());
+ log.error("Target map file already exist {}", newDatafile, new Exception());
throw new IllegalStateException("Target map file already exist " + newDatafile);
}
@@ -543,14 +545,14 @@ class DatafileManager {
// atomically remove old files and add new file
for (FileRef oldDatafile : oldDatafiles) {
if (!datafileSizes.containsKey(oldDatafile)) {
- log.error("file does not exist in set " + oldDatafile);
+ log.error("file does not exist in set {}", oldDatafile);
}
datafileSizes.remove(oldDatafile);
majorCompactingFiles.remove(oldDatafile);
}
if (datafileSizes.containsKey(newDatafile)) {
- log.error("Adding file that is already in set " + newDatafile);
+ log.error("Adding file that is already in set {}", newDatafile);
}
if (dfv.getNumEntries() > 0) {
@@ -571,14 +573,14 @@ class DatafileManager {
if (!extent.isRootTablet()) {
Set<FileRef> filesInUseByScans = waitForScansToFinish(oldDatafiles, false, 10000);
if (filesInUseByScans.size() > 0)
- log.debug("Adding scan refs to metadata " + extent + " " + filesInUseByScans);
+ log.debug("Adding scan refs to metadata {} {}", extent, filesInUseByScans);
MasterMetadataUtil.replaceDatafiles(tablet.getTabletServer(), extent, oldDatafiles, filesInUseByScans, newDatafile, compactionId, dfv, tablet
.getTabletServer().getClientAddressString(), lastLocation, tablet.getTabletServer().getLock());
removeFilesAfterScan(filesInUseByScans);
}
log.debug(String.format("MajC finish lock %.2f secs", (t2 - t1) / 1000.0));
- log.log(TLevel.TABLET_HIST, extent + " MajC " + oldDatafiles + " --> " + newDatafile);
+ TLevel.logAtLevel(log, TLevel.TABLET_HIST, "{} MajC {} --> {}", extent, oldDatafiles, newDatafile);
}
public SortedMap<FileRef,DataFileValue> getDatafileSizes() {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
index 1b8347e..686b46d 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
@@ -96,7 +96,7 @@ class MinorCompactionTask implements Runnable {
tablet.initiateMajorCompaction(MajorCompactionReason.NORMAL);
}
} catch (Throwable t) {
- log.error("Unknown error during minor compaction for extent: " + tablet.getExtent(), t);
+ log.error("Unknown error during minor compaction for extent: {}", tablet.getExtent(), t);
throw new RuntimeException(t);
} finally {
tablet.minorCompactionComplete();
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
index 6bd2545..2bfdc62 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
@@ -88,7 +88,7 @@ public class MinorCompactor extends Compactor {
try {
return Tables.getTableState(tabletServer.getInstance(), extent.getTableId()) == TableState.DELETING;
} catch (Exception e) {
- log.warn("Failed to determine if table " + extent.getTableId() + " was deleting ", e);
+ log.warn("Failed to determine if table {} was deleting", extent.getTableId(), e);
return false; // can not get positive confirmation that its deleting.
}
}
@@ -96,7 +96,7 @@ public class MinorCompactor extends Compactor {
@Override
public CompactionStats call() {
final String outputFileName = getOutputFile();
- log.debug("Begin minor compaction " + outputFileName + " " + getExtent());
+ log.debug("Begin minor compaction {} {}", outputFileName, getExtent());
// output to new MapFile with a temporary name
int sleepTime = 100;
@@ -119,7 +119,7 @@ public class MinorCompactor extends Compactor {
return ret;
} catch (IOException e) {
- log.warn("MinC failed ({}) to create {} retrying ...", e.getMessage(), outputFileName);
+ log.warn("MinC failed ({}) to create {} retrying ...", e.getMessage(), outputFileName);
ProblemReports.getInstance(tabletServer).report(new ProblemReport(getExtent().getTableId(), ProblemType.FILE_WRITE, outputFileName, e));
reportedProblem = true;
} catch (RuntimeException e) {
@@ -135,7 +135,7 @@ public class MinorCompactor extends Compactor {
Random random = new Random();
int sleep = sleepTime + random.nextInt(sleepTime);
- log.debug("MinC failed sleeping " + sleep + " ms before retrying");
+ log.debug("MinC failed sleeping {}ms before retrying", sleep);
sleepUninterruptibly(sleep, TimeUnit.MILLISECONDS);
sleepTime = (int) Math.round(Math.min(maxSleepTime, sleepTime * growthFactor));
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/RootFiles.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/RootFiles.java
index 56dbec9..b68e65a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/RootFiles.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/RootFiles.java
@@ -48,7 +48,7 @@ public class RootFiles {
public static void renameReplacement(VolumeManager fs, FileRef tmpDatafile, FileRef newDatafile) throws IOException {
if (fs.exists(newDatafile.path())) {
- log.error("Target map file already exist " + newDatafile, new Exception());
+ log.error("Target map file already exist {}", newDatafile, new Exception());
throw new IllegalStateException("Target map file already exist " + newDatafile);
}
@@ -99,7 +99,7 @@ public class RootFiles {
if (fs.exists(new Path(expectedCompactedFile))) {
// compaction finished, but did not finish deleting compacted files.. so delete it
if (!fs.deleteRecursively(file.getPath()))
- log.warn("Delete of file: " + file.getPath().toString() + " return false");
+ log.warn("Delete of file: {} return false", file.getPath().toString());
continue;
}
// compaction did not finish, so put files back
@@ -113,16 +113,16 @@ public class RootFiles {
if (filename.endsWith("_tmp")) {
if (deleteTmp) {
- log.warn("cleaning up old tmp file: " + path);
+ log.warn("cleaning up old tmp file: {}", path);
if (!fs.deleteRecursively(file.getPath()))
- log.warn("Delete of tmp file: " + file.getPath().toString() + " return false");
+ log.warn("Delete of tmp file: {} return false", file.getPath().toString());
}
continue;
}
if (!filename.startsWith(Constants.MAPFILE_EXTENSION + "_") && !FileOperations.getValidExtensions().contains(filename.split("\\.")[1])) {
- log.error("unknown file in tablet: " + path);
+ log.error("unknown file in tablet: {}", path);
continue;
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index 0da4482..074f253 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -142,7 +142,7 @@ import org.apache.accumulo.tserver.constraints.ConstraintChecker;
import org.apache.accumulo.tserver.log.DfsLogger;
import org.apache.accumulo.tserver.log.MutationReceiver;
import org.apache.accumulo.tserver.mastermessage.TabletStatusMessage;
-import org.apache.accumulo.tserver.metrics.TabletServerMinCMetrics;
+import org.apache.accumulo.tserver.metrics.TabletServerMinCMetricsKeys;
import org.apache.accumulo.tserver.metrics.TabletServerScanMetrics;
import org.apache.accumulo.tserver.tablet.Compactor.CompactionCanceledException;
import org.apache.accumulo.tserver.tablet.Compactor.CompactionEnv;
@@ -152,9 +152,10 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
@@ -166,7 +167,7 @@ import com.google.common.cache.CacheBuilder;
*
*/
public class Tablet implements TabletCommitter {
- static private final Logger log = Logger.getLogger(Tablet.class);
+ static private final Logger log = LoggerFactory.getLogger(Tablet.class);
private final TabletServer tabletServer;
private final KeyExtent extent;
@@ -285,9 +286,9 @@ public class Tablet implements TabletCommitter {
if (files == null) {
if (location.getName().startsWith(Constants.CLONE_PREFIX))
- log.debug("Tablet " + extent + " had no dir, creating " + location); // its a clone dir...
+ log.debug("Tablet {} had no dir, creating {}", extent, location); // its a clone dir...
else
- log.warn("Tablet " + extent + " had no dir, creating " + location);
+ log.warn("Tablet {} had no dir, creating {}", extent, location);
getTabletServer().getFileSystem().mkdirs(location);
}
@@ -375,7 +376,7 @@ public class Tablet implements TabletCommitter {
try {
setupDefaultSecurityLabels(extent);
} catch (Exception e) {
- log.error("Failed to reload default security labels for extent: " + extent.toString());
+ log.error("Failed to reload default security labels for extent: {}", extent.toString());
}
}
@@ -385,10 +386,10 @@ public class Tablet implements TabletCommitter {
reloadConstraints();
else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
try {
- log.info("Default security labels changed for extent: " + extent.toString());
+ log.info("Default security labels changed for extent: {}", extent);
setupDefaultSecurityLabels(extent);
} catch (Exception e) {
- log.error("Failed to reload default security labels for extent: " + extent.toString());
+ log.error("Failed to reload default security labels for extent: {}", extent, e);
}
}
@@ -407,7 +408,7 @@ public class Tablet implements TabletCommitter {
// Force a load of any per-table properties
configObserver.propertiesChanged();
if (!logEntries.isEmpty()) {
- log.info("Starting Write-Ahead Log recovery for " + this.extent);
+ log.info("Starting Write-Ahead Log recovery for {}", this.extent);
final AtomicLong entriesUsedOnTablet = new AtomicLong(0);
// track max time from walog entries without timestamps
final AtomicLong maxTime = new AtomicLong(Long.MIN_VALUE);
@@ -440,7 +441,7 @@ public class Tablet implements TabletCommitter {
commitSession.updateMaxCommittedTime(tabletTime.getTime());
if (entriesUsedOnTablet.get() == 0) {
- log.debug("No replayed mutations applied, removing unused entries for " + extent);
+ log.debug("No replayed mutations applied, removing unused entries for {}", extent);
MetadataTableUtil.removeUnusedWALEntries(getTabletServer(), extent, logEntries, tabletServer.getLock());
// No replication update to be made because the fact that this tablet didn't use any mutations
@@ -458,7 +459,7 @@ public class Tablet implements TabletCommitter {
// the WAL isn't closed (WRT replication Status) and thus we're safe to update its progress.
Status status = StatusUtil.openWithUnknownLength();
for (LogEntry logEntry : logEntries) {
- log.debug("Writing updated status to metadata table for " + logEntry.filename + " " + ProtobufUtil.toString(status));
+ log.debug("Writing updated status to metadata table for {} {}", logEntry.filename, ProtobufUtil.toString(status));
ReplicationTableUtil.updateFiles(tabletServer, extent, logEntry.filename, status);
}
}
@@ -476,8 +477,7 @@ public class Tablet implements TabletCommitter {
currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), logEntry.filename, logEntry.getColumnQualifier().toString()));
}
- log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + entriesUsedOnTablet.get() + " mutations applied, "
- + getTabletMemory().getNumEntries() + " entries created)");
+ log.info("Write-Ahead Log recovery complete for {} ({} mutations applied, {} entries created)", this.extent, entriesUsedOnTablet.get(), getTabletMemory().getNumEntries());
}
String contextName = tableConfiguration.get(Property.TABLE_CLASSPATH);
@@ -501,7 +501,7 @@ public class Tablet implements TabletCommitter {
removeOldTemporaryFiles();
}
- log.log(TLevel.TABLET_HIST, extent + " opened");
+ TLevel.logAtLevel(log, TLevel.TABLET_HIST, "{} opened", extent);
}
private void removeOldTemporaryFiles() {
@@ -509,14 +509,14 @@ public class Tablet implements TabletCommitter {
try {
for (FileStatus tmp : getTabletServer().getFileSystem().globStatus(new Path(location, "*_tmp"))) {
try {
- log.debug("Removing old temp file " + tmp.getPath());
+ log.debug("Removing old temp file {}", tmp.getPath());
getTabletServer().getFileSystem().delete(tmp.getPath());
} catch (IOException ex) {
- log.error("Unable to remove old temp file " + tmp.getPath() + ": " + ex);
+ log.error("Unable to remove old temp file {}:", tmp.getPath(), ex);
}
}
} catch (IOException ex) {
- log.error("Error scanning for old temp files in " + location);
+ log.error("Error scanning for old temp files in {}", location);
}
}
@@ -528,7 +528,7 @@ public class Tablet implements TabletCommitter {
ColumnVisibility cv = new ColumnVisibility(tableConfiguration.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
this.defaultSecurityLabel = cv.getExpression();
} catch (Exception e) {
- log.error(e, e);
+ log.error("", e);
this.defaultSecurityLabel = new byte[0];
}
}
@@ -616,13 +616,13 @@ public class Tablet implements TabletCommitter {
}
} catch (TooManyFilesException tmfe) {
// treat this as a closed tablet, and let the client retry
- log.warn("Tablet " + getExtent() + " has too many files, batch lookup can not run");
+ log.warn("Tablet {} has too many files, batch lookup can not run", getExtent());
handleTabletClosedDuringScan(results, lookupResult, exceededMemoryUsage, range, entriesAdded);
tabletClosed = true;
} catch (IOException ioe) {
if (shutdownInProgress()) {
// assume HDFS shutdown hook caused this exception
- log.debug("IOException while shutdown in progress ", ioe);
+ log.debug("IOException while shutdown in progress", ioe);
handleTabletClosedDuringScan(results, lookupResult, exceededMemoryUsage, range, entriesAdded);
tabletClosed = true;
} else {
@@ -889,11 +889,11 @@ public class Tablet implements TabletCommitter {
}
Metrics minCMetrics = getTabletServer().getMinCMetrics();
if (minCMetrics.isEnabled())
- minCMetrics.add(TabletServerMinCMetrics.MINC, (lastMinorCompactionFinishTime - start));
+ minCMetrics.add(TabletServerMinCMetricsKeys.MINC, (lastMinorCompactionFinishTime - start));
if (hasQueueTime) {
timer.updateTime(Operation.MINOR, queued, start, count, failed);
if (minCMetrics.isEnabled())
- minCMetrics.add(TabletServerMinCMetrics.QUEUE, (start - queued));
+ minCMetrics.add(TabletServerMinCMetricsKeys.QUEUE, (start - queued));
} else
timer.updateTime(Operation.MINOR, start, count, failed);
}
@@ -970,7 +970,7 @@ public class Tablet implements TabletCommitter {
try {
flushId = getFlushID();
} catch (NoNodeException e) {
- log.info("Asked to initiate MinC when there was no flush id " + getExtent() + " " + e.getMessage());
+ log.info("Asked to initiate MinC when there was no flush id {} {}", getExtent(), e.getMessage());
return false;
}
return initiateMinorCompaction(flushId, mincReason);
@@ -981,7 +981,7 @@ public class Tablet implements TabletCommitter {
try {
flushId = getFlushID();
} catch (NoNodeException e) {
- log.info("Asked to initiate MinC when there was no flush id " + getExtent() + " " + e.getMessage());
+ log.info("Asked to initiate MinC when there was no flush id {} {}", getExtent(), e.getMessage());
return false;
}
MinorCompactionTask mct = createMinorCompactionTask(flushId, mincReason);
@@ -1032,7 +1032,7 @@ public class Tablet implements TabletCommitter {
} finally {
// log outside of sync block
if (logMessage != null && log.isDebugEnabled())
- log.debug(logMessage);
+ log.debug("{}", logMessage);
}
log.debug(String.format("MinC initiate lock %.2f secs", (t2 - t1) / 1000.0));
@@ -1255,7 +1255,7 @@ public class Tablet implements TabletCommitter {
throw new IllegalArgumentException("Not saving state on close and requesting minor compactions queue does not make sense");
}
- log.debug("initiateClose(saveState=" + saveState + " queueMinC=" + queueMinC + " disableWrites=" + disableWrites + ") " + getExtent());
+ log.debug("initiateClose(saveState={} queueMinC={} disableWrites={}) {}", saveState, queueMinC, disableWrites, getExtent());
MinorCompactionTask mct = null;
@@ -1325,7 +1325,7 @@ public class Tablet implements TabletCommitter {
throw new IllegalStateException("closeState = " + closeState);
}
- log.debug("completeClose(saveState=" + saveState + " completeClose=" + completeClose + ") " + getExtent());
+ log.debug("completeClose(saveState={} completeClose={}) {}", saveState, completeClose, getExtent());
// ensure this method is only called once, also guards against multiple
// threads entering the method at the same time
@@ -1367,20 +1367,20 @@ public class Tablet implements TabletCommitter {
err = null;
} catch (RuntimeException t) {
err = t;
- log.error("Consistency check fails, retrying " + t);
+ log.error("Consistency check fails, retrying", t);
sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
}
}
if (err != null) {
ProblemReports.getInstance(tabletServer).report(new ProblemReport(extent.getTableId(), ProblemType.TABLET_LOAD, this.extent.toString(), err));
- log.error("Tablet closed consistency check has failed for " + this.extent + " giving up and closing");
+ log.error("Tablet closed consistency check has failed for {} giving up and closing", this.extent);
}
}
try {
getTabletMemory().getMemTable().delete(0);
} catch (Throwable t) {
- log.error("Failed to delete mem table : " + t.getMessage(), t);
+ log.error("Failed to delete mem table : {}", t.getMessage(), t);
}
getTabletMemory().close();
@@ -1388,7 +1388,7 @@ public class Tablet implements TabletCommitter {
// close map files
getTabletResources().close();
- log.log(TLevel.TABLET_HIST, extent + " closed");
+ TLevel.logAtLevel(log, TLevel.TABLET_HIST, "{} closed", extent);
tableConfiguration.getNamespaceConfiguration().removeObserver(configObserver);
tableConfiguration.removeObserver(configObserver);
@@ -1537,7 +1537,7 @@ public class Tablet implements TabletCommitter {
keys = FileUtil.findMidPoint(getTabletServer().getFileSystem(), tabletDirectory, getTabletServer().getConfiguration(), extent.getPrevEndRow(),
extent.getEndRow(), FileUtil.toPathStrings(files), .25);
} catch (IOException e) {
- log.error("Failed to find midpoint " + e.getMessage());
+ log.error("Failed to find midpoint {}", e.getMessage());
return null;
}
@@ -1563,7 +1563,7 @@ public class Tablet implements TabletCommitter {
if (keys.firstKey() < .5) {
Key candidate = keys.get(keys.firstKey());
if (candidate.getLength() > maxEndRow) {
- log.warn("Cannot split tablet " + extent + ", selected split point too long. Length : " + candidate.getLength());
+ log.warn("Cannot split tablet {}, selected split point too long. Length : {}", extent, candidate.getLength());
sawBigRow = true;
timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
@@ -1580,7 +1580,7 @@ public class Tablet implements TabletCommitter {
}
- log.warn("Cannot split tablet " + extent + " it contains a big row : " + lastRow);
+ log.warn("Cannot split tablet {} it contains a big row : {}", extent, lastRow);
sawBigRow = true;
timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
@@ -1600,7 +1600,7 @@ public class Tablet implements TabletCommitter {
}
if (text.getLength() > maxEndRow) {
- log.warn("Cannot split tablet " + extent + ", selected split point too long. Length : " + text.getLength());
+ log.warn("Cannot split tablet {}, selected split point too long. Length : {}", extent, text.getLength());
sawBigRow = true;
timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
@@ -1612,7 +1612,7 @@ public class Tablet implements TabletCommitter {
return new SplitRowSpec(.5, text);
} catch (IOException e) {
// don't split now, but check again later
- log.error("Failed to find lastkey " + e.getMessage());
+ log.error("Failed to find lastkey {}", e.getMessage());
return null;
}
@@ -1791,7 +1791,7 @@ public class Tablet implements TabletCommitter {
if (plan != null)
droppedFiles.addAll(plan.deleteFiles);
propogateDeletes = !(droppedFiles.equals(allFiles.keySet()));
- log.debug("Major compaction plan: " + plan + " propogate deletes : " + propogateDeletes);
+ log.debug("Major compaction plan: {} propogate deletes : {}", plan, propogateDeletes);
filesToCompact = new HashMap<>(allFiles);
filesToCompact.keySet().retainAll(inputFiles);
@@ -1891,7 +1891,7 @@ public class Tablet implements TabletCommitter {
copy.keySet().retainAll(smallestFiles);
- log.debug("Starting MajC " + extent + " (" + reason + ") " + copy.keySet() + " --> " + compactTmpName + " " + compactionIterators);
+ log.debug("Starting MajC {} ({}) {} --> {} {}", extent, reason, copy.keySet(), compactTmpName, compactionIterators);
// always propagate deletes, unless last batch
boolean lastBatch = filesToCompact.isEmpty();
@@ -2022,11 +2022,11 @@ public class Tablet implements TabletCommitter {
}
success = true;
} catch (CompactionCanceledException cce) {
- log.debug("Major compaction canceled, extent = " + getExtent());
+ log.debug("Major compaction canceled, extent = {}", getExtent());
} catch (IOException ioe) {
- log.error("MajC Failed, extent = " + getExtent(), ioe);
+ log.error("MajC Failed, extent = {}", getExtent(), ioe);
} catch (RuntimeException e) {
- log.error("MajC Unexpected exception, extent = " + getExtent(), e);
+ log.error("MajC Unexpected exception, extent = {}", getExtent(), e);
} finally {
// ensure we always reset boolean, even
// when an exception is thrown
@@ -2129,7 +2129,7 @@ public class Tablet implements TabletCommitter {
try {
initiateClose(true, false, false);
} catch (IllegalStateException ise) {
- log.debug("File " + extent + " not splitting : " + ise.getMessage());
+ log.debug("File {} not splitting : {}", extent, ise.getMessage());
return null;
}
@@ -2181,8 +2181,8 @@ public class Tablet implements TabletCommitter {
MetadataTableUtil.splitDatafiles(midRow, splitRatio, firstAndLastRows, getDatafileManager().getDatafileSizes(), lowDatafileSizes, highDatafileSizes,
highDatafilesToRemove);
- log.debug("Files for low split " + low + " " + lowDatafileSizes.keySet());
- log.debug("Files for high split " + high + " " + highDatafileSizes.keySet());
+ log.debug("Files for low split {} {}", low, lowDatafileSizes.keySet());
+ log.debug("Files for high split {} {}", high, highDatafileSizes.keySet());
String time = tabletTime.getMetadataValue();
@@ -2191,7 +2191,7 @@ public class Tablet implements TabletCommitter {
time, lastFlushID, lastCompactID, getTabletServer().getLock());
MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, getTabletServer(), getTabletServer().getLock());
- log.log(TLevel.TABLET_HIST, extent + " split " + low + " " + high);
+ TLevel.logAtLevel(log, TLevel.TABLET_HIST, "{} split {} {}", extent, low, high);
newTablets.put(high, new TabletData(tabletDirectory, highDatafileSizes, time, lastFlushID, lastCompactID, lastLocation, getBulkIngestedFiles()));
newTablets.put(low, new TabletData(lowDirectory, lowDatafileSizes, time, lastFlushID, lastCompactID, lastLocation, getBulkIngestedFiles()));
@@ -2359,15 +2359,15 @@ public class Tablet implements TabletCommitter {
// do debug logging outside tablet lock
for (String logger : otherLogsCopy) {
- log.debug("Logs for memory compacted: " + getExtent() + " " + logger.toString());
+ log.debug("Logs for memory compacted: {} {}", getExtent(), logger.toString());
}
for (String logger : currentLogsCopy) {
- log.debug("Logs for current memory: " + getExtent() + " " + logger);
+ log.debug("Logs for current memory: {} {}", getExtent(), logger);
}
for (String logger : doomed) {
- log.debug("Logs to be destroyed: " + getExtent() + " " + logger);
+ log.debug("Logs to be destroyed: {} {}", getExtent(), logger);
}
return doomed;
@@ -2680,7 +2680,7 @@ public class Tablet implements TabletCommitter {
FileSystem pathFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
return lowDirectoryPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory()).toString();
}
- log.warn("Failed to create " + lowDirectoryPath + " for unknown reason");
+ log.warn("Failed to create {} for unknown reason", lowDirectoryPath);
} else {
lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
@@ -2692,10 +2692,10 @@ public class Tablet implements TabletCommitter {
}
}
} catch (IOException e) {
- log.warn(e);
+ log.warn("", e);
}
- log.warn("Failed to create dir for tablet in table " + tableId + " in volume " + volume + " + will retry ...");
+ log.warn("Failed to create dir for tablet in table {} in volume {} will retry ...", tableId, volume);
sleepUninterruptibly(3, TimeUnit.SECONDS);
}
diff --git a/start/src/main/java/org/apache/accumulo/start/Main.java b/start/src/main/java/org/apache/accumulo/start/Main.java
index e37f1a4..85c6e28 100644
--- a/start/src/main/java/org/apache/accumulo/start/Main.java
+++ b/start/src/main/java/org/apache/accumulo/start/Main.java
@@ -144,7 +144,7 @@ public class Main {
try {
main = classWithMain.getMethod("main", args.getClass());
} catch (Throwable t) {
- log.error("Could not run main method on '" + classWithMain.getName() + "'.", t);
+ log.error("Could not run main method on '{}'.", classWithMain.getName(), t);
}
if (main == null || !Modifier.isPublic(main.getModifiers()) || !Modifier.isStatic(main.getModifiers())) {
System.out.println(classWithMain.getName() + " must implement a public static void main(String args[]) method");
@@ -189,7 +189,7 @@ public class Main {
* The {@link Throwable} containing a stack trace to print.
*/
private static void die(final Throwable t) {
- log.error("Thread '" + Thread.currentThread().getName() + "' died.", t);
+ log.error("Thread '{}' died.", Thread.currentThread().getName(), t);
System.exit(1);
}
@@ -245,6 +245,6 @@ public class Main {
}
private static void warnDuplicate(final KeywordExecutable service) {
- log.warn("Ambiguous duplicate binding for keyword '" + service.keyword() + "' found: " + service.getClass().getName());
+ log.warn("Ambiguous duplicate binding for keyword '{}' found: {}", service.keyword(), service.getClass().getName());
}
}
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
index 9186312..73aae00 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
@@ -179,10 +179,10 @@ public class AccumuloClassLoader {
for (File jar : extJars)
urls.add(jar.toURI().toURL());
} else {
- log.debug("ignoring classpath entry " + classpath);
+ log.debug("ignoring classpath entry {}", classpath);
}
} else {
- log.debug("ignoring classpath entry " + classpath);
+ log.debug("ignoring classpath entry {}", classpath);
}
}
} else {
@@ -238,7 +238,7 @@ public class AccumuloClassLoader {
ClassLoader parentClassLoader = AccumuloClassLoader.class.getClassLoader();
- log.debug("Create 2nd tier ClassLoader using URLs: " + urls.toString());
+ log.debug("Create 2nd tier ClassLoader using URLs: {}", urls.toString());
URLClassLoader aClassLoader = new URLClassLoader(urls.toArray(new URL[urls.size()]), parentClassLoader) {
@Override
protected synchronized Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
index 6a884e9..41daf81 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
@@ -78,7 +78,7 @@ public class AccumuloReloadingVFSClassLoader implements FileListener, ReloadingC
FileSystemManager vfs = AccumuloVFSClassLoader.generateVfs();
FileObject[] files = AccumuloVFSClassLoader.resolve(vfs, uris);
- log.debug("Rebuilding dynamic classloader using files- " + stringify(files));
+ log.debug("Rebuilding dynamic classloader using files- {}", stringify(files));
VFSClassLoader cl;
if (preDelegate)
@@ -88,11 +88,11 @@ public class AccumuloReloadingVFSClassLoader implements FileListener, ReloadingC
updateClassloader(files, cl);
return;
} catch (Exception e) {
- log.error("{}", e.getMessage(), e);
+ log.error(e.getMessage(), e);
try {
Thread.sleep(DEFAULT_TIMEOUT);
} catch (InterruptedException ie) {
- log.error("{}", e.getMessage(), ie);
+ log.error(e.getMessage(), ie);
}
}
}
@@ -153,7 +153,7 @@ public class AccumuloReloadingVFSClassLoader implements FileListener, ReloadingC
monitor.setRecursive(false);
for (FileObject file : pathsToMonitor) {
monitor.addFile(file);
- log.debug("monitoring " + file);
+ log.debug("monitoring {}", file);
}
monitor.start();
}
@@ -177,21 +177,21 @@ public class AccumuloReloadingVFSClassLoader implements FileListener, ReloadingC
@Override
public void fileCreated(FileChangeEvent event) throws Exception {
if (log.isDebugEnabled())
- log.debug(event.getFile().getURL().toString() + " created, recreating classloader");
+ log.debug("{} created, recreating classloader", event.getFile().getURL().toString());
scheduleRefresh();
}
@Override
public void fileDeleted(FileChangeEvent event) throws Exception {
if (log.isDebugEnabled())
- log.debug(event.getFile().getURL().toString() + " deleted, recreating classloader");
+ log.debug("{} deleted, recreating classloader", event.getFile().getURL().toString());
scheduleRefresh();
}
@Override
public void fileChanged(FileChangeEvent event) throws Exception {
if (log.isDebugEnabled())
- log.debug(event.getFile().getURL().toString() + " changed, recreating classloader");
+ log.debug("{} changed, recreating classloader", event.getFile().getURL().toString());
scheduleRefresh();
}
@@ -203,7 +203,7 @@ public class AccumuloReloadingVFSClassLoader implements FileListener, ReloadingC
try {
buf.append("\t").append(f.getURL().toString()).append("\n");
} catch (FileSystemException e) {
- log.error("Error getting URL for file", e);
+ log.error("Error getting URL for file {}", f, e);
}
}
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
index 15ed3ff..9bc8e70 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
@@ -158,11 +158,11 @@ public class AccumuloVFSClassLoader {
}
}
} else {
- log.warn("ignoring classpath entry " + fo);
+ log.warn("ignoring classpath entry {}", fo);
}
break;
default:
- log.warn("ignoring classpath entry " + fo);
+ log.warn("ignoring classpath entry {}", fo);
break;
}
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/UniqueFileReplicator.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/UniqueFileReplicator.java
index 85b47df..56d6644 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/UniqueFileReplicator.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/UniqueFileReplicator.java
@@ -48,7 +48,7 @@ public class UniqueFileReplicator implements VfsComponent, FileReplicator {
public UniqueFileReplicator(File tempDir) {
this.tempDir = tempDir;
if (!tempDir.exists() && !tempDir.mkdirs())
- log.warn("Unexpected error creating directory " + tempDir);
+ log.warn("Unexpected error creating directory {}", tempDir);
}
@Override
@@ -90,7 +90,7 @@ public class UniqueFileReplicator implements VfsComponent, FileReplicator {
synchronized (tmpFiles) {
for (File tmpFile : tmpFiles) {
if (!tmpFile.delete())
- log.warn("File does not exist: " + tmpFile);
+ log.warn("File does not exist: {}", tmpFile);
}
}
@@ -98,7 +98,7 @@ public class UniqueFileReplicator implements VfsComponent, FileReplicator {
String[] list = tempDir.list();
int numChildren = list == null ? 0 : list.length;
if (0 == numChildren && !tempDir.delete())
- log.warn("Cannot delete empty directory: " + tempDir);
+ log.warn("Cannot delete empty directory: {}", tempDir);
}
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java b/test/src/main/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
index fc1719f..5b7ba29 100644
--- a/test/src/main/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
@@ -48,13 +48,13 @@ public class BalanceWithOfflineTableIT extends ConfigurableMacBase {
// create a table with a bunch of splits
final Connector c = getConnector();
- log.info("Creating table " + tableName);
+ log.info("Creating table {}", tableName);
c.tableOperations().create(tableName);
final SortedSet<Text> splits = new TreeSet<>();
for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
splits.add(new Text(split));
}
- log.info("Splitting table " + tableName);
+ log.info("Splitting table {}", tableName);
c.tableOperations().addSplits(tableName, splits);
log.info("Balancing");
c.instanceOperations().waitForBalance();
@@ -62,15 +62,15 @@ public class BalanceWithOfflineTableIT extends ConfigurableMacBase {
// create a new table which will unbalance the cluster
final String table2 = tableNames[1];
- log.info("Creating table " + table2);
+ log.info("Creating table {}", table2);
c.tableOperations().create(table2);
- log.info("Creating splits " + table2);
+ log.info("Creating splits {}", table2);
c.tableOperations().addSplits(table2, splits);
// offline the table, hopefully while there are some migrations going on
- log.info("Offlining " + table2);
+ log.info("Offlining {}", table2);
c.tableOperations().offline(table2, true);
- log.info("Offlined " + table2);
+ log.info("Offlined {}", table2);
log.info("Waiting for balance");
diff --git a/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java b/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
index 937ccb8..d3ddd72 100644
--- a/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
@@ -102,10 +102,10 @@ public class CleanWalIT extends AccumuloClusterHarness {
for (String table : new String[] {MetadataTable.NAME, RootTable.NAME})
conn.tableOperations().flush(table, null, null, true);
- log.debug("Checking entries for " + tableName);
+ log.debug("Checking entries for {}", tableName);
assertEquals(1, count(tableName, conn));
for (String table : new String[] {MetadataTable.NAME, RootTable.NAME}) {
- log.debug("Checking logs for " + table);
+ log.debug("Checking logs for {}", table);
assertEquals("Found logs for " + table, 0, countLogs(table, conn));
}
@@ -133,7 +133,7 @@ public class CleanWalIT extends AccumuloClusterHarness {
scanner.setRange(MetadataSchema.TabletsSection.getRange());
int count = 0;
for (Entry<Key,Value> entry : scanner) {
- log.debug("Saw " + entry.getKey() + "=" + entry.getValue());
+ log.debug("Saw {}={}", entry.getKey(), entry.getValue());
count++;
}
return count;
diff --git a/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java b/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
index 0812328..663c0c8 100644
--- a/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
+++ b/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
@@ -72,7 +72,7 @@ public class CreateRFiles {
try {
TestIngest.main(tia.split(" "));
} catch (Exception e) {
- log.error("Could not run " + TestIngest.class.getName() + ".main using the input '" + tia + "'", e);
+ log.error("Could not run {}.main using the input '{}'", TestIngest.class.getName(), tia, e);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java b/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java
index 0a98ccf..11464b4 100644
--- a/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java
@@ -88,7 +88,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
// Then force another to make an unreferenced file
conn.tableOperations().compact(tableName, null, null, true, true);
- log.info("File for table: " + file);
+ log.info("File for table: {}", file);
FileSystem fs = getCluster().getFileSystem();
int i = 0;
@@ -96,7 +96,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
i++;
Thread.sleep(1000);
if (0 == i % 10) {
- log.info("Waited " + i + " iterations, file still exists");
+ log.info("Waited {} iterations, file still exists", i);
}
}
@@ -104,7 +104,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
- log.info("File relative to accumulo dir: " + filePath);
+ log.info("File relative to accumulo dir: {}", filePath);
Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
@@ -145,7 +145,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
conn.tableOperations().delete(tableName);
- log.info("File for table: " + file);
+ log.info("File for table: {}", file);
FileSystem fs = getCluster().getFileSystem();
int i = 0;
@@ -153,7 +153,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
i++;
Thread.sleep(1000);
if (0 == i % 10) {
- log.info("Waited " + i + " iterations, file still exists");
+ log.info("Waited {} iterations, file still exists", i);
}
}
@@ -161,7 +161,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
- log.info("File relative to accumulo dir: " + filePath);
+ log.info("File relative to accumulo dir: {}", filePath);
Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
@@ -203,7 +203,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
// Then force another to make an unreferenced file
conn.tableOperations().compact(tableName, null, null, true, true);
- log.info("File for table: " + file);
+ log.info("File for table: {}", file);
FileSystem fs = getCluster().getFileSystem();
int i = 0;
@@ -211,7 +211,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
i++;
Thread.sleep(1000);
if (0 == i % 10) {
- log.info("Waited " + i + " iterations, file still exists");
+ log.info("Waited {} iterations, file still exists", i);
}
}
@@ -219,7 +219,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
- log.info("File relative to accumulo dir: " + filePath);
+ log.info("File relative to accumulo dir: {}", filePath);
Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
@@ -244,14 +244,14 @@ public class FileArchiveIT extends ConfigurableMacBase {
conn.tableOperations().delete(tableName);
- log.info("File for table: " + finalPath);
+ log.info("File for table: {}", finalPath);
i = 0;
while (fs.exists(finalPath)) {
i++;
Thread.sleep(1000);
if (0 == i % 10) {
- log.info("Waited " + i + " iterations, file still exists");
+ log.info("Waited {} iterations, file still exists", i);
}
}
@@ -259,7 +259,7 @@ public class FileArchiveIT extends ConfigurableMacBase {
String finalFilePath = finalPath.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
- log.info("File relative to accumulo dir: " + finalFilePath);
+ log.info("File relative to accumulo dir: {}", finalFilePath);
Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
diff --git a/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
index 12e9d0b..49d2b6d 100644
--- a/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
@@ -115,7 +115,7 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
File emptyWalog = new File(walogServerDir, UUID.randomUUID().toString());
- log.info("Created empty WAL at " + emptyWalog.toURI());
+ log.info("Created empty WAL at {}", emptyWalog.toURI());
fs.create(new Path(emptyWalog.toURI())).close();
@@ -167,7 +167,7 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
File partialHeaderWalog = new File(walogServerDir, UUID.randomUUID().toString());
- log.info("Created WAL with malformed header at " + partialHeaderWalog.toURI());
+ log.info("Created WAL with malformed header at {}", partialHeaderWalog.toURI());
// Write half of the header
FSDataOutputStream wal = fs.create(new Path(partialHeaderWalog.toURI()));
diff --git a/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java b/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
index 616121d..a8ce127 100644
--- a/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
@@ -182,7 +182,7 @@ public class NativeMapConcurrencyTest {
try {
thread.join();
} catch (InterruptedException e) {
- log.error("Could not join thread '" + thread.getName() + "'", e);
+ log.error("Could not join thread '{}'", thread.getName(), e);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
index e80989b..1283052 100644
--- a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
+++ b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
@@ -41,12 +41,13 @@ import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.server.cli.ClientOpts;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
public class QueryMetadataTable {
- private static final Logger log = Logger.getLogger(QueryMetadataTable.class);
+ private static final Logger log = LoggerFactory.getLogger(QueryMetadataTable.class);
private static String principal;
private static AuthenticationToken token;
@@ -77,7 +78,7 @@ public class QueryMetadataTable {
}
} catch (TableNotFoundException e) {
- log.error("Table '" + MetadataTable.NAME + "' not found.", e);
+ log.error("Table '{}' not found.", MetadataTable.NAME, e);
throw new RuntimeException(e);
} catch (AccumuloException e) {
log.error("AccumuloException encountered.", e);
diff --git a/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java b/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
index e192183..2189fff 100644
--- a/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
@@ -152,8 +152,8 @@ public class RewriteTabletDirectoriesIT extends ConfigurableMacBase {
}
}
- log.info("Count for volume1: " + v1Count);
- log.info("Count for volume2: " + v2Count);
+ log.info("Count for volume1: {}", v1Count);
+ log.info("Count for volume2: {}", v2Count);
assertEquals(splits.size() + 1, v1Count + v2Count);
// a fair chooser will differ by less than count(volumes)
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
index dcb49dc..18b9868 100644
--- a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
@@ -364,10 +364,10 @@ public class ShellServerIT extends SharedMiniClusterBase {
for (String line; (line = reader.readLine()) != null;) {
Path exportedFile = new Path(line);
// There isn't a cp on FileSystem??
- log.info("Copying " + line + " to " + localTmpPath);
+ log.info("Copying {} to {}", line, localTmpPath);
fs.copyToLocalFile(exportedFile, localTmpPath);
Path tmpFile = new Path(localTmpPath, exportedFile.getName());
- log.info("Moving " + tmpFile + " to the import directory " + importDir);
+ log.info("Moving {} to the import directory {}", tmpFile, importDir);
fs.moveFromLocalFile(tmpFile, importDir);
}
}
@@ -1529,15 +1529,15 @@ public class ShellServerIT extends SharedMiniClusterBase {
// Try to find the active scan for about 15seconds
for (int i = 0; i < 50 && scans.isEmpty(); i++) {
String currentScans = ts.exec("listscans", true);
- log.info("Got output from listscans:\n" + currentScans);
+ log.info("Got output from listscans:\n{}", currentScans);
String[] lines = currentScans.split("\n");
for (int scanOffset = 2; scanOffset < lines.length; scanOffset++) {
String currentScan = lines[scanOffset];
if (currentScan.contains(table)) {
- log.info("Retaining scan: " + currentScan);
+ log.info("Retaining scan: {}", currentScan);
scans.add(currentScan);
} else {
- log.info("Ignoring scan because of wrong table: " + currentScan);
+ log.info("Ignoring scan because of wrong table: {}", currentScan);
}
}
sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
@@ -1548,7 +1548,7 @@ public class ShellServerIT extends SharedMiniClusterBase {
for (String scan : scans) {
if (!scan.contains("RUNNING")) {
- log.info("Ignoring scan because it doesn't contain 'RUNNING': " + scan);
+ log.info("Ignoring scan because it doesn't contain 'RUNNING': {}", scan);
continue;
}
String parts[] = scan.split("\\|");
@@ -1868,7 +1868,7 @@ public class ShellServerIT extends SharedMiniClusterBase {
ts.exec("scan -t " + MetadataTable.NAME + " -np -c file -b " + tableId + " -e " + tableId + "~");
- log.debug("countFiles(): " + ts.output.get());
+ log.debug("countFiles(): {}", ts.output.get());
String[] lines = StringUtils.split(ts.output.get(), "\n");
ts.output.clear();
diff --git a/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java b/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
index 26ee491..a6198f1 100644
--- a/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
@@ -85,8 +85,8 @@ public class TableConfigurationUpdateIT extends AccumuloClusterHarness {
}
long end = System.currentTimeMillis();
- log.debug(tableConf + " with " + iterations + " iterations and " + numThreads + " threads and cache invalidates " + ((1. / randomMax) * 100.) + "% took "
- + (end - start) / 1000 + " second(s)");
+ log.debug("{} with {} iterations and {} threads and cache invalidates {}% took {} second(s)",
+ tableConf, iterations, numThreads, ((1. / randomMax) * 100.), (end - start) / 1000);
}
public static class TableConfRunner implements Callable<Exception> {
diff --git a/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java b/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
index 5292b87..0520226 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
@@ -136,7 +136,7 @@ public class TestRandomDeletes {
String tableName = opts.getTableName();
TreeSet<RowColumn> doomed = scanAll(opts, scanOpts, tableName);
- log.info("Got " + doomed.size() + " rows");
+ log.info("Got {} rows", doomed.size());
long startTime = System.currentTimeMillis();
while (true) {
@@ -148,7 +148,7 @@ public class TestRandomDeletes {
long stopTime = System.currentTimeMillis();
long elapsed = (stopTime - startTime) / 1000;
- log.info("deleted " + deleted + " values in " + elapsed + " seconds");
+ log.info("deleted {} values in {} seconds", deleted, elapsed);
} catch (Exception e) {
throw new RuntimeException(e);
}
diff --git a/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java b/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
index 2c4d970..afa3a1a 100644
--- a/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
@@ -105,11 +105,11 @@ public class TracerRecoversAfterOfflineTableIT extends ConfigurableMacBase {
}
});
String traceOutput = finalBuffer.toString();
- log.info("Trace output:" + traceOutput);
+ log.info("Trace output:{}", traceOutput);
if (traceCount > 0) {
int lastPos = 0;
for (String part : "traceTest,close,binMutations".split(",")) {
- log.info("Looking in trace output for '" + part + "'");
+ log.info("Looking in trace output for '{}'", part);
int pos = traceOutput.indexOf(part);
assertTrue("Did not find '" + part + "' in output", pos > 0);
assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
@@ -117,7 +117,7 @@ public class TracerRecoversAfterOfflineTableIT extends ConfigurableMacBase {
}
break;
} else {
- log.info("Ignoring trace output as traceCount not greater than zero: " + traceCount);
+ log.info("Ignoring trace output as traceCount not greater than zero: {}", traceCount);
Thread.sleep(1000);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/TransportCachingIT.java b/test/src/main/java/org/apache/accumulo/test/TransportCachingIT.java
index 5edfe0a..9663bb7 100644
--- a/test/src/main/java/org/apache/accumulo/test/TransportCachingIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/TransportCachingIT.java
@@ -81,7 +81,7 @@ public class TransportCachingIT extends AccumuloClusterHarness {
// Get a transport (cached or not)
first = pool.getAnyTransport(servers, true).getSecond();
} catch (TTransportException e) {
- log.warn("Failed to obtain transport to " + servers);
+ log.warn("Failed to obtain transport to {}", servers);
}
}
@@ -95,7 +95,7 @@ public class TransportCachingIT extends AccumuloClusterHarness {
// Get a cached transport (should be the first)
second = pool.getAnyTransport(servers, true).getSecond();
} catch (TTransportException e) {
- log.warn("Failed obtain 2nd transport to " + servers);
+ log.warn("Failed to obtain 2nd transport to {}", servers);
}
}
@@ -110,7 +110,7 @@ public class TransportCachingIT extends AccumuloClusterHarness {
// Get a non-cached transport
third = pool.getAnyTransport(servers, false).getSecond();
} catch (TTransportException e) {
- log.warn("Failed obtain 2nd transport to " + servers);
+ log.warn("Failed to obtain 3rd transport to {}", servers);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java b/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
index be14150..a914444 100644
--- a/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
@@ -127,14 +127,14 @@ public class VerifyIngest {
}
if (val == null) {
- log.error("Did not find " + rowKey + " " + colf + " " + colq);
+ log.error("Did not find {} {} {}", rowKey, colf, colq);
errors++;
} else {
recsRead++;
bytesRead += val.length;
Value value = new Value(val);
if (value.compareTo(ev) != 0) {
- log.error("unexpected value (" + rowKey + " " + colf + " " + colq + " : saw " + value + " expected " + new Value(ev));
+ log.error("unexpected value ({} {} {} : saw {} expected {})", rowKey, colf, colq, value, new Value(ev));
errors++;
}
}
@@ -169,19 +169,18 @@ public class VerifyIngest {
int colNum = getCol(entry.getKey());
if (rowNum != expectedRow) {
- log.error("rowNum != expectedRow " + rowNum + " != " + expectedRow);
+ log.error("rowNum != expectedRow {} != {}", rowNum, expectedRow);
errors++;
expectedRow = rowNum;
}
if (colNum != expectedCol) {
- log.error("colNum != expectedCol " + colNum + " != " + expectedCol + " rowNum : " + rowNum);
+ log.error("colNum != expectedCol {} != {} rowNum : {}", colNum, expectedCol, rowNum);
errors++;
}
if (expectedRow >= (opts.rows + opts.startRow)) {
- log.error("expectedRow (" + expectedRow + ") >= (ingestArgs.rows + ingestArgs.startRow) (" + (opts.rows + opts.startRow)
- + "), get batch returned data passed end key");
+ log.error("expectedRow ({}) >= (ingestArgs.rows + ingestArgs.startRow) ({}), get batch returned data passed end key", expectedRow, (opts.rows + opts.startRow));
errors++;
break;
}
@@ -194,13 +193,13 @@ public class VerifyIngest {
}
if (entry.getValue().compareTo(value) != 0) {
- log.error("unexpected value, rowNum : " + rowNum + " colNum : " + colNum);
- log.error(" saw = " + new String(entry.getValue().get()) + " expected = " + new String(value));
+ log.error("unexpected value, rowNum : {} colNum : {}", rowNum, colNum);
+ log.error(" saw = {} expected = {}", new String(entry.getValue().get()), new String(value));
errors++;
}
if (opts.timestamp >= 0 && entry.getKey().getTimestamp() != opts.timestamp) {
- log.error("unexpected timestamp " + entry.getKey().getTimestamp() + ", rowNum : " + rowNum + " colNum : " + colNum);
+ log.error("unexpected timestamp {}, rowNum : {} colNum : {}", entry.getKey().getTimestamp(), rowNum, colNum);
errors++;
}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
index 527c055..fcd0126 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
@@ -123,7 +123,7 @@ public class BalanceAfterCommsFailureIT extends ConfigurableMacBase {
}
unassignedTablets = stats.getUnassignedTablets();
if (unassignedTablets > 0) {
- log.info("Found " + unassignedTablets + " unassigned tablets, sleeping 3 seconds for tablet assignment");
+ log.info("Found {} unassigned tablets, sleeping 3 seconds for tablet assignment", unassignedTablets);
Thread.sleep(3000);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
index 7b7a118..9ebc0b3 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
@@ -177,11 +177,11 @@ public class BalanceInPresenceOfOfflineTableIT extends AccumuloClusterHarness {
}
if (stats.getTServerInfoSize() < 2) {
- log.debug("we need >= 2 servers. sleeping for " + currentWait + "ms");
+ log.debug("we need >= 2 servers. sleeping for {}ms", currentWait);
continue;
}
if (stats.getUnassignedTablets() != 0) {
- log.debug("We shouldn't have unassigned tablets. sleeping for " + currentWait + "ms");
+ log.debug("We shouldn't have unassigned tablets. sleeping for {}ms", currentWait);
continue;
}
@@ -194,13 +194,13 @@ public class BalanceInPresenceOfOfflineTableIT extends AccumuloClusterHarness {
}
if (tabletsPerServer[0] <= 10) {
- log.debug("We should have > 10 tablets. sleeping for " + currentWait + "ms");
+ log.debug("We should have > 10 tablets. sleeping for {}ms", currentWait);
continue;
}
long min = NumberUtils.min(tabletsPerServer), max = NumberUtils.max(tabletsPerServer);
- log.debug("Min=" + min + ", Max=" + max);
+ log.debug("Min={}, Max={}", min, max);
if ((min / ((double) max)) < 0.5) {
- log.debug("ratio of min to max tablets per server should be roughly even. sleeping for " + currentWait + "ms");
+ log.debug("ratio of min to max tablets per server should be roughly even. sleeping for {}ms", currentWait);
continue;
}
balancingWorked = true;
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
index 528f486..09a18d7 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
@@ -124,7 +124,7 @@ public class BatchScanSplitIT extends AccumuloClusterHarness {
}
splits = getConnector().tableOperations().listSplits(tableName);
- log.info("splits : " + splits);
+ log.info("splits : {}", splits);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CleanUpIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CleanUpIT.java
index 2ff55e8..5c6a6ac 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CleanUpIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CleanUpIT.java
@@ -138,7 +138,7 @@ public class CleanUpIT extends SharedMiniClusterBase {
Exception e = new Exception();
for (Thread thread : threads) {
e.setStackTrace(thread.getStackTrace());
- log.info("thread name: " + thread.getName(), e);
+ log.info("thread name: {}", thread.getName(), e);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
index fd89caa..0c0b236 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
@@ -147,7 +147,7 @@ public class DeleteRowsIT extends AccumuloClusterHarness {
assertTrue(startText != null || endText != null);
count++;
}
- log.info("Finished table " + table);
+ log.info("Finished table {}", table);
assertEquals(entries, count);
}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
index ed48d10..66fc7fb 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
@@ -72,7 +72,7 @@ public class DeleteRowsSplitIT extends AccumuloClusterHarness {
// Eliminate whole tablets
for (int test = 0; test < 10; test++) {
// create a table
- log.info("Test " + test);
+ log.info("Test {}", test);
conn.tableOperations().create(tableName);
// put some data in it
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java b/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
index 97a2543..5cc364e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
@@ -159,7 +159,7 @@ public class HalfDeadTServerIT extends ConfigurableMacBase {
sleepUninterruptibly(seconds, TimeUnit.SECONDS);
} finally {
if (!trickFile.delete()) {
- log.error("Couldn't delete " + trickFile);
+ log.error("Couldn't delete {}", trickFile);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
index 562f46f..dced6dc 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
@@ -189,7 +189,7 @@ public class KerberosProxyIT extends AccumuloITBase {
}
TSocket socket = new TSocket(hostname, proxyPort);
- log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+ log.info("Connecting to proxy with server primary '{}' running on {}", proxyPrimary, hostname);
TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
"auth"), null, socket);
@@ -315,7 +315,7 @@ public class KerberosProxyIT extends AccumuloITBase {
UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
TSocket socket = new TSocket(hostname, proxyPort);
- log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+ log.info("Connecting to proxy with server primary '{}' running on {}", proxyPrimary, hostname);
TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
"auth"), null, socket);
@@ -394,7 +394,7 @@ public class KerberosProxyIT extends AccumuloITBase {
// Login as the new user
UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user, keytab.getAbsolutePath());
- log.info("Logged in as " + ugi);
+ log.info("Logged in as {}", ugi);
// Expect an AccumuloSecurityException
thrown.expect(AccumuloSecurityException.class);
@@ -407,7 +407,7 @@ public class KerberosProxyIT extends AccumuloITBase {
thrown.expect(new ThriftExceptionMatchesPattern(".*Expected '" + proxyPrincipal + "' but was '" + kdc.qualifyUser(user) + "'.*"));
TSocket socket = new TSocket(hostname, proxyPort);
- log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+ log.info("Connecting to proxy with server primary '{}' running on {}", proxyPrimary, hostname);
// Should fail to open the tran
TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
@@ -446,10 +446,10 @@ public class KerberosProxyIT extends AccumuloITBase {
// Login as the new user
UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user, keytab.getAbsolutePath());
- log.info("Logged in as " + ugi);
+ log.info("Logged in as {}", ugi);
TSocket socket = new TSocket(hostname, proxyPort);
- log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+ log.info("Connecting to proxy with server primary '{}' running on {}", proxyPrimary, hostname);
// Should fail to open the tran
TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
index 39764cd..90438ee 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@ -81,7 +81,7 @@ public class LargeRowIT extends AccumuloClusterHarness {
try {
timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
} catch (NumberFormatException e) {
- log.warn("Could not parse property value for 'timeout.factor' as integer: " + System.getProperty("timeout.factor"));
+ log.warn("Could not parse property value for 'timeout.factor' as integer: {}", System.getProperty("timeout.factor"));
}
Assert.assertTrue("Timeout factor must be greater than or equal to 1", timeoutFactor >= 1);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
index b033dbf..d92cd07 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
@@ -72,7 +72,7 @@ public class LogicalTimeIT extends AccumuloClusterHarness {
private void runMergeTest(Connector conn, String table, String[] splits, String[] inserts, String start, String end, String last, long expected)
throws Exception {
- log.info("table " + table);
+ log.info("table {}", table);
conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
TreeSet<Text> splitSet = new TreeSet<>();
for (String split : splits) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
index 05f6de2..f78477d 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
@@ -72,7 +72,7 @@ public class MetadataMaxFilesIT extends ConfigurableMacBase {
sleepUninterruptibly(5, TimeUnit.SECONDS);
for (int i = 0; i < 5; i++) {
String tableName = "table" + i;
- log.info("Creating " + tableName);
+ log.info("Creating {}", tableName);
c.tableOperations().create(tableName);
log.info("adding splits");
c.tableOperations().addSplits(tableName, splits);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MonitorSslIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MonitorSslIT.java
index 7283c4d..45cd39b 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MonitorSslIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MonitorSslIT.java
@@ -123,7 +123,7 @@ public class MonitorSslIT extends ConfigurableMacBase {
}
}
URL url = new URL("https://" + monitorLocation);
- log.debug("Fetching web page " + url);
+ log.debug("Fetching web page {}", url);
String result = FunctionalTestUtils.readAll(url.openStream());
assertTrue(result.length() > 100);
assertTrue(result.indexOf("Accumulo Overview") >= 0);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
index 4207665..f02cd72 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -115,7 +115,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
// test each permission
for (SystemPermission perm : SystemPermission.values()) {
- log.debug("Verifying the " + perm + " permission");
+ log.debug("Verifying the {} permission", perm);
// test permission before and after granting it
String tableNamePrefix = getUniqueNames(1)[0];
@@ -142,7 +142,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
SystemPermission perm) throws Exception {
String tableName, user, password = "password", namespace;
boolean passwordBased = testUser.getPassword() != null;
- log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
+ log.debug("Confirming that the lack of the {} permission properly restricts the user", perm);
// test permission prior to granting it
switch (perm) {
@@ -235,7 +235,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
} catch (AccumuloSecurityException e) {
loginAs(rootUser);
if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().listLocalUsers().contains(user)) {
- log.info("Failed to authenticate as " + user);
+ log.info("Failed to authenticate as {}", user);
throw e;
}
}
@@ -347,7 +347,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
SystemPermission perm) throws Exception {
String tableName, user, password = "password", namespace;
boolean passwordBased = testUser.getPassword() != null;
- log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
+ log.debug("Confirming that the presence of the {} permission properly permits the user", perm);
// test permission after granting it
switch (perm) {
@@ -533,7 +533,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
// test each permission
for (TablePermission perm : TablePermission.values()) {
- log.debug("Verifying the " + perm + " permission");
+ log.debug("Verifying the {} permission", perm);
// test permission before and after granting it
createTestTable(c, principal, tableName);
@@ -574,7 +574,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
Scanner scanner;
BatchWriter writer;
Mutation m;
- log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
+ log.debug("Confirming that the lack of the {} permission properly restricts the user", perm);
// test permission prior to granting it
switch (perm) {
@@ -661,7 +661,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
Scanner scanner;
BatchWriter writer;
Mutation m;
- log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
+ log.debug("Confirming that the presence of the {} permission properly permits the user", perm);
// test permission after granting it
switch (perm) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index a799cb3..e961b74 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -167,7 +167,7 @@ public class ReadWriteIT extends AccumuloClusterHarness {
}
}
URL url = new URL(scheme + monitorLocation);
- log.debug("Fetching web page " + url);
+ log.debug("Fetching web page {}", url);
String result = FunctionalTestUtils.readAll(url.openStream());
assertTrue(result.length() > 100);
log.debug("Stopping accumulo cluster");
@@ -430,7 +430,7 @@ public class ReadWriteIT extends AccumuloClusterHarness {
args.add(new Path(hadoopConfDir, "core-site.xml").toString());
args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
}
- log.info("Invoking PrintInfo with " + args);
+ log.info("Invoking PrintInfo with {}", args);
PrintInfo.main(args.toArray(new String[args.size()]));
newOut.flush();
String stdout = baos.toString();
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
index 0408aa0..5c2e1a1 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
@@ -82,7 +82,7 @@ public class RecoveryWithEmptyRFileIT extends ConfigurableMacBase {
for (Entry<Key,Value> entry : meta) {
foundFile = true;
Path rfile = new Path(entry.getKey().getColumnQualifier().toString());
- log.debug("Removing rfile '" + rfile + "'");
+ log.debug("Removing rfile '{}'", rfile);
cluster.getFileSystem().delete(rfile, false);
Process info = cluster.exec(CreateEmpty.class, rfile.toString());
assertEquals(0, info.waitFor());
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
index cbed280..7c27300 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
@@ -67,7 +67,7 @@ public class SimpleBalancerFairnessIT extends ConfigurableMacBase {
c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
c.tableOperations().create("unused");
TreeSet<Text> splits = TestIngest.getSplitPoints(0, 10000000, 500);
- log.info("Creating " + splits.size() + " splits");
+ log.info("Creating {} splits", splits.size());
c.tableOperations().addSplits("unused", splits);
List<String> tservers = c.instanceOperations().getTabletServers();
TestIngest.Opts opts = new TestIngest.Opts();
@@ -98,7 +98,7 @@ public class SimpleBalancerFairnessIT extends ConfigurableMacBase {
}
unassignedTablets = stats.getUnassignedTablets();
if (unassignedTablets > 0) {
- log.info("Found " + unassignedTablets + " unassigned tablets, sleeping 3 seconds for tablet assignment");
+ log.info("Found {} unassigned tablets, sleeping 3 seconds for tablet assignment", unassignedTablets);
Thread.sleep(3000);
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ZombieTServer.java b/test/src/main/java/org/apache/accumulo/test/functional/ZombieTServer.java
index b1f4924..45b1531 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ZombieTServer.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ZombieTServer.java
@@ -140,7 +140,7 @@ public class ZombieTServer {
byte[] lockContent = new ServerServices(addressString, Service.TSERV_CLIENT).toString().getBytes(UTF_8);
if (zlock.tryLock(lw, lockContent)) {
- log.debug("Obtained tablet server lock " + zlock.getLockPath());
+ log.debug("Obtained tablet server lock {}", zlock.getLockPath());
}
// modify metadata
synchronized (tch) {
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
index 8fcf258..b8dbcbb 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
@@ -116,7 +116,7 @@ public class CollectTabletStats {
Table.ID tableId = Tables.getTableId(instance, opts.getTableName());
if (tableId == null) {
- log.error("Unable to find table named " + opts.getTableName());
+ log.error("Unable to find table named {}", opts.getTableName());
System.exit(-1);
}
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
index 8e8cc2c..b16419b 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
@@ -173,7 +173,7 @@ public class CyclicReplicationIT {
master1Cluster.start();
break;
} catch (ZooKeeperBindException e) {
- log.warn("Failed to start ZooKeeper on " + master1Cfg.getZooKeeperPort() + ", will retry");
+ log.warn("Failed to start ZooKeeper on {}, will retry", master1Cfg.getZooKeeperPort());
}
}
@@ -199,7 +199,7 @@ public class CyclicReplicationIT {
master2Cluster.start();
break;
} catch (ZooKeeperBindException e) {
- log.warn("Failed to start ZooKeeper on " + master2Cfg.getZooKeeperPort() + ", will retry");
+ log.warn("Failed to start ZooKeeper on {}, will retry", master2Cfg.getZooKeeperPort());
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
index 2852c70..16b2e5e 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
@@ -237,16 +237,16 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
log.info("Fetching metadata records:");
for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
} else {
- log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
}
}
log.info("");
log.info("Fetching replication records:");
for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
}
Future<Boolean> future = executor.submit(new Callable<Boolean>() {
@@ -277,16 +277,16 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
log.info("Fetching metadata records:");
for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
} else {
- log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
}
}
log.info("");
log.info("Fetching replication records:");
for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
}
Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
@@ -300,8 +300,8 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
}
- log.info("Last master entry: " + masterEntry);
- log.info("Last peer entry: " + peerEntry);
+ log.info("Last master entry: {}", masterEntry);
+ log.info("Last peer entry: {}", peerEntry);
Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
@@ -544,7 +544,7 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());
for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.debug(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.debug("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
}
connMaster.replicationOperations().drain(masterTable, files);
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
index 770a884..28b37f4 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
@@ -733,7 +733,7 @@ public class ReplicationIT extends ConfigurableMacBase {
s.fetchColumnFamily(LogColumnFamily.NAME);
s.setRange(TabletsSection.getRange(tableId));
for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + "=" + entry.getValue());
+ log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
}
try {
@@ -862,7 +862,7 @@ public class ReplicationIT extends ConfigurableMacBase {
s = ReplicationTable.getScanner(conn);
StatusSection.limit(s);
for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
}
throw e;
} finally {
@@ -892,7 +892,7 @@ public class ReplicationIT extends ConfigurableMacBase {
if (notFound) {
s = ReplicationTable.getScanner(conn);
for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
}
Assert.assertFalse("Did not find the work entry for the status entry", notFound);
}
@@ -936,7 +936,7 @@ public class ReplicationIT extends ConfigurableMacBase {
if (notFound) {
s = ReplicationTable.getScanner(conn);
for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
}
Assert.assertFalse("Did not find the work entries for the status entries", notFound);
}
@@ -1002,7 +1002,7 @@ public class ReplicationIT extends ConfigurableMacBase {
} catch (NoSuchElementException e) {} catch (IllegalArgumentException e) {
s = ReplicationTable.getScanner(conn);
for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
}
Assert.fail("Found more than one work section entry");
}
@@ -1013,7 +1013,7 @@ public class ReplicationIT extends ConfigurableMacBase {
if (notFound) {
s = ReplicationTable.getScanner(conn);
for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
}
Assert.assertFalse("Did not find the work entry for the status entry", notFound);
}
@@ -1138,7 +1138,7 @@ public class ReplicationIT extends ConfigurableMacBase {
Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
+ log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
}
Assert.fail("Expected all replication records in the metadata table to be closed");
}
@@ -1173,7 +1173,7 @@ public class ReplicationIT extends ConfigurableMacBase {
Scanner s = ReplicationTable.getScanner(conn);
StatusSection.limit(s);
for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + TextFormat.shortDebugString(Status.parseFrom(entry.getValue().get())));
+ log.info("{} {}", entry.getKey().toStringNoTruncate(), TextFormat.shortDebugString(Status.parseFrom(entry.getValue().get())));
}
Assert.fail("Expected all replication records in the replication table to be closed");
}
@@ -1264,7 +1264,7 @@ public class ReplicationIT extends ConfigurableMacBase {
s = ReplicationTable.getScanner(conn);
WorkSection.limit(s);
Entry<Key,Value> e = Iterables.getOnlyElement(s);
- log.info("Found entry: " + e.getKey().toStringNoTruncate());
+ log.info("Found entry: {}", e.getKey().toStringNoTruncate());
Text expectedColqual = new ReplicationTarget("cluster1", "4", tableId).toText();
Assert.assertEquals(expectedColqual, e.getKey().getColumnQualifier());
notFound = false;
@@ -1274,7 +1274,7 @@ public class ReplicationIT extends ConfigurableMacBase {
// Somehow we got more than one element. Log what they were
s = ReplicationTable.getScanner(conn);
for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
}
Assert.fail("Found more than one work section entry");
} catch (RuntimeException e) {
@@ -1301,7 +1301,7 @@ public class ReplicationIT extends ConfigurableMacBase {
if (notFound) {
s = ReplicationTable.getScanner(conn);
for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + ProtobufUtil.toString(Status.parseFrom(content.getValue().get())));
+ log.info("{} => {}", content.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(content.getValue().get())));
}
Assert.assertFalse("Did not find the work entry for the status entry", notFound);
}
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
index 282e5b3..75b1842 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
@@ -237,16 +237,16 @@ public class UnorderedWorkAssignerReplicationIT extends ConfigurableMacBase {
log.info("Fetching metadata records:");
for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
} else {
- log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
}
}
log.info("");
log.info("Fetching replication records:");
for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
}
Future<Boolean> future = executor.submit(new Callable<Boolean>() {
@@ -274,16 +274,16 @@ public class UnorderedWorkAssignerReplicationIT extends ConfigurableMacBase {
log.info("Fetching metadata records:");
for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
} else {
- log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
}
}
log.info("");
log.info("Fetching replication records:");
for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
}
Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
@@ -297,8 +297,8 @@ public class UnorderedWorkAssignerReplicationIT extends ConfigurableMacBase {
Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
}
- log.info("Last master entry: " + masterEntry);
- log.info("Last peer entry: " + peerEntry);
+ log.info("Last master entry: {}", masterEntry);
+ log.info("Last peer entry: {}", peerEntry);
Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
@@ -535,7 +535,7 @@ public class UnorderedWorkAssignerReplicationIT extends ConfigurableMacBase {
Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
for (String s : files) {
- log.info("Found referenced file for " + masterTable + ": " + s);
+ log.info("Found referenced file for {}: {}", masterTable, s);
}
for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
@@ -547,7 +547,7 @@ public class UnorderedWorkAssignerReplicationIT extends ConfigurableMacBase {
Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());
for (Entry<Key,Value> kv : connMaster.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)) {
- log.debug(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ log.debug("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
}
connMaster.replicationOperations().drain(masterTable, files);
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java b/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
index bde9101..63961bd 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
@@ -187,13 +187,13 @@ public class UnusedWalDoesntCloseReplicationStatusIT extends ConfigurableMacBase
s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
+ log.info("{} {}", entry.getKey().toStringNoTruncate(), entry.getValue());
}
s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
s.setRange(MetadataSchema.ReplicationSection.getRange());
for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
+ log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
}
log.info("Bringing table online");
@@ -206,14 +206,14 @@ public class UnusedWalDoesntCloseReplicationStatusIT extends ConfigurableMacBase
s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
+ log.info("{} {}", entry.getKey().toStringNoTruncate(), entry.getValue());
}
s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
s.setRange(MetadataSchema.ReplicationSection.getRange());
for (Entry<Key,Value> entry : s) {
Status status = Status.parseFrom(entry.getValue().get());
- log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(status));
+ log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
Assert.assertFalse("Status record was closed and it should not be", status.getClosed());
}
}
diff --git a/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java b/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
index 3b67e1f..18683ce 100644
--- a/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
@@ -133,14 +133,14 @@ public class KeywordStartIT {
boolean moreExpected = expectIter.hasNext();
if (moreExpected) {
while (expectIter.hasNext()) {
- log.warn("Missing class for keyword '" + expectIter.next() + "'");
+ log.warn("Missing class for keyword '{}'", expectIter.next());
}
}
assertFalse("Missing expected classes", moreExpected);
boolean moreActual = actualIter.hasNext();
if (moreActual) {
while (actualIter.hasNext()) {
- log.warn("Extra class found with keyword '" + actualIter.next() + "'");
+ log.warn("Extra class found with keyword '{}'", actualIter.next());
}
}
assertFalse("Found additional unexpected classes", moreActual);
diff --git a/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java b/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
index 8b48e24..f2e1178 100644
--- a/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
+++ b/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
@@ -317,7 +317,7 @@ public class CertUtils {
if (cert == null) {
cert = keyStore.getCertificate(alias);
} else {
- log.warn("Found multiple certificates in keystore. Ignoring " + alias);
+ log.warn("Found multiple certificates in keystore. Ignoring {}", alias);
}
}
}
@@ -336,7 +336,7 @@ public class CertUtils {
if (key == null) {
key = (PrivateKey) keyStore.getKey(alias, keystorePassword);
} else {
- log.warn("Found multiple keys in keystore. Ignoring " + alias);
+ log.warn("Found multiple keys in keystore. Ignoring {}", alias);
}
}
}
--
To stop receiving notification emails like this one, please contact
"commits@accumulo.apache.org" <co...@accumulo.apache.org>.