Posted to commits@lucene.apache.org by er...@apache.org on 2018/08/23 03:24:51 UTC
[4/5] lucene-solr:branch_7x: SOLR-12690: Regularize LoggerFactory declarations
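The patch is mechanical: every file below already obtains its SLF4J logger through the MethodHandles idiom, and the commit only renames the static field from LOG to log so all declarations match the project convention. A minimal sketch of the regularized declaration that the + lines converge on (the class name here is just a placeholder):

    import java.lang.invoke.MethodHandles;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SomeSolrClass {
      // MethodHandles.lookup().lookupClass() resolves to the enclosing class at
      // class-initialization time, so this exact line can be copied between
      // classes without editing a class literal.
      private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
    }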
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
index f361324..24c53bd 100644
--- a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
@@ -96,7 +96,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
}
}
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private int waitForUpdatesWithStaleStatePauseMilliSeconds = Integer.getInteger("solr.cloud.wait-for-updates-with-stale-state-pause", 2500);
private int maxRetries = 500;
@@ -171,13 +171,13 @@ public class RecoveryStrategy implements Runnable, Closeable {
if (prevSendPreRecoveryHttpUriRequest != null) {
prevSendPreRecoveryHttpUriRequest.abort();
}
- LOG.warn("Stopping recovery for core=[{}] coreNodeName=[{}]", coreName, coreZkNodeName);
+ log.warn("Stopping recovery for core=[{}] coreNodeName=[{}]", coreName, coreZkNodeName);
}
final private void recoveryFailed(final SolrCore core,
final ZkController zkController, final String baseUrl,
final String shardZkNodeName, final CoreDescriptor cd) throws Exception {
- SolrException.log(LOG, "Recovery failed - I give up.");
+ SolrException.log(log, "Recovery failed - I give up.");
try {
zkController.publish(cd, Replica.State.RECOVERY_FAILED);
} finally {
@@ -200,7 +200,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
final String leaderUrl = getReplicateLeaderUrl(leaderprops);
- LOG.info("Attempting to replicate from [{}].", leaderUrl);
+ log.info("Attempting to replicate from [{}].", leaderUrl);
// send commit
commitOnLeader(leaderUrl);
@@ -231,14 +231,14 @@ public class RecoveryStrategy implements Runnable, Closeable {
}
// solrcloud_debug
- if (LOG.isDebugEnabled()) {
+ if (log.isDebugEnabled()) {
try {
RefCounted<SolrIndexSearcher> searchHolder = core
.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
try {
- LOG.debug(core.getCoreContainer()
+ log.debug(core.getCoreContainer()
.getZkController().getNodeName()
+ " replicated "
+ searcher.count(new MatchAllDocsQuery())
@@ -255,7 +255,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
searchHolder.decref();
}
} catch (Exception e) {
- LOG.debug("Error in solrcloud_debug block", e);
+ log.debug("Error in solrcloud_debug block", e);
}
}
@@ -283,21 +283,21 @@ public class RecoveryStrategy implements Runnable, Closeable {
try (SolrCore core = cc.getCore(coreName)) {
if (core == null) {
- SolrException.log(LOG, "SolrCore not found - cannot recover:" + coreName);
+ SolrException.log(log, "SolrCore not found - cannot recover:" + coreName);
return;
}
MDCLoggingContext.setCore(core);
- LOG.info("Starting recovery process. recoveringAfterStartup=" + recoveringAfterStartup);
+ log.info("Starting recovery process. recoveringAfterStartup=" + recoveringAfterStartup);
try {
doRecovery(core);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- SolrException.log(LOG, "", e);
+ SolrException.log(log, "", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
} catch (Exception e) {
- LOG.error("", e);
+ log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
}
} finally {
@@ -317,7 +317,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
boolean successfulRecovery = false;
// if (core.getUpdateHandler().getUpdateLog() != null) {
-// SolrException.log(LOG, "'replicate-only' recovery strategy should only be used if no update logs are present, but this core has one: "
+// SolrException.log(log, "'replicate-only' recovery strategy should only be used if no update logs are present, but this core has one: "
// + core.getUpdateHandler().getUpdateLog());
// return;
// }
@@ -340,50 +340,50 @@ public class RecoveryStrategy implements Runnable, Closeable {
if (cloudDesc.isLeader()) {
assert cloudDesc.getReplicaType() != Replica.Type.PULL;
// we are now the leader - no one else must have been suitable
- LOG.warn("We have not yet recovered - but we are now the leader!");
- LOG.info("Finished recovery process.");
+ log.warn("We have not yet recovered - but we are now the leader!");
+ log.info("Finished recovery process.");
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
return;
}
- LOG.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
+ log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
ourUrl);
zkController.publish(core.getCoreDescriptor(), Replica.State.RECOVERING);
if (isClosed()) {
- LOG.info("Recovery for core {} has been closed", core.getName());
+ log.info("Recovery for core {} has been closed", core.getName());
break;
}
- LOG.info("Starting Replication Recovery.");
+ log.info("Starting Replication Recovery.");
try {
- LOG.info("Stopping background replicate from leader process");
+ log.info("Stopping background replicate from leader process");
zkController.stopReplicationFromLeader(coreName);
replicate(zkController.getNodeName(), core, leaderprops);
if (isClosed()) {
- LOG.info("Recovery for core {} has been closed", core.getName());
+ log.info("Recovery for core {} has been closed", core.getName());
break;
}
- LOG.info("Replication Recovery was successful.");
+ log.info("Replication Recovery was successful.");
successfulRecovery = true;
} catch (Exception e) {
- SolrException.log(LOG, "Error while trying to recover", e);
+ SolrException.log(log, "Error while trying to recover", e);
}
} catch (Exception e) {
- SolrException.log(LOG, "Error while trying to recover. core=" + coreName, e);
+ SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
} finally {
if (successfulRecovery) {
- LOG.info("Restaring background replicate from leader process");
+ log.info("Restaring background replicate from leader process");
zkController.startReplicationFromLeader(coreName, false);
- LOG.info("Registering as Active after recovery.");
+ log.info("Registering as Active after recovery.");
try {
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
} catch (Exception e) {
- LOG.error("Could not publish as ACTIVE after succesful recovery", e);
+ log.error("Could not publish as ACTIVE after succesful recovery", e);
successfulRecovery = false;
}
@@ -401,24 +401,24 @@ public class RecoveryStrategy implements Runnable, Closeable {
try {
if (isClosed()) {
- LOG.info("Recovery for core {} has been closed", core.getName());
+ log.info("Recovery for core {} has been closed", core.getName());
break;
}
- LOG.error("Recovery failed - trying again... (" + retries + ")");
+ log.error("Recovery failed - trying again... (" + retries + ")");
retries++;
if (retries >= maxRetries) {
- SolrException.log(LOG, "Recovery failed - max retries exceeded (" + retries + ").");
+ SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + ").");
try {
recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
} catch (Exception e) {
- SolrException.log(LOG, "Could not publish that recovery failed", e);
+ SolrException.log(log, "Could not publish that recovery failed", e);
}
break;
}
} catch (Exception e) {
- SolrException.log(LOG, "An error has occurred during recovery", e);
+ SolrException.log(log, "An error has occurred during recovery", e);
}
try {
@@ -427,25 +427,25 @@ public class RecoveryStrategy implements Runnable, Closeable {
// will always be the minimum of the two (12). Since we sleep at 5 seconds sub-intervals in
// order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
int loopCount = retries < 4 ? (int) Math.min(Math.pow(2, retries), 12) : 12;
- LOG.info("Wait [{}] seconds before trying to recover again (attempt={})",
+ log.info("Wait [{}] seconds before trying to recover again (attempt={})",
TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
for (int i = 0; i < loopCount; i++) {
if (isClosed()) {
- LOG.info("Recovery for core {} has been closed", core.getName());
+ log.info("Recovery for core {} has been closed", core.getName());
break; // check if someone closed us
}
Thread.sleep(startingRecoveryDelayMilliSeconds);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOG.warn("Recovery was interrupted.", e);
+ log.warn("Recovery was interrupted.", e);
close = true;
}
}
}
// We skip core.seedVersionBuckets(); We don't have a transaction log
- LOG.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
+ log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
}
// TODO: perhaps make this grab a new core each time through the loop to handle core reloads?
@@ -455,7 +455,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
UpdateLog ulog;
ulog = core.getUpdateHandler().getUpdateLog();
if (ulog == null) {
- SolrException.log(LOG, "No UpdateLog found - cannot recover.");
+ SolrException.log(log, "No UpdateLog found - cannot recover.");
recoveryFailed(core, zkController, baseUrl, coreZkNodeName,
core.getCoreDescriptor());
return;
@@ -468,7 +468,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
recentVersions = recentUpdates.getVersions(ulog.getNumRecordsToKeep());
} catch (Exception e) {
- SolrException.log(LOG, "Corrupt tlog - ignoring.", e);
+ SolrException.log(log, "Corrupt tlog - ignoring.", e);
recentVersions = new ArrayList<>(0);
}
@@ -484,13 +484,13 @@ public class RecoveryStrategy implements Runnable, Closeable {
}
if (oldIdx > 0) {
- LOG.info("####### Found new versions added after startup: num=[{}]", oldIdx);
- LOG.info("###### currentVersions=[{}]",recentVersions);
+ log.info("####### Found new versions added after startup: num=[{}]", oldIdx);
+ log.info("###### currentVersions=[{}]",recentVersions);
}
- LOG.info("###### startupVersions=[{}]", startingVersions);
+ log.info("###### startupVersions=[{}]", startingVersions);
} catch (Exception e) {
- SolrException.log(LOG, "Error getting recent versions.", e);
+ SolrException.log(log, "Error getting recent versions.", e);
recentVersions = new ArrayList<>(0);
}
}
@@ -504,11 +504,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
// this means we were previously doing a full index replication
// that probably didn't complete and buffering updates in the
// meantime.
- LOG.info("Looks like a previous replication recovery did not complete - skipping peer sync.");
+ log.info("Looks like a previous replication recovery did not complete - skipping peer sync.");
firstTime = false; // skip peersync
}
} catch (Exception e) {
- SolrException.log(LOG, "Error trying to get ulog starting operation.", e);
+ SolrException.log(log, "Error trying to get ulog starting operation.", e);
firstTime = false; // skip peersync
}
}
@@ -524,7 +524,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor();
final Replica leader = pingLeader(ourUrl, core.getCoreDescriptor(), true);
if (isClosed()) {
- LOG.info("RecoveryStrategy has been closed");
+ log.info("RecoveryStrategy has been closed");
break;
}
@@ -534,17 +534,17 @@ public class RecoveryStrategy implements Runnable, Closeable {
}
if (cloudDesc.isLeader()) {
// we are now the leader - no one else must have been suitable
- LOG.warn("We have not yet recovered - but we are now the leader!");
- LOG.info("Finished recovery process.");
+ log.warn("We have not yet recovered - but we are now the leader!");
+ log.info("Finished recovery process.");
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
return;
}
- LOG.info("Begin buffering updates. core=[{}]", coreName);
+ log.info("Begin buffering updates. core=[{}]", coreName);
// recalling buffer updates will drop the old buffer tlog
ulog.bufferUpdates();
- LOG.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leader.getCoreUrl(),
+ log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leader.getCoreUrl(),
ourUrl);
zkController.publish(core.getCoreDescriptor(), Replica.State.RECOVERING);
@@ -559,14 +559,14 @@ public class RecoveryStrategy implements Runnable, Closeable {
}
if (isClosed()) {
- LOG.info("RecoveryStrategy has been closed");
+ log.info("RecoveryStrategy has been closed");
break;
}
sendPrepRecoveryCmd(leader.getBaseUrl(), leader.getCoreName(), slice);
if (isClosed()) {
- LOG.info("RecoveryStrategy has been closed");
+ log.info("RecoveryStrategy has been closed");
break;
}
@@ -584,7 +584,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
// first thing we just try to sync
if (firstTime) {
firstTime = false; // only try sync the first time through the loop
- LOG.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(), recoveringAfterStartup);
+ log.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(), recoveringAfterStartup);
// System.out.println("Attempting to PeerSync from " + leaderUrl
// + " i am:" + zkController.getNodeName());
PeerSyncWithLeader peerSyncWithLeader = new PeerSyncWithLeader(core,
@@ -596,12 +596,12 @@ public class RecoveryStrategy implements Runnable, Closeable {
// force open a new searcher
core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
req.close();
- LOG.info("PeerSync stage of recovery was successful.");
+ log.info("PeerSync stage of recovery was successful.");
// solrcloud_debug
cloudDebugLog(core, "synced");
- LOG.info("Replaying updates buffered during PeerSync.");
+ log.info("Replaying updates buffered during PeerSync.");
replay(core);
// sync success
@@ -609,54 +609,54 @@ public class RecoveryStrategy implements Runnable, Closeable {
return;
}
- LOG.info("PeerSync Recovery was not successful - trying replication.");
+ log.info("PeerSync Recovery was not successful - trying replication.");
}
if (isClosed()) {
- LOG.info("RecoveryStrategy has been closed");
+ log.info("RecoveryStrategy has been closed");
break;
}
- LOG.info("Starting Replication Recovery.");
+ log.info("Starting Replication Recovery.");
try {
replicate(zkController.getNodeName(), core, leader);
if (isClosed()) {
- LOG.info("RecoveryStrategy has been closed");
+ log.info("RecoveryStrategy has been closed");
break;
}
replayFuture = replay(core);
if (isClosed()) {
- LOG.info("RecoveryStrategy has been closed");
+ log.info("RecoveryStrategy has been closed");
break;
}
- LOG.info("Replication Recovery was successful.");
+ log.info("Replication Recovery was successful.");
successfulRecovery = true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOG.warn("Recovery was interrupted", e);
+ log.warn("Recovery was interrupted", e);
close = true;
} catch (Exception e) {
- SolrException.log(LOG, "Error while trying to recover", e);
+ SolrException.log(log, "Error while trying to recover", e);
}
} catch (Exception e) {
- SolrException.log(LOG, "Error while trying to recover. core=" + coreName, e);
+ SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
} finally {
if (successfulRecovery) {
- LOG.info("Registering as Active after recovery.");
+ log.info("Registering as Active after recovery.");
try {
if (replicaType == Replica.Type.TLOG) {
zkController.startReplicationFromLeader(coreName, true);
}
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
} catch (Exception e) {
- LOG.error("Could not publish as ACTIVE after succesful recovery", e);
+ log.error("Could not publish as ACTIVE after succesful recovery", e);
successfulRecovery = false;
}
@@ -674,24 +674,24 @@ public class RecoveryStrategy implements Runnable, Closeable {
try {
if (isClosed()) {
- LOG.info("RecoveryStrategy has been closed");
+ log.info("RecoveryStrategy has been closed");
break;
}
- LOG.error("Recovery failed - trying again... (" + retries + ")");
+ log.error("Recovery failed - trying again... (" + retries + ")");
retries++;
if (retries >= maxRetries) {
- SolrException.log(LOG, "Recovery failed - max retries exceeded (" + retries + ").");
+ SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + ").");
try {
recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
} catch (Exception e) {
- SolrException.log(LOG, "Could not publish that recovery failed", e);
+ SolrException.log(log, "Could not publish that recovery failed", e);
}
break;
}
} catch (Exception e) {
- SolrException.log(LOG, "An error has occurred during recovery", e);
+ SolrException.log(log, "An error has occurred during recovery", e);
}
try {
@@ -700,17 +700,17 @@ public class RecoveryStrategy implements Runnable, Closeable {
// will always be the minimum of the two (12). Since we sleep at 5 seconds sub-intervals in
// order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
double loopCount = retries < 4 ? Math.min(Math.pow(2, retries), 12) : 12;
- LOG.info("Wait [{}] seconds before trying to recover again (attempt={})", loopCount, retries);
+ log.info("Wait [{}] seconds before trying to recover again (attempt={})", loopCount, retries);
for (int i = 0; i < loopCount; i++) {
if (isClosed()) {
- LOG.info("RecoveryStrategy has been closed");
+ log.info("RecoveryStrategy has been closed");
break; // check if someone closed us
}
Thread.sleep(startingRecoveryDelayMilliSeconds);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOG.warn("Recovery was interrupted.", e);
+ log.warn("Recovery was interrupted.", e);
close = true;
}
}
@@ -720,11 +720,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
// if replay was skipped (possibly to due pulling a full index from the leader),
// then we still need to update version bucket seeds after recovery
if (successfulRecovery && replayFuture == null) {
- LOG.info("Updating version bucket highest from index after successful recovery.");
+ log.info("Updating version bucket highest from index after successful recovery.");
core.seedVersionBuckets();
}
- LOG.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
+ log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
}
private final Replica pingLeader(String ourUrl, CoreDescriptor coreDesc, boolean mayPutReplicaAsDown) throws Exception {
@@ -763,11 +763,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
SolrPingResponse resp = httpSolrClient.ping();
return leaderReplica;
} catch (IOException e) {
- LOG.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
+ log.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
Thread.sleep(500);
} catch (Exception e) {
if (e.getCause() instanceof IOException) {
- LOG.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
+ log.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
Thread.sleep(500);
} else {
return leaderReplica;
@@ -794,13 +794,13 @@ public class RecoveryStrategy implements Runnable, Closeable {
Future<RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().applyBufferedUpdates();
if (future == null) {
// no replay needed\
- LOG.info("No replay needed.");
+ log.info("No replay needed.");
} else {
- LOG.info("Replaying buffered documents.");
+ log.info("Replaying buffered documents.");
// wait for replay
RecoveryInfo report = future.get();
if (report.failed) {
- SolrException.log(LOG, "Replay failed");
+ SolrException.log(log, "Replay failed");
throw new SolrException(ErrorCode.SERVER_ERROR, "Replay failed");
}
}
@@ -815,7 +815,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
}
final private void cloudDebugLog(SolrCore core, String op) {
- if (!LOG.isDebugEnabled()) {
+ if (!log.isDebugEnabled()) {
return;
}
try {
@@ -824,12 +824,12 @@ public class RecoveryStrategy implements Runnable, Closeable {
try {
final int totalHits = searcher.count(new MatchAllDocsQuery());
final String nodeName = core.getCoreContainer().getZkController().getNodeName();
- LOG.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
+ log.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
} finally {
searchHolder.decref();
}
} catch (Exception e) {
- LOG.debug("Error in solrcloud_debug block", e);
+ log.debug("Error in solrcloud_debug block", e);
}
}
@@ -861,7 +861,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
HttpUriRequestResponse mrr = client.httpUriRequest(prepCmd);
prevSendPreRecoveryHttpUriRequest = mrr.httpUriRequest;
- LOG.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd.toString());
+ log.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd.toString());
mrr.future.get();
}
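The retry loops in the two recovery paths above share the same backoff scheme, spelled out in the comments of the @@ -427 and @@ -700 hunks: each wait is taken in 5-second sub-intervals so isClosed() can be checked between sleeps, and the number of sub-intervals doubles per retry up to a cap of 12, i.e. at most one minute per attempt. A standalone sketch of that computation (the method name recoveryWaitMillis is invented here for illustration):

    // Mirrors the loopCount logic quoted above: 2^retries sub-intervals, capped at 12.
    static long recoveryWaitMillis(int retries, long subIntervalMillis) {
      int loopCount = retries < 4 ? (int) Math.min(Math.pow(2, retries), 12) : 12;
      return loopCount * subIntervalMillis;
    }
    // recoveryWaitMillis(0, 5000) ->  5000  (one 5s interval)
    // recoveryWaitMillis(3, 5000) -> 40000  (eight 5s intervals)
    // recoveryWaitMillis(9, 5000) -> 60000  (capped at twelve intervals)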
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java b/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java
index aa648dd..5fb0946 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ReplicateFromLeader.java
@@ -37,7 +37,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ReplicateFromLeader {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private CoreContainer cc;
private String coreName;
@@ -71,7 +71,7 @@ public class ReplicateFromLeader {
} else if (uinfo.autoSoftCommmitMaxTime != -1) {
pollIntervalStr = toPollIntervalStr(uinfo.autoSoftCommmitMaxTime/2);
}
- LOG.info("Will start replication from leader with poll interval: {}", pollIntervalStr );
+ log.info("Will start replication from leader with poll interval: {}", pollIntervalStr );
NamedList<Object> slaveConfig = new NamedList<>();
slaveConfig.add("fetchFromLeader", Boolean.TRUE);
@@ -114,7 +114,7 @@ public class ReplicateFromLeader {
if (commitVersion == null) return null;
else return commitVersion;
} catch (Exception e) {
- LOG.warn("Cannot get commit command version from index commit point ",e);
+ log.warn("Cannot get commit command version from index commit point ",e);
return null;
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java b/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java
index 418a5f0..664b541 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SolrZkServer.java
@@ -150,7 +150,7 @@ public class SolrZkServer {
// Allows us to set a default for the data dir before parsing
// zoo.cfg (which validates that there is a dataDir)
class SolrZkServerProps extends QuorumPeerConfig {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final Pattern MISSING_MYID_FILE_PATTERN = Pattern.compile(".*myid file is missing$");
String solrPort; // port that Solr is listening on
@@ -164,7 +164,7 @@ class SolrZkServerProps extends QuorumPeerConfig {
public static Properties getProperties(String path) throws ConfigException {
File configFile = new File(path);
- LOG.info("Reading configuration from: " + configFile);
+ log.info("Reading configuration from: " + configFile);
try {
if (!configFile.exists()) {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java b/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java
index 3a7c750..7acdfef 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkDistributedQueue.java
@@ -55,7 +55,7 @@ import org.slf4j.LoggerFactory;
* the results should be correct but inefficient
*/
public class ZkDistributedQueue implements DistributedQueue {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
static final String PREFIX = "qn-";
@@ -245,7 +245,7 @@ public class ZkDistributedQueue implements DistributedQueue {
try {
zookeeper.delete(ops.get(j).getPath(), -1, true);
} catch (KeeperException.NoNodeException e2) {
- LOG.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
+ log.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
}
}
}
@@ -412,7 +412,7 @@ public class ZkDistributedQueue implements DistributedQueue {
for (String childName : childNames) {
// Check format
if (!childName.regionMatches(0, PREFIX, 0, PREFIX.length())) {
- LOG.debug("Found child node with improper name: " + childName);
+ log.debug("Found child node with improper name: " + childName);
continue;
}
orderedChildren.add(childName);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
index e620966..b4f9bf0 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
@@ -57,7 +57,7 @@ import org.slf4j.LoggerFactory;
* </ul>
*/
public class HttpTriggerListener extends TriggerListenerBase {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private String urlTemplate;
private String payloadTemplate;
@@ -158,7 +158,7 @@ public class HttpTriggerListener extends TriggerListenerBase {
try {
cloudManager.httpRequest(url, SolrRequest.METHOD.POST, headers, payload, timeout, followRedirects);
} catch (IOException e) {
- LOG.warn("Exception sending request for event " + event, e);
+ log.warn("Exception sending request for event " + event, e);
}
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
index bfda572..a7dcf63 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
@@ -28,11 +28,11 @@ import org.slf4j.LoggerFactory;
* events to a log.
*/
public class LoggingListener extends TriggerListenerBase {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@Override
public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context,
Throwable error, String message) {
- LOG.info("{}: stage={}, actionName={}, event={}, error={}, messsage={}", config.name, stage, actionName, event, error, message);
+ log.info("{}: stage={}, actionName={}, event={}, error={}, messsage={}", config.name, stage, actionName, event, error, message);
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java
index 12d95bc..214552e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerBase.java
@@ -49,7 +49,7 @@ import org.slf4j.LoggerFactory;
* It handles state snapshot / restore in ZK.
*/
public abstract class TriggerBase implements AutoScaling.Trigger {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
protected final String name;
protected SolrCloudManager cloudManager;
@@ -128,7 +128,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
} catch (AlreadyExistsException e) {
// ignore
} catch (InterruptedException | KeeperException | IOException e) {
- LOG.warn("Exception checking ZK path " + ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, e);
+ log.warn("Exception checking ZK path " + ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, e);
throw e;
}
for (TriggerAction action : actions) {
@@ -240,7 +240,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
}
lastState = state;
} catch (InterruptedException | BadVersionException | AlreadyExistsException | IOException | KeeperException e) {
- LOG.warn("Exception updating trigger state '" + path + "'", e);
+ log.warn("Exception updating trigger state '" + path + "'", e);
}
}
@@ -254,7 +254,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
data = versionedData.getData();
}
} catch (Exception e) {
- LOG.warn("Exception getting trigger state '" + path + "'", e);
+ log.warn("Exception getting trigger state '" + path + "'", e);
}
if (data != null) {
Map<String, Object> restoredState = (Map<String, Object>)Utils.fromJSON(data);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java
index db76314..fd587de 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEventQueue.java
@@ -35,7 +35,7 @@ import org.slf4j.LoggerFactory;
*
*/
public class TriggerEventQueue {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String ENQUEUE_TIME = "_enqueue_time_";
public static final String DEQUEUE_TIME = "_dequeue_time_";
@@ -58,7 +58,7 @@ public class TriggerEventQueue {
delegate.offer(data);
return true;
} catch (Exception e) {
- LOG.warn("Exception adding event " + event + " to queue " + triggerName, e);
+ log.warn("Exception adding event " + event + " to queue " + triggerName, e);
return false;
}
}
@@ -68,19 +68,19 @@ public class TriggerEventQueue {
try {
while ((data = delegate.peek()) != null) {
if (data.length == 0) {
- LOG.warn("ignoring empty data...");
+ log.warn("ignoring empty data...");
continue;
}
try {
Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
return fromMap(map);
} catch (Exception e) {
- LOG.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
+ log.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
continue;
}
}
} catch (Exception e) {
- LOG.warn("Exception peeking queue of trigger " + triggerName, e);
+ log.warn("Exception peeking queue of trigger " + triggerName, e);
}
return null;
}
@@ -90,19 +90,19 @@ public class TriggerEventQueue {
try {
while ((data = delegate.poll()) != null) {
if (data.length == 0) {
- LOG.warn("ignoring empty data...");
+ log.warn("ignoring empty data...");
continue;
}
try {
Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
return fromMap(map);
} catch (Exception e) {
- LOG.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
+ log.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
continue;
}
}
} catch (Exception e) {
- LOG.warn("Exception polling queue of trigger " + triggerName, e);
+ log.warn("Exception polling queue of trigger " + triggerName, e);
}
return null;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
index e4aabb0..13e1de1 100644
--- a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
@@ -72,7 +72,7 @@ import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
public class HdfsDirectoryFactory extends CachingDirectoryFactory implements SolrCoreAware, SolrMetricProducer {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String BLOCKCACHE_SLAB_COUNT = "solr.hdfs.blockcache.slab.count";
public static final String BLOCKCACHE_DIRECT_MEMORY_ALLOCATION = "solr.hdfs.blockcache.direct.memory.allocation";
@@ -155,12 +155,12 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
if (this.hdfsDataDir != null && this.hdfsDataDir.length() == 0) {
this.hdfsDataDir = null;
} else {
- LOG.info(HDFS_HOME + "=" + this.hdfsDataDir);
+ log.info(HDFS_HOME + "=" + this.hdfsDataDir);
}
cacheMerges = getConfig(CACHE_MERGES, false);
cacheReadOnce = getConfig(CACHE_READONCE, false);
boolean kerberosEnabled = getConfig(KERBEROS_ENABLED, false);
- LOG.info("Solr Kerberos Authentication "
+ log.info("Solr Kerberos Authentication "
+ (kerberosEnabled ? "enabled" : "disabled"));
if (kerberosEnabled) {
initKerberos();
@@ -171,7 +171,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
protected LockFactory createLockFactory(String rawLockType) throws IOException {
if (null == rawLockType) {
rawLockType = DirectoryFactory.LOCK_TYPE_HDFS;
- LOG.warn("No lockType configured, assuming '"+rawLockType+"'.");
+ log.warn("No lockType configured, assuming '"+rawLockType+"'.");
}
final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
switch (lockType) {
@@ -191,7 +191,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
@SuppressWarnings("resource")
protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
assert params != null : "init must be called before create";
- LOG.info("creating directory factory for path {}", path);
+ log.info("creating directory factory for path {}", path);
Configuration conf = getConf();
if (metrics == null) {
@@ -215,10 +215,10 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
boolean directAllocation = getConfig(BLOCKCACHE_DIRECT_MEMORY_ALLOCATION, true);
int slabSize = numberOfBlocksPerBank * blockSize;
- LOG.info(
+ log.info(
"Number of slabs of block cache [{}] with direct memory allocation set to [{}]",
bankCount, directAllocation);
- LOG.info(
+ log.info(
"Block cache target memory usage, slab size of [{}] will allocate [{}] slabs and use ~[{}] bytes",
new Object[] {slabSize, bankCount,
((long) bankCount * (long) slabSize)});
@@ -285,13 +285,13 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
private BlockCache getBlockDirectoryCache(int numberOfBlocksPerBank, int blockSize, int bankCount,
boolean directAllocation, int slabSize, int bufferSize, int bufferCount, boolean staticBlockCache) {
if (!staticBlockCache) {
- LOG.info("Creating new single instance HDFS BlockCache");
+ log.info("Creating new single instance HDFS BlockCache");
return createBlockCache(numberOfBlocksPerBank, blockSize, bankCount, directAllocation, slabSize, bufferSize, bufferCount);
}
synchronized (HdfsDirectoryFactory.class) {
if (globalBlockCache == null) {
- LOG.info("Creating new global HDFS BlockCache");
+ log.info("Creating new global HDFS BlockCache");
globalBlockCache = createBlockCache(numberOfBlocksPerBank, blockSize, bankCount,
directAllocation, slabSize, bufferSize, bufferCount);
}
@@ -328,7 +328,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
try {
return fileSystem.exists(hdfsDirPath);
} catch (IOException e) {
- LOG.error("Error checking if hdfs path exists", e);
+ log.error("Error checking if hdfs path exists", e);
throw new RuntimeException("Error checking if hdfs path exists", e);
}
}
@@ -351,7 +351,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
throw new RuntimeException("Could not remove directory");
}
} catch (Exception e) {
- LOG.error("Could not remove directory", e);
+ log.error("Could not remove directory", e);
throw new SolrException(ErrorCode.SERVER_ERROR,
"Could not remove directory", e);
}
@@ -428,7 +428,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
try {
return fileSystem.getContentSummary(hdfsDirPath).getLength();
} catch (IOException e) {
- LOG.error("Error checking if hdfs path exists", e);
+ log.error("Error checking if hdfs path exists", e);
throw new SolrException(ErrorCode.SERVER_ERROR, "Error checking if hdfs path exists", e);
} finally {
IOUtils.closeQuietly(fileSystem);
@@ -474,7 +474,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
final Configuration ugiConf = new Configuration(getConf());
ugiConf.set(HADOOP_SECURITY_AUTHENTICATION, kerberos);
UserGroupInformation.setConfiguration(ugiConf);
- LOG.info(
+ log.info(
"Attempting to acquire kerberos ticket with keytab: {}, principal: {} ",
keytabFile, principal);
try {
@@ -482,7 +482,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
- LOG.info("Got Kerberos ticket");
+ log.info("Got Kerberos ticket");
}
}
}
@@ -514,10 +514,10 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
try {
pathExists = fileSystem.exists(dataDirPath);
} catch (IOException e) {
- LOG.error("Error checking if hdfs path "+dataDir+" exists", e);
+ log.error("Error checking if hdfs path "+dataDir+" exists", e);
}
if (!pathExists) {
- LOG.warn("{} does not point to a valid data directory; skipping clean-up of old index directories.", dataDir);
+ log.warn("{} does not point to a valid data directory; skipping clean-up of old index directories.", dataDir);
return;
}
@@ -534,16 +534,16 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
accept = fs.isDirectory(path) && !path.equals(currentIndexDirPath) &&
(pathName.equals("index") || pathName.matches(INDEX_W_TIMESTAMP_REGEX));
} catch (IOException e) {
- LOG.error("Error checking if path {} is an old index directory, caused by: {}", path, e);
+ log.error("Error checking if path {} is an old index directory, caused by: {}", path, e);
}
return accept;
}
});
} catch (FileNotFoundException fnfe) {
// already deleted - ignore
- LOG.debug("Old index directory already deleted - skipping...", fnfe);
+ log.debug("Old index directory already deleted - skipping...", fnfe);
} catch (IOException ioExc) {
- LOG.error("Error checking for old index directories to clean-up.", ioExc);
+ log.error("Error checking for old index directories to clean-up.", ioExc);
}
if (oldIndexDirs == null || oldIndexDirs.length == 0)
@@ -560,23 +560,23 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
int i = 0;
if (afterReload) {
- LOG.info("Will not remove most recent old directory on reload {}", oldIndexDirs[0]);
+ log.info("Will not remove most recent old directory on reload {}", oldIndexDirs[0]);
i = 1;
}
- LOG.info("Found {} old index directories to clean-up under {} afterReload={}", oldIndexDirs.length - i, dataDirPath, afterReload);
+ log.info("Found {} old index directories to clean-up under {} afterReload={}", oldIndexDirs.length - i, dataDirPath, afterReload);
for (; i < oldIndexPaths.size(); i++) {
Path oldDirPath = oldIndexPaths.get(i);
if (livePaths.contains(oldDirPath.toString())) {
- LOG.warn("Cannot delete directory {} because it is still being referenced in the cache.", oldDirPath);
+ log.warn("Cannot delete directory {} because it is still being referenced in the cache.", oldDirPath);
} else {
try {
if (fileSystem.delete(oldDirPath, true)) {
- LOG.info("Deleted old index directory {}", oldDirPath);
+ log.info("Deleted old index directory {}", oldDirPath);
} else {
- LOG.warn("Failed to delete old index directory {}", oldDirPath);
+ log.warn("Failed to delete old index directory {}", oldDirPath);
}
} catch (IOException e) {
- LOG.error("Failed to delete old index directory {} due to: {}", oldDirPath, e);
+ log.error("Failed to delete old index directory {} due to: {}", oldDirPath, e);
}
}
}
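One detail worth noting in the HdfsDirectoryFactory hunk above: the block-cache sizing log widens bankCount and slabSize to long before multiplying, since the product of the two ints can overflow for large caches. A sketch of the arithmetic behind the "~[{}] bytes" figure (the helper name blockCacheBytes is invented for illustration):

    // Mirrors the slab math quoted above: slabSize = blocksPerBank * blockSize,
    // total ~= bankCount * slabSize, widened to long before the multiply.
    static long blockCacheBytes(int numberOfBlocksPerBank, int blockSize, int bankCount) {
      int slabSize = numberOfBlocksPerBank * blockSize;
      return (long) bankCount * (long) slabSize;
    }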
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
index 2969d5b..40e65b7 100644
--- a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
+++ b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
@@ -50,7 +50,7 @@ import org.slf4j.LoggerFactory;
* @see org.apache.lucene.index.IndexDeletionPolicy
*/
public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final IndexDeletionPolicy deletionPolicy;
private volatile Map<Long, IndexCommit> solrVersionVsCommits = new ConcurrentHashMap<>();
@@ -94,7 +94,7 @@ public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
// this is the common success case: the older time didn't exist, or
// came before the new time.
if (previousTime == null || previousTime <= timeToSet) {
- LOG.debug("Commit point reservation for generation {} set to {} (requested reserve time of {})",
+ log.debug("Commit point reservation for generation {} set to {} (requested reserve time of {})",
indexGen, timeToSet, reserveTime);
break;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java
index 22a4895..9e02b21 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java
@@ -31,7 +31,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class BackupRepositoryFactory {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final Map<String,PluginInfo> backupRepoPluginByName = new HashMap<>();
private PluginInfo defaultBackupRepoPlugin = null;
@@ -52,14 +52,14 @@ public class BackupRepositoryFactory {
this.defaultBackupRepoPlugin = backupRepoPlugins[i];
}
backupRepoPluginByName.put(name, backupRepoPlugins[i]);
- LOG.info("Added backup repository with configuration params {}", backupRepoPlugins[i]);
+ log.info("Added backup repository with configuration params {}", backupRepoPlugins[i]);
}
if (backupRepoPlugins.length == 1) {
this.defaultBackupRepoPlugin = backupRepoPlugins[0];
}
if (this.defaultBackupRepoPlugin != null) {
- LOG.info("Default configuration for backup repository is with configuration params {}",
+ log.info("Default configuration for backup repository is with configuration params {}",
defaultBackupRepoPlugin);
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03eba329/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
index 4b4145d..a34ad7a 100644
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -120,7 +120,7 @@ public class IndexFetcher {
public static final String INDEX_PROPERTIES = "index.properties";
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private String masterUrl;
@@ -242,7 +242,7 @@ public class IndexFetcher {
"'masterUrl' is required for a slave");
if (masterUrl != null && masterUrl.endsWith(ReplicationHandler.PATH)) {
masterUrl = masterUrl.substring(0, masterUrl.length()-12);
- LOG.warn("'masterUrl' must be specified without the "+ReplicationHandler.PATH+" suffix");
+ log.warn("'masterUrl' must be specified without the "+ReplicationHandler.PATH+" suffix");
}
this.masterUrl = masterUrl;
@@ -327,7 +327,7 @@ public class IndexFetcher {
filesToDownload = Collections.synchronizedList(files);
else {
filesToDownload = Collections.emptyList();
- LOG.error("No files to download for index generation: "+ gen);
+ log.error("No files to download for index generation: "+ gen);
}
files = (List<Map<String,Object>>) response.get(CONF_FILES);
@@ -373,7 +373,7 @@ public class IndexFetcher {
// when we are a bit more confident we may want to try a partial replication
// if the error is connection related or something, but we have to be careful
forceReplication = true;
- LOG.info("Last replication failed, so I'll force replication");
+ log.info("Last replication failed, so I'll force replication");
}
try {
@@ -385,19 +385,19 @@ public class IndexFetcher {
return IndexFetchResult.EXPECTING_NON_LEADER;
}
if (replica.getState() != Replica.State.ACTIVE) {
- LOG.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
+ log.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
}
if (!solrCore.getCoreContainer().getZkController().getClusterState().liveNodesContain(replica.getNodeName())) {
- LOG.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
+ log.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
}
if (!replica.getCoreUrl().equals(masterUrl)) {
masterUrl = replica.getCoreUrl();
- LOG.info("Updated masterUrl to {}", masterUrl);
+ log.info("Updated masterUrl to {}", masterUrl);
// TODO: Do we need to set forceReplication = true?
} else {
- LOG.debug("masterUrl didn't change");
+ log.debug("masterUrl didn't change");
}
}
//get the current 'replicateable' index version in the master
@@ -407,10 +407,10 @@ public class IndexFetcher {
} catch (Exception e) {
final String errorMsg = e.toString();
if (!Strings.isNullOrEmpty(errorMsg) && errorMsg.contains(INTERRUPT_RESPONSE_MESSAGE)) {
- LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
+ log.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
return new IndexFetchResult(IndexFetchResult.FAILED_BY_INTERRUPT_MESSAGE, false, e);
} else {
- LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
+ log.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
}
}
@@ -418,8 +418,8 @@ public class IndexFetcher {
long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
long latestGeneration = (Long) response.get(GENERATION);
- LOG.info("Master's generation: " + latestGeneration);
- LOG.info("Master's version: " + latestVersion);
+ log.info("Master's generation: " + latestGeneration);
+ log.info("Master's version: " + latestVersion);
// TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
@@ -429,7 +429,7 @@ public class IndexFetcher {
try {
searcherRefCounted = solrCore.getNewestSearcher(false);
if (searcherRefCounted == null) {
- LOG.warn("No open searcher found - fetch aborted");
+ log.warn("No open searcher found - fetch aborted");
return IndexFetchResult.NO_INDEX_COMMIT_EXIST;
}
commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
@@ -439,14 +439,14 @@ public class IndexFetcher {
}
}
- LOG.info("Slave's generation: " + commit.getGeneration());
- LOG.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
+ log.info("Slave's generation: " + commit.getGeneration());
+ log.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
if (latestVersion == 0L) {
if (commit.getGeneration() != 0) {
// since we won't get the files for an empty index,
// we just clear ours and commit
- LOG.info("New index in Master. Deleting mine...");
+ log.info("New index in Master. Deleting mine...");
RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(solrCore);
try {
iw.get().deleteAll();
@@ -464,27 +464,27 @@ public class IndexFetcher {
//there is nothing to be replicated
successfulInstall = true;
- LOG.debug("Nothing to replicate, master's version is 0");
+ log.debug("Nothing to replicate, master's version is 0");
return IndexFetchResult.MASTER_VERSION_ZERO;
}
// TODO: Should we be comparing timestamps (across machines) here?
if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
//master and slave are already in sync just return
- LOG.info("Slave in sync with master.");
+ log.info("Slave in sync with master.");
successfulInstall = true;
return IndexFetchResult.ALREADY_IN_SYNC;
}
- LOG.info("Starting replication process");
+ log.info("Starting replication process");
// get the list of files first
fetchFileList(latestGeneration);
// this can happen if the commit point is deleted before we fetch the file list.
if (filesToDownload.isEmpty()) {
return IndexFetchResult.PEER_INDEX_COMMIT_DELETED;
}
- LOG.info("Number of files in latest index in master: " + filesToDownload.size());
+ log.info("Number of files in latest index in master: " + filesToDownload.size());
if (tlogFilesToDownload != null) {
- LOG.info("Number of tlog files in master: " + tlogFilesToDownload.size());
+ log.info("Number of tlog files in master: " + tlogFilesToDownload.size());
}
// Create the sync service
@@ -540,17 +540,17 @@ public class IndexFetcher {
indexWriter.deleteUnusedFiles();
while (hasUnusedFiles(indexDir, commit)) {
indexWriter.deleteUnusedFiles();
- LOG.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
+ log.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
Thread.sleep(1000);
c++;
if (c >= 30) {
- LOG.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
+ log.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
isFullCopyNeeded = true;
break;
}
}
if (c > 0) {
- LOG.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
+ log.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
}
} finally {
writer.decref();
@@ -564,7 +564,7 @@ public class IndexFetcher {
solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
}
- LOG.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
+ log.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
successfulInstall = false;
long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir,
@@ -575,7 +575,7 @@ public class IndexFetcher {
}
final long timeTakenSeconds = getReplicationTimeElapsed();
final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0 ? Long.valueOf(bytesDownloaded / timeTakenSeconds) : null);
- LOG.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
+ log.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);
Collection<Map<String,Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
@@ -603,7 +603,7 @@ public class IndexFetcher {
}
}
- LOG.info("Configuration files are modified, core will be reloaded");
+ log.info("Configuration files are modified, core will be reloaded");
logReplicationTimeAndConfFiles(modifiedConfFiles,
successfulInstall);// write to a file time of replication and
// conf files.
@@ -633,7 +633,7 @@ public class IndexFetcher {
// we must reload the core after we open the IW back up
if (successfulInstall && (reloadCore || forceCoreReload)) {
- LOG.info("Reloading SolrCore {}", solrCore.getName());
+ log.info("Reloading SolrCore {}", solrCore.getName());
reloadCore();
}
@@ -642,7 +642,7 @@ public class IndexFetcher {
// let the system know we are changing dir's and the old one
// may be closed
if (indexDir != null) {
- LOG.info("removing old index directory " + indexDir);
+ log.info("removing old index directory " + indexDir);
solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
solrCore.getDirectoryFactory().remove(indexDir);
}
@@ -658,7 +658,7 @@ public class IndexFetcher {
cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
cleanupDone = true;
// we try with a full copy of the index
- LOG.warn(
+ log.warn(
"Replication attempt was not successful - trying a full index replication reloadCore={}",
reloadCore);
successfulInstall = fetchLatestIndex(true, reloadCore).getSuccessful();
@@ -667,7 +667,7 @@ public class IndexFetcher {
markReplicationStop();
return successfulInstall ? IndexFetchResult.INDEX_FETCH_SUCCESS : IndexFetchResult.INDEX_FETCH_FAILURE;
} catch (ReplicationHandlerException e) {
- LOG.error("User aborted Replication");
+ log.error("User aborted Replication");
return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
} catch (SolrException e) {
throw e;
@@ -699,7 +699,7 @@ public class IndexFetcher {
logReplicationTimeAndConfFiles(null, successfulInstall);
} catch (Exception e) {
// this can happen on shutdown, a fetch may be running in a thread after DirectoryFactory is closed
- LOG.warn("Could not log failed replication details", e);
+ log.warn("Could not log failed replication details", e);
}
}
@@ -724,24 +724,24 @@ public class IndexFetcher {
core.getDirectoryFactory().remove(tmpIndexDir);
}
} catch (Exception e) {
- SolrException.log(LOG, e);
+ SolrException.log(log, e);
} finally {
try {
if (tmpIndexDir != null) core.getDirectoryFactory().release(tmpIndexDir);
} catch (Exception e) {
- SolrException.log(LOG, e);
+ SolrException.log(log, e);
}
try {
if (indexDir != null) {
core.getDirectoryFactory().release(indexDir);
}
} catch (Exception e) {
- SolrException.log(LOG, e);
+ SolrException.log(log, e);
}
try {
if (tmpTlogDir != null) delTree(tmpTlogDir);
} catch (Exception e) {
- SolrException.log(LOG, e);
+ SolrException.log(log, e);
}
}
}
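
Note the shape of the cleanup above: every release sits in its own try/catch, so a failure releasing one directory cannot stop the remaining releases. A sketch of the pattern with a hypothetical Resource type in place of Solr's DirectoryFactory:

public class IndependentCleanupSketch {
  interface Resource { void release() throws Exception; }

  // Releases each resource in turn, logging failures instead of letting the
  // first exception abort the rest of the cleanup.
  static void releaseAll(Resource... resources) {
    for (Resource r : resources) {
      try {
        if (r != null) r.release();
      } catch (Exception e) {
        System.err.println("release failed: " + e); // stand-in for SolrException.log
      }
    }
  }

  public static void main(String[] args) {
    releaseAll(() -> { throw new Exception("tmpIndexDir release failed"); },
               () -> System.out.println("indexDir released")); // second still runs
  }
}
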
@@ -754,7 +754,7 @@ public class IndexFetcher {
String[] allFiles = indexDir.listAll();
for (String file : allFiles) {
if (!file.equals(segmentsFileName) && !currentFiles.contains(file) && !file.endsWith(".lock")) {
- LOG.info("Found unused file: " + file);
+ log.info("Found unused file: " + file);
return true;
}
}
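
The scan above calls a file "unused" when it is not the segments file, not referenced by the current commit, and not a lock file. A self-contained sketch of the same test:

import java.util.List;
import java.util.Set;

public class UnusedFileScanSketch {
  static boolean hasUnusedFiles(List<String> allFiles, String segmentsFileName,
                                Set<String> currentFiles) {
    for (String file : allFiles) {
      if (!file.equals(segmentsFileName) && !currentFiles.contains(file)
          && !file.endsWith(".lock")) {
        return true; // stale file left behind by an older commit
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(hasUnusedFiles(
        List.of("segments_2", "_0.cfs", "write.lock"),
        "segments_2", Set.of("segments_2"))); // true: _0.cfs is unused
  }
}
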
@@ -838,7 +838,7 @@ public class IndexFetcher {
solrCore.getDirectoryFactory().renameWithOverwrite(dir, tmpFileName, REPLICATION_PROPERTIES);
} catch (Exception e) {
- LOG.warn("Exception while updating statistics", e);
+ log.warn("Exception while updating statistics", e);
} finally {
if (dir != null) {
solrCore.getDirectoryFactory().release(dir);
@@ -899,7 +899,7 @@ public class IndexFetcher {
try {
waitSearcher[0].get();
} catch (InterruptedException | ExecutionException e) {
- SolrException.log(LOG, e);
+ SolrException.log(log, e);
}
}
commitPoint = searcher.get().getIndexReader().getIndexCommit();
@@ -921,7 +921,7 @@ public class IndexFetcher {
try {
solrCore.getCoreContainer().reload(solrCore.getName());
} catch (Exception e) {
- LOG.error("Could not reload core ", e);
+ log.error("Could not reload core ", e);
} finally {
latch.countDown();
}
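
The reload above runs on its own thread, and the latch.countDown() in the finally block pairs with a CountDownLatch so the caller can block until the reload has finished or failed. A self-contained sketch of that hand-off:

import java.util.concurrent.CountDownLatch;

public class AsyncReloadSketch {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    new Thread(() -> {
      try {
        System.out.println("reloading core..."); // stand-in for coreContainer.reload(name)
      } catch (Exception e) {
        System.err.println("Could not reload core " + e);
      } finally {
        latch.countDown(); // always signal, even when the reload failed
      }
    }).start();
    latch.await(); // the caller waits here for the reload to complete
    System.out.println("reload finished");
  }
}
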
@@ -935,7 +935,7 @@ public class IndexFetcher {
}
private void downloadConfFiles(List<Map<String, Object>> confFilesToDownload, long latestGeneration) throws Exception {
- LOG.info("Starting download of configuration files from master: " + confFilesToDownload);
+ log.info("Starting download of configuration files from master: " + confFilesToDownload);
confFilesDownloaded = Collections.synchronizedList(new ArrayList<>());
File tmpconfDir = new File(solrCore.getResourceLoader().getConfigDir(), "conf." + getDateAsStr(new Date()));
try {
@@ -964,7 +964,7 @@ public class IndexFetcher {
* Download all the tlog files to the temp tlog directory.
*/
private long downloadTlogFiles(File tmpTlogDir, long latestGeneration) throws Exception {
- LOG.info("Starting download of tlog files from master: " + tlogFilesToDownload);
+ log.info("Starting download of tlog files from master: " + tlogFilesToDownload);
tlogFilesDownloaded = Collections.synchronizedList(new ArrayList<>());
long bytesDownloaded = 0;
@@ -998,8 +998,8 @@ public class IndexFetcher {
private long downloadIndexFiles(boolean downloadCompleteIndex, Directory indexDir, Directory tmpIndexDir,
String indexDirPath, String tmpIndexDirPath, long latestGeneration)
throws Exception {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Download files to dir: " + Arrays.asList(indexDir.listAll()));
+ if (log.isDebugEnabled()) {
+ log.debug("Download files to dir: " + Arrays.asList(indexDir.listAll()));
}
long bytesDownloaded = 0;
long bytesSkippedCopying = 0;
@@ -1013,12 +1013,12 @@ public class IndexFetcher {
long size = (Long) file.get(SIZE);
CompareResult compareResult = compareFile(indexDir, filename, size, (Long) file.get(CHECKSUM));
boolean alwaysDownload = filesToAlwaysDownloadIfNoChecksums(filename, size, compareResult);
- LOG.debug("Downloading file={} size={} checksum={} alwaysDownload={}", filename, size, file.get(CHECKSUM), alwaysDownload);
+ log.debug("Downloading file={} size={} checksum={} alwaysDownload={}", filename, size, file.get(CHECKSUM), alwaysDownload);
if (!compareResult.equal || downloadCompleteIndex || alwaysDownload) {
File localFile = new File(indexDirPath, filename);
if (downloadCompleteIndex && doDifferentialCopy && compareResult.equal && compareResult.checkSummed
&& localFile.exists()) {
- LOG.info("Don't need to download this file. Local file's path is: {}, checksum is: {}",
+ log.info("Don't need to download this file. Local file's path is: {}, checksum is: {}",
localFile.getAbsolutePath(), file.get(CHECKSUM));
// A hard link here should survive the eventual directory move, and should be more space efficient as
// compared to a file copy. TODO: Maybe we could do a move safely here?
@@ -1033,10 +1033,10 @@ public class IndexFetcher {
}
filesDownloaded.add(new HashMap<>(file));
} else {
- LOG.info("Skipping download for {} because it already exists", file.get(NAME));
+ log.info("Skipping download for {} because it already exists", file.get(NAME));
}
}
- LOG.info("Bytes downloaded: {}, Bytes skipped downloading: {}", bytesDownloaded, bytesSkippedCopying);
+ log.info("Bytes downloaded: {}, Bytes skipped downloading: {}", bytesDownloaded, bytesSkippedCopying);
return bytesDownloaded;
}
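
The differential copy above avoids re-downloading a file whose checksum already matches: the local copy is hard-linked into the destination, which survives the later directory move and costs no extra disk space. A sketch over java.nio (downloadFromMaster is a hypothetical stand-in for the real fetch):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class DifferentialCopySketch {
  static long linkOrDownload(Path local, Path dest, boolean checksumMatches) throws IOException {
    if (checksumMatches && Files.exists(local)) {
      Files.createLink(dest, local); // hard link: space-efficient, survives a directory move
      return 0L;                     // nothing downloaded
    }
    return downloadFromMaster(dest); // hypothetical: fetch the file, return bytes downloaded
  }

  static long downloadFromMaster(Path dest) {
    return 0L; // stub for the sketch
  }
}
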
@@ -1065,7 +1065,7 @@ public class IndexFetcher {
indexFileChecksum = CodecUtil.retrieveChecksum(indexInput);
compareResult.checkSummed = true;
} catch (Exception e) {
- LOG.warn("Could not retrieve checksum from file.", e);
+ log.warn("Could not retrieve checksum from file.", e);
}
}
@@ -1076,7 +1076,7 @@ public class IndexFetcher {
compareResult.equal = true;
return compareResult;
} else {
- LOG.info(
+ log.info(
"File {} did not match. expected length is {} and actual length is {}", filename, backupIndexFileLen, indexFileLen);
compareResult.equal = false;
return compareResult;
@@ -1089,7 +1089,7 @@ public class IndexFetcher {
compareResult.equal = true;
return compareResult;
} else {
- LOG.warn("File {} did not match. expected checksum is {} and actual is checksum {}. " +
+ log.warn("File {} did not match. expected checksum is {} and actual is checksum {}. " +
"expected length is {} and actual length is {}", filename, backupIndexFileChecksum, indexFileChecksum,
backupIndexFileLen, indexFileLen);
compareResult.equal = false;
@@ -1100,7 +1100,7 @@ public class IndexFetcher {
compareResult.equal = false;
return compareResult;
} catch (IOException e) {
- LOG.error("Could not read file " + filename + ". Downloading it again", e);
+ log.error("Could not read file " + filename + ". Downloading it again", e);
compareResult.equal = false;
return compareResult;
}
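
compareFile treats an unreadable local file as a mismatch: the IOException branch above logs and forces a re-download instead of trusting a file it cannot verify. A sketch of that defensive default:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class CompareFileSketch {
  // Any read failure counts as "not equal", so the file is downloaded again.
  static boolean sameLength(Path file, long expectedLength) {
    try {
      return Files.size(file) == expectedLength;
    } catch (IOException e) {
      System.err.println("Could not read file " + file + ". Downloading it again: " + e);
      return false;
    }
  }
}
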
@@ -1139,7 +1139,7 @@ public class IndexFetcher {
}
} else {
if (length != dir.fileLength(filename)) {
- LOG.warn("File {} did not match. expected length is {} and actual length is {}",
+ log.warn("File {} did not match. expected length is {} and actual length is {}",
filename, length, dir.fileLength(filename));
return true;
}
@@ -1154,25 +1154,25 @@ public class IndexFetcher {
* <p/>
*/
private boolean moveAFile(Directory tmpIdxDir, Directory indexDir, String fname) {
- LOG.debug("Moving file: {}", fname);
+ log.debug("Moving file: {}", fname);
boolean success = false;
try {
if (slowFileExists(indexDir, fname)) {
- LOG.warn("Cannot complete replication attempt because file already exists:" + fname);
+ log.warn("Cannot complete replication attempt because file already exists:" + fname);
// we fail - we downloaded the files we need, if we can't move one in, we can't
// count on the correct index
return false;
}
} catch (IOException e) {
- SolrException.log(LOG, "could not check if a file exists", e);
+ SolrException.log(log, "could not check if a file exists", e);
return false;
}
try {
solrCore.getDirectoryFactory().move(tmpIdxDir, indexDir, fname, DirectoryFactory.IOCONTEXT_NO_CACHE);
success = true;
} catch (IOException e) {
- SolrException.log(LOG, "Could not move file", e);
+ SolrException.log(log, "Could not move file", e);
}
return success;
}
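
moveAFile above refuses to overwrite: if the name already exists in the live index directory the whole attempt fails, because a partially moved index cannot be trusted. A sketch with java.nio in place of Solr's DirectoryFactory:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class MoveAFileSketch {
  static boolean moveAFile(Path tmpIdxDir, Path indexDir, String fname) {
    Path target = indexDir.resolve(fname);
    if (Files.exists(target)) {
      // don't overwrite; the caller must not count on the resulting index
      System.err.println("Cannot complete replication attempt because file already exists: " + fname);
      return false;
    }
    try {
      Files.move(tmpIdxDir.resolve(fname), target);
      return true;
    } catch (IOException e) {
      System.err.println("Could not move file: " + e);
      return false;
    }
  }
}
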
@@ -1181,10 +1181,10 @@ public class IndexFetcher {
* Copy all index files from the temp index dir to the actual index. The segments_N file is copied last.
*/
private boolean moveIndexFiles(Directory tmpIdxDir, Directory indexDir) {
- if (LOG.isDebugEnabled()) {
+ if (log.isDebugEnabled()) {
try {
- LOG.info("From dir files:" + Arrays.asList(tmpIdxDir.listAll()));
- LOG.info("To dir files:" + Arrays.asList(indexDir.listAll()));
+ log.info("From dir files:" + Arrays.asList(tmpIdxDir.listAll()));
+ log.info("To dir files:" + Arrays.asList(indexDir.listAll()));
} catch (IOException e) {
throw new RuntimeException(e);
}
@@ -1245,7 +1245,7 @@ public class IndexFetcher {
((CdcrUpdateLog) ulog).initForRecovery(bufferedUpdates.tlog, bufferedUpdates.offset);
}
catch (Exception e) {
- LOG.error("Unable to copy tlog files", e);
+ log.error("Unable to copy tlog files", e);
return false;
}
finally {
@@ -1319,7 +1319,7 @@ public class IndexFetcher {
try {
Files.move(tlogDir, backupTlogDir, StandardCopyOption.ATOMIC_MOVE);
} catch (IOException e) {
- SolrException.log(LOG, "Unable to rename: " + tlogDir + " to: " + backupTlogDir, e);
+ SolrException.log(log, "Unable to rename: " + tlogDir + " to: " + backupTlogDir, e);
return false;
}
@@ -1327,7 +1327,7 @@ public class IndexFetcher {
try {
Files.move(src, tlogDir, StandardCopyOption.ATOMIC_MOVE);
} catch (IOException e) {
- SolrException.log(LOG, "Unable to rename: " + src + " to: " + tlogDir, e);
+ SolrException.log(log, "Unable to rename: " + src + " to: " + tlogDir, e);
// In case of error, try to restore the original tlog directory
try {
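
Taken together, the two hunks above are a swap with rollback: the live tlog directory is atomically moved aside, the downloaded one is moved into its place, and on failure the original is put back. A self-contained sketch of that sequence:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicSwapSketch {
  static boolean swap(Path live, Path backup, Path incoming) {
    try {
      Files.move(live, backup, StandardCopyOption.ATOMIC_MOVE); // step 1: move live aside
    } catch (IOException e) {
      return false; // nothing has changed yet
    }
    try {
      Files.move(incoming, live, StandardCopyOption.ATOMIC_MOVE); // step 2: move new one in
      return true;
    } catch (IOException e) {
      try {
        Files.move(backup, live, StandardCopyOption.ATOMIC_MOVE); // rollback to the original
      } catch (IOException ignored) {
        // both moves failed; the caller is left with the backup copy
      }
      return false;
    }
  }
}
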
@@ -1404,7 +1404,7 @@ public class IndexFetcher {
org.apache.lucene.util.IOUtils.rm(dir.toPath());
return true;
} catch (IOException e) {
- LOG.warn("Unable to delete directory : " + dir, e);
+ log.warn("Unable to delete directory : " + dir, e);
return false;
}
}
@@ -1551,7 +1551,7 @@ public class IndexFetcher {
fetch();
} catch(Exception e) {
if (!aborted) {
- SolrException.log(IndexFetcher.LOG, "Error fetching file, doing one retry...", e);
+ SolrException.log(IndexFetcher.log, "Error fetching file, doing one retry...", e);
// one retry
fetch();
} else {
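
The wrapper above retries the fetch exactly once, and only when the failure was not a user abort. A sketch with a hypothetical fetch callable:

import java.util.concurrent.Callable;

public class RetryOnceSketch {
  static <T> T fetchWithOneRetry(Callable<T> fetch, boolean aborted) throws Exception {
    try {
      return fetch.call();
    } catch (Exception e) {
      if (aborted) throw e; // a user abort is surfaced immediately, no retry
      System.err.println("Error fetching file, doing one retry... " + e);
      return fetch.call();  // a single retry; a second failure propagates
    }
  }
}
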
@@ -1605,7 +1605,7 @@ public class IndexFetcher {
//read the size of the packet
int packetSize = readInt(intbytes);
if (packetSize <= 0) {
- LOG.warn("No content received for file: {}", fileName);
+ log.warn("No content received for file: {}", fileName);
return NO_CONTENT;
}
//TODO consider recoding the remaining logic to not use/need buf[]; instead use the internal buffer of fis
@@ -1626,7 +1626,7 @@ public class IndexFetcher {
checksum.update(buf, 0, packetSize);
long checkSumClient = checksum.getValue();
if (checkSumClient != checkSumServer) {
- LOG.error("Checksum not matched between client and server for file: {}", fileName);
+ log.error("Checksum not matched between client and server for file: {}", fileName);
//if checksum is wrong it is a problem return for retry
return 1;
}
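
Each packet is verified before it is written: the client recomputes a checksum over the received bytes and compares it with the value sent by the server; a mismatch means the packet is returned for retry. A sketch using java.util.zip's Adler32 (treating the concrete Checksum implementation as an assumption):

import java.util.zip.Adler32;
import java.util.zip.Checksum;

public class PacketCheckSketch {
  static boolean packetOk(byte[] buf, int packetSize, long checkSumServer) {
    Checksum checksum = new Adler32();
    checksum.update(buf, 0, packetSize); // recompute over exactly the packet bytes
    return checksum.getValue() == checkSumServer;
  }

  public static void main(String[] args) {
    byte[] data = "hello".getBytes();
    Checksum server = new Adler32();
    server.update(data, 0, data.length);
    System.out.println(packetOk(data, data.length, server.getValue())); // true
  }
}
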
@@ -1634,7 +1634,7 @@ public class IndexFetcher {
//if everything is fine, write down the packet to the file
file.write(buf, packetSize);
bytesDownloaded += packetSize;
- LOG.debug("Fetched and wrote {} bytes of file: {}", bytesDownloaded, fileName);
+ log.debug("Fetched and wrote {} bytes of file: {}", bytesDownloaded, fileName);
if (bytesDownloaded >= size)
return 0;
//errorCount is always set to zero after a successful packet
@@ -1643,7 +1643,7 @@ public class IndexFetcher {
} catch (ReplicationHandlerException e) {
throw e;
} catch (Exception e) {
- LOG.warn("Error in fetching file: {} (downloaded {} of {} bytes)",
+ log.warn("Error in fetching file: {} (downloaded {} of {} bytes)",
fileName, bytesDownloaded, size, e);
//for any failure, increment the error count
errorCount++;
@@ -1686,7 +1686,7 @@ public class IndexFetcher {
try {
file.close();
} catch (Exception e) {/* no-op */
- LOG.error("Error closing file: {}", this.saveAs, e);
+ log.error("Error closing file: {}", this.saveAs, e);
}
if (bytesDownloaded != size) {
//if the download is not complete then
@@ -1694,7 +1694,7 @@ public class IndexFetcher {
try {
file.delete();
} catch (Exception e) {
- LOG.error("Error deleting file: {}", this.saveAs, e);
+ log.error("Error deleting file: {}", this.saveAs, e);
}
//if the failure is due to a user abort it is returned normally else an exception is thrown
if (!aborted)